method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_get_mock_folder
|
return {'folder': {'title': 'runbook', 'creator_id': 'testing',
'folder_type': 'shared', 'parent_id': 'ABCD', 'inherit_mode': 'inherit',
'color': 'manila', 'id': f'{folder_id}', 'created_usec':
1668405728528904, 'updated_usec': 1697356632672453, 'link':
'https://example.quip.com/YPH9OAR2Eu5'}, 'member_ids': [], 'children':
[{'thread_id': 'ABC'}, {'thread_id': 'DEF'}]}
|
def _get_mock_folder(self, folder_id: str) ->Dict:
return {'folder': {'title': 'runbook', 'creator_id': 'testing',
'folder_type': 'shared', 'parent_id': 'ABCD', 'inherit_mode':
'inherit', 'color': 'manila', 'id': f'{folder_id}', 'created_usec':
1668405728528904, 'updated_usec': 1697356632672453, 'link':
'https://example.quip.com/YPH9OAR2Eu5'}, 'member_ids': [],
'children': [{'thread_id': 'ABC'}, {'thread_id': 'DEF'}]}
| null |
from_uri
|
"""Construct a SQLAlchemy engine from URI."""
_engine_args = engine_args or {}
return cls(create_engine(database_uri, **_engine_args), **kwargs)
|
@classmethod
def from_uri(cls, database_uri: str, engine_args: Optional[dict]=None, **
kwargs: Any) ->SQLDatabase:
"""Construct a SQLAlchemy engine from URI."""
_engine_args = engine_args or {}
return cls(create_engine(database_uri, **_engine_args), **kwargs)
|
Construct a SQLAlchemy engine from URI.
|
_chain_type
|
return 'llm_personalizer_chain'
|
@property
def _chain_type(self) ->str:
return 'llm_personalizer_chain'
| null |
_run
|
try:
unix_timestamp = dt.timestamp(dt.strptime(timestamp, UTC_FORMAT))
result = self.client.chat_scheduleMessage(channel=channel, text=message,
post_at=unix_timestamp)
output = 'Message scheduled: ' + str(result)
return output
except Exception as e:
return 'Error scheduling message: {}'.format(e)
|
def _run(self, message: str, channel: str, timestamp: str, run_manager:
Optional[CallbackManagerForToolRun]=None) ->str:
try:
unix_timestamp = dt.timestamp(dt.strptime(timestamp, UTC_FORMAT))
result = self.client.chat_scheduleMessage(channel=channel, text=
message, post_at=unix_timestamp)
output = 'Message scheduled: ' + str(result)
return output
except Exception as e:
return 'Error scheduling message: {}'.format(e)
| null |
on_tool_error
|
self.on_tool_error_common()
|
def on_tool_error(self, *args: Any, **kwargs: Any) ->Any:
self.on_tool_error_common()
| null |
test_gpt_router_generate
|
"""Test generate method of GPTRouter."""
anthropic_claude = GPTRouterModel(name='claude-instant-1.2', provider_name=
'anthropic')
chat = GPTRouter(models_priority_list=[anthropic_claude])
chat_messages: List[List[BaseMessage]] = [[HumanMessage(content=
'If (5 + x = 18), what is x?')]]
messages_copy = [messages.copy() for messages in chat_messages]
result: LLMResult = chat.generate(chat_messages)
assert isinstance(result, LLMResult)
for response in result.generations[0]:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
assert chat_messages == messages_copy
|
def test_gpt_router_generate() ->None:
"""Test generate method of GPTRouter."""
anthropic_claude = GPTRouterModel(name='claude-instant-1.2',
provider_name='anthropic')
chat = GPTRouter(models_priority_list=[anthropic_claude])
chat_messages: List[List[BaseMessage]] = [[HumanMessage(content=
'If (5 + x = 18), what is x?')]]
messages_copy = [messages.copy() for messages in chat_messages]
result: LLMResult = chat.generate(chat_messages)
assert isinstance(result, LLMResult)
for response in result.generations[0]:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
assert chat_messages == messages_copy
|
Test generate method of GPTRouter.
|
test_pgvector_max_marginal_relevance_search
|
"""Test max marginal relevance search."""
texts = ['foo', 'bar', 'baz']
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING, pre_delete_collection=True)
output = docsearch.max_marginal_relevance_search('foo', k=1, fetch_k=3)
assert output == [Document(page_content='foo')]
|
def test_pgvector_max_marginal_relevance_search() ->None:
"""Test max marginal relevance search."""
texts = ['foo', 'bar', 'baz']
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING, pre_delete_collection=True)
output = docsearch.max_marginal_relevance_search('foo', k=1, fetch_k=3)
assert output == [Document(page_content='foo')]
|
Test max marginal relevance search.
|
_stream_with_aggregation
|
final_chunk: Optional[GenerationChunk] = None
for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_generation_chunk(stream_resp)
if final_chunk is None:
final_chunk = chunk
else:
final_chunk += chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, verbose=verbose)
if final_chunk is None:
raise ValueError('No data received from Ollama stream.')
return final_chunk
|
def _stream_with_aggregation(self, prompt: str, stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, verbose:
bool=False, **kwargs: Any) ->GenerationChunk:
final_chunk: Optional[GenerationChunk] = None
for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_generation_chunk(stream_resp)
if final_chunk is None:
final_chunk = chunk
else:
final_chunk += chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, verbose=verbose)
if final_chunk is None:
raise ValueError('No data received from Ollama stream.')
return final_chunk
| null |
_get_topics_of_reflection
|
"""Return the 3 most salient high-level questions about recent observations."""
prompt = PromptTemplate.from_template(
"""{observations}
Given only the information above, what are the 3 most salient high-level questions we can answer about the subjects in the statements?
Provide each question on a new line."""
)
observations = self.memory_retriever.memory_stream[-last_k:]
observation_str = '\n'.join([self._format_memory_detail(o) for o in
observations])
result = self.chain(prompt).run(observations=observation_str)
return self._parse_list(result)
|
def _get_topics_of_reflection(self, last_k: int=50) ->List[str]:
"""Return the 3 most salient high-level questions about recent observations."""
prompt = PromptTemplate.from_template(
"""{observations}
Given only the information above, what are the 3 most salient high-level questions we can answer about the subjects in the statements?
Provide each question on a new line."""
)
observations = self.memory_retriever.memory_stream[-last_k:]
observation_str = '\n'.join([self._format_memory_detail(o) for o in
observations])
result = self.chain(prompt).run(observations=observation_str)
return self._parse_list(result)
|
Return the 3 most salient high-level questions about recent observations.
|
test_huggingface_instructor_embedding_query
|
"""Test huggingface embeddings."""
query = 'foo bar'
model_name = 'hkunlp/instructor-base'
embedding = HuggingFaceInstructEmbeddings(model_name=model_name)
output = embedding.embed_query(query)
assert len(output) == 768
|
def test_huggingface_instructor_embedding_query() ->None:
"""Test huggingface embeddings."""
query = 'foo bar'
model_name = 'hkunlp/instructor-base'
embedding = HuggingFaceInstructEmbeddings(model_name=model_name)
output = embedding.embed_query(query)
assert len(output) == 768
|
Test huggingface embeddings.
|
load
|
"""Load documents."""
chunks: List[Document] = []
if self.access_token and self.docset_id:
_document_details = self._document_details_for_docset_id(self.docset_id)
if self.document_ids:
_document_details = [d for d in _document_details if d[ID_KEY] in
self.document_ids]
_project_details = self._project_details_for_docset_id(self.docset_id)
combined_project_metadata: Dict[str, Dict] = {}
if _project_details and self.include_project_metadata_in_doc_metadata:
for project in _project_details:
metadata = self._metadata_for_project(project)
for file_id in metadata:
if file_id not in combined_project_metadata:
combined_project_metadata[file_id] = metadata[file_id]
else:
combined_project_metadata[file_id].update(metadata[file_id]
)
for doc in _document_details:
doc_id = doc[ID_KEY]
doc_name = doc.get(DOCUMENT_NAME_KEY)
doc_metadata = combined_project_metadata.get(doc_id)
chunks += self._load_chunks_for_document(document_id=doc_id,
docset_id=self.docset_id, document_name=doc_name,
additional_metadata=doc_metadata)
elif self.file_paths:
for path in self.file_paths:
path = Path(path)
with open(path, 'rb') as file:
chunks += self._parse_dgml(content=file.read(), document_name=
path.name)
return chunks
|
def load(self) ->List[Document]:
"""Load documents."""
chunks: List[Document] = []
if self.access_token and self.docset_id:
_document_details = self._document_details_for_docset_id(self.docset_id
)
if self.document_ids:
_document_details = [d for d in _document_details if d[ID_KEY] in
self.document_ids]
_project_details = self._project_details_for_docset_id(self.docset_id)
combined_project_metadata: Dict[str, Dict] = {}
if _project_details and self.include_project_metadata_in_doc_metadata:
for project in _project_details:
metadata = self._metadata_for_project(project)
for file_id in metadata:
if file_id not in combined_project_metadata:
combined_project_metadata[file_id] = metadata[file_id]
else:
combined_project_metadata[file_id].update(metadata[
file_id])
for doc in _document_details:
doc_id = doc[ID_KEY]
doc_name = doc.get(DOCUMENT_NAME_KEY)
doc_metadata = combined_project_metadata.get(doc_id)
chunks += self._load_chunks_for_document(document_id=doc_id,
docset_id=self.docset_id, document_name=doc_name,
additional_metadata=doc_metadata)
elif self.file_paths:
for path in self.file_paths:
path = Path(path)
with open(path, 'rb') as file:
chunks += self._parse_dgml(content=file.read(),
document_name=path.name)
return chunks
|
Load documents.
|
_get_tools_requests_delete
|
return RequestsDeleteTool(requests_wrapper=TextRequestsWrapper())
|
def _get_tools_requests_delete() ->BaseTool:
return RequestsDeleteTool(requests_wrapper=TextRequestsWrapper())
| null |
_identifying_params
|
"""Get the identifying parameters."""
return {**{'model_url': self.model_url}, **self._default_params}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
"""Get the identifying parameters."""
return {**{'model_url': self.model_url}, **self._default_params}
|
Get the identifying parameters.
|
test_message_chunk_to_message
|
assert message_chunk_to_message(AIMessageChunk(content='I am',
additional_kwargs={'foo': 'bar'})) == AIMessage(content='I am',
additional_kwargs={'foo': 'bar'})
assert message_chunk_to_message(HumanMessageChunk(content='I am')
) == HumanMessage(content='I am')
assert message_chunk_to_message(ChatMessageChunk(role='User', content='I am')
) == ChatMessage(role='User', content='I am')
assert message_chunk_to_message(FunctionMessageChunk(name='hello', content=
'I am')) == FunctionMessage(name='hello', content='I am')
|
def test_message_chunk_to_message() ->None:
assert message_chunk_to_message(AIMessageChunk(content='I am',
additional_kwargs={'foo': 'bar'})) == AIMessage(content='I am',
additional_kwargs={'foo': 'bar'})
assert message_chunk_to_message(HumanMessageChunk(content='I am')
) == HumanMessage(content='I am')
assert message_chunk_to_message(ChatMessageChunk(role='User', content=
'I am')) == ChatMessage(role='User', content='I am')
assert message_chunk_to_message(FunctionMessageChunk(name='hello',
content='I am')) == FunctionMessage(name='hello', content='I am')
| null |
validate_code
|
try:
code_tree = ast.parse(code)
except (SyntaxError, UnicodeDecodeError):
raise ValueError(f'Generated code is not valid python code: {code}')
except TypeError:
raise ValueError(
f'Generated code is expected to be a string, instead found {type(code)}'
)
except OverflowError:
raise ValueError(
f'Generated code too long / complex to be parsed by ast: {code}')
found_solution_expr = False
if code_validations.solution_expression_name is None:
found_solution_expr = True
has_imports = False
top_level_nodes = list(ast.iter_child_nodes(code_tree))
for node in top_level_nodes:
if (code_validations.solution_expression_name is not None and
code_validations.solution_expression_type is not None):
if isinstance(node, code_validations.solution_expression_type
) and hasattr(node, 'name'
) and node.name == code_validations.solution_expression_name:
found_solution_expr = True
if isinstance(node, ast.Assign):
for target_node in node.targets:
if (isinstance(target_node, code_validations.
solution_expression_type) and hasattr(target_node, 'id'
) and target_node.id == code_validations.
solution_expression_name):
found_solution_expr = True
if isinstance(node, ast.Import) or isinstance(node, ast.ImportFrom):
has_imports = True
if not found_solution_expr:
raise ValueError(
f'Generated code is missing the solution expression: {code_validations.solution_expression_name} of type: {code_validations.solution_expression_type}'
)
if not code_validations.allow_imports and has_imports:
raise ValueError(f'Generated code has disallowed imports: {code}')
if not code_validations.allow_command_exec or not code_validations.allow_imports:
for node in ast.walk(code_tree):
if not code_validations.allow_command_exec and isinstance(node, ast
.Call):
if hasattr(node.func, 'id'
) and node.func.id in COMMAND_EXECUTION_FUNCTIONS:
raise ValueError(
f'Found illegal command execution function {node.func.id} in code {code}'
)
if isinstance(node.func, ast.Attribute
) and node.func.attr in COMMAND_EXECUTION_FUNCTIONS:
raise ValueError(
f'Found illegal command execution function {node.func.attr} in code {code}'
)
if not code_validations.allow_imports and (isinstance(node, ast.
Import) or isinstance(node, ast.ImportFrom)):
raise ValueError(f'Generated code has disallowed imports: {code}')
|
@classmethod
def validate_code(cls, code: str, code_validations: PALValidation) ->None:
try:
code_tree = ast.parse(code)
except (SyntaxError, UnicodeDecodeError):
raise ValueError(f'Generated code is not valid python code: {code}')
except TypeError:
raise ValueError(
f'Generated code is expected to be a string, instead found {type(code)}'
)
except OverflowError:
raise ValueError(
f'Generated code too long / complex to be parsed by ast: {code}')
found_solution_expr = False
if code_validations.solution_expression_name is None:
found_solution_expr = True
has_imports = False
top_level_nodes = list(ast.iter_child_nodes(code_tree))
for node in top_level_nodes:
if (code_validations.solution_expression_name is not None and
code_validations.solution_expression_type is not None):
if isinstance(node, code_validations.solution_expression_type
) and hasattr(node, 'name'
) and node.name == code_validations.solution_expression_name:
found_solution_expr = True
if isinstance(node, ast.Assign):
for target_node in node.targets:
if (isinstance(target_node, code_validations.
solution_expression_type) and hasattr(target_node,
'id') and target_node.id == code_validations.
solution_expression_name):
found_solution_expr = True
if isinstance(node, ast.Import) or isinstance(node, ast.ImportFrom):
has_imports = True
if not found_solution_expr:
raise ValueError(
f'Generated code is missing the solution expression: {code_validations.solution_expression_name} of type: {code_validations.solution_expression_type}'
)
if not code_validations.allow_imports and has_imports:
raise ValueError(f'Generated code has disallowed imports: {code}')
if (not code_validations.allow_command_exec or not code_validations.
allow_imports):
for node in ast.walk(code_tree):
if not code_validations.allow_command_exec and isinstance(node,
ast.Call):
if hasattr(node.func, 'id'
) and node.func.id in COMMAND_EXECUTION_FUNCTIONS:
raise ValueError(
f'Found illegal command execution function {node.func.id} in code {code}'
)
if isinstance(node.func, ast.Attribute
) and node.func.attr in COMMAND_EXECUTION_FUNCTIONS:
raise ValueError(
f'Found illegal command execution function {node.func.attr} in code {code}'
)
if not code_validations.allow_imports and (isinstance(node, ast
.Import) or isinstance(node, ast.ImportFrom)):
raise ValueError(
f'Generated code has disallowed imports: {code}')
| null |
_on_tool_end
|
"""Process the Tool Run."""
self._process_end_trace(run)
|
def _on_tool_end(self, run: 'Run') ->None:
"""Process the Tool Run."""
self._process_end_trace(run)
|
Process the Tool Run.
|
test_parse_partial_json
|
case, expected = json_strings
parsed = parse_partial_json(case)
assert parsed == json.loads(expected)
|
@pytest.mark.parametrize('json_strings', TEST_CASES_PARTIAL)
def test_parse_partial_json(json_strings: Tuple[str, str]) ->None:
case, expected = json_strings
parsed = parse_partial_json(case)
assert parsed == json.loads(expected)
| null |
extension
|
return 'parquet'
|
@classmethod
def extension(cls) ->str:
return 'parquet'
| null |
fill
|
"""Indent a piece of text, according to the current indentation level"""
self.f.write('\n' + ' ' * self._indent + text)
|
def fill(self, text=''):
"""Indent a piece of text, according to the current indentation level"""
self.f.write('\n' + ' ' * self._indent + text)
|
Indent a piece of text, according to the current indentation level
|
agent_executor
|
self._agent_executor = agent_executor
self.inputs = self.inputs
|
@agent_executor.setter
def agent_executor(self, agent_executor: AgentExecutor) ->None:
self._agent_executor = agent_executor
self.inputs = self.inputs
| null |
test_simple_text
|
"""Test simple question that should not need python."""
question = 'a'
output = fake_llm_summarization_checker_chain.run(question)
assert output == 'b'
|
def test_simple_text(fake_llm_summarization_checker_chain:
LLMSummarizationCheckerChain) ->None:
"""Test simple question that should not need python."""
question = 'a'
output = fake_llm_summarization_checker_chain.run(question)
assert output == 'b'
|
Test simple question that should not need python.
|
embed_documents
|
return [self._get_embedding(seed=self._get_seed(_)) for _ in texts]
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
return [self._get_embedding(seed=self._get_seed(_)) for _ in texts]
| null |
get_lists
|
"""
Get all available lists.
"""
url = f'{DEFAULT_URL}/folder/{self.folder_id}/list'
params = self.get_default_params()
response = requests.get(url, headers=self.get_headers(), params=params)
return {'response': response}
|
def get_lists(self) ->Dict:
"""
Get all available lists.
"""
url = f'{DEFAULT_URL}/folder/{self.folder_id}/list'
params = self.get_default_params()
response = requests.get(url, headers=self.get_headers(), params=params)
return {'response': response}
|
Get all available lists.
|
_run
|
"""Use the tool."""
query_params = {'file_url': query, 'language': self.language,
'attributes_as_list': False}
return self._call_eden_ai(query_params)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the tool."""
query_params = {'file_url': query, 'language': self.language,
'attributes_as_list': False}
return self._call_eden_ai(query_params)
|
Use the tool.
|
_import_graphql
|
from langchain_community.utilities.graphql import GraphQLAPIWrapper
return GraphQLAPIWrapper
|
def _import_graphql() ->Any:
from langchain_community.utilities.graphql import GraphQLAPIWrapper
return GraphQLAPIWrapper
| null |
embed_documents
|
"""Return simple embeddings."""
return [([float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)]) for i in range(
len(texts))]
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""Return simple embeddings."""
return [([float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)]) for i in
range(len(texts))]
|
Return simple embeddings.
|
__getattr__
|
"""Get attr name."""
if name == 'create_xorbits_agent':
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = 'langchain.' + here + '.' + name
new_path = 'langchain_experimental.' + here + '.' + name
raise ImportError(
f"""This agent has been moved to langchain experiment. This agent relies on python REPL tool under the hood, so to use it safely please sandbox the python REPL. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md and https://github.com/langchain-ai/langchain/discussions/11680To keep using this code as is, install langchain experimental and update your import statement from:
`{old_path}` to `{new_path}`."""
)
raise AttributeError(f'{name} does not exist')
|
def __getattr__(name: str) ->Any:
"""Get attr name."""
if name == 'create_xorbits_agent':
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = 'langchain.' + here + '.' + name
new_path = 'langchain_experimental.' + here + '.' + name
raise ImportError(
f"""This agent has been moved to langchain experiment. This agent relies on python REPL tool under the hood, so to use it safely please sandbox the python REPL. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md and https://github.com/langchain-ai/langchain/discussions/11680To keep using this code as is, install langchain experimental and update your import statement from:
`{old_path}` to `{new_path}`."""
)
raise AttributeError(f'{name} does not exist')
|
Get attr name.
|
__add__
|
if isinstance(other, BaseMessageChunk):
return self.__class__(content=merge_content(self.content, other.content
), additional_kwargs=self._merge_kwargs_dict(self.additional_kwargs,
other.additional_kwargs))
else:
raise TypeError(
f'unsupported operand type(s) for +: "{self.__class__.__name__}" and "{other.__class__.__name__}"'
)
|
def __add__(self, other: Any) ->BaseMessageChunk:
if isinstance(other, BaseMessageChunk):
return self.__class__(content=merge_content(self.content, other.
content), additional_kwargs=self._merge_kwargs_dict(self.
additional_kwargs, other.additional_kwargs))
else:
raise TypeError(
f'unsupported operand type(s) for +: "{self.__class__.__name__}" and "{other.__class__.__name__}"'
)
| null |
parse
|
raise ValueError('Can only parse messages')
|
def parse(self, text: str) ->Union[AgentAction, AgentFinish]:
raise ValueError('Can only parse messages')
| null |
parse
|
"""Parse the output of an LLM call to a boolean.
Args:
text: output of a language model
Returns:
boolean
"""
cleaned_text = text.strip()
if cleaned_text.upper() not in (self.true_val.upper(), self.false_val.upper()):
raise ValueError(
f'BooleanOutputParser expected output value to either be {self.true_val} or {self.false_val}. Received {cleaned_text}.'
)
return cleaned_text.upper() == self.true_val.upper()
|
def parse(self, text: str) ->bool:
"""Parse the output of an LLM call to a boolean.
Args:
text: output of a language model
Returns:
boolean
"""
cleaned_text = text.strip()
if cleaned_text.upper() not in (self.true_val.upper(), self.false_val.
upper()):
raise ValueError(
f'BooleanOutputParser expected output value to either be {self.true_val} or {self.false_val}. Received {cleaned_text}.'
)
return cleaned_text.upper() == self.true_val.upper()
|
Parse the output of an LLM call to a boolean.
Args:
text: output of a language model
Returns:
boolean
|
_get_docs
|
"""Get docs."""
|
@abstractmethod
def _get_docs(self, question: str, inputs: Dict[str, Any], *, run_manager:
CallbackManagerForChainRun) ->List[Document]:
"""Get docs."""
|
Get docs.
|
on_retriever_error
|
if parent_run_id is None:
self.increment()
|
def on_retriever_error(self, error: BaseException, *, run_id: UUID,
parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any:
if parent_run_id is None:
self.increment()
| null |
add_texts
|
"""Add more texts to the vectorstore index.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
"""
from psycopg2 import sql
texts = list(texts)
cursor = self._connection.cursor()
embeddings = self._embedding.embed_documents(list(texts))
results = []
if not metadatas:
metadatas = [{} for _ in texts]
for id in range(len(embeddings)):
doc_uuid = uuid.uuid4()
results.append(str(doc_uuid))
data_input = [(str(id), embedding_id, text, json.dumps(metadata),
embedding) for id, embedding_id, text, metadata, embedding in zip(
repeat(doc_uuid), range(len(embeddings[id])), repeat(texts[id]),
repeat(metadatas[id]), embeddings[id])]
flattened_input = [val for sublist in data_input for val in sublist]
insert_query = sql.SQL(
'INSERT INTO {t} (id, embedding_id, text, metadata, embedding) VALUES {v}'
).format(t=sql.Identifier(self._table), v=sql.SQL(',').join([sql.
SQL('(%s,%s,%s,%s,%s)') for _ in range(len(embeddings[id]))]))
cursor.execute(insert_query, flattened_input)
self._connection.commit()
return results
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, **kwargs: Any) ->List[str]:
"""Add more texts to the vectorstore index.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
"""
from psycopg2 import sql
texts = list(texts)
cursor = self._connection.cursor()
embeddings = self._embedding.embed_documents(list(texts))
results = []
if not metadatas:
metadatas = [{} for _ in texts]
for id in range(len(embeddings)):
doc_uuid = uuid.uuid4()
results.append(str(doc_uuid))
data_input = [(str(id), embedding_id, text, json.dumps(metadata),
embedding) for id, embedding_id, text, metadata, embedding in
zip(repeat(doc_uuid), range(len(embeddings[id])), repeat(texts[
id]), repeat(metadatas[id]), embeddings[id])]
flattened_input = [val for sublist in data_input for val in sublist]
insert_query = sql.SQL(
'INSERT INTO {t} (id, embedding_id, text, metadata, embedding) VALUES {v}'
).format(t=sql.Identifier(self._table), v=sql.SQL(',').join([
sql.SQL('(%s,%s,%s,%s,%s)') for _ in range(len(embeddings[id]))]))
cursor.execute(insert_query, flattened_input)
self._connection.commit()
return results
|
Add more texts to the vectorstore index.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
|
load
|
"""Load HTML document into document objects."""
from bs4 import BeautifulSoup
with open(self.file_path, 'r', encoding=self.open_encoding) as f:
soup = BeautifulSoup(f, **self.bs_kwargs)
text = soup.get_text(self.get_text_separator)
if soup.title:
title = str(soup.title.string)
else:
title = ''
metadata: Dict[str, Union[str, None]] = {'source': self.file_path, 'title':
title}
return [Document(page_content=text, metadata=metadata)]
|
def load(self) ->List[Document]:
"""Load HTML document into document objects."""
from bs4 import BeautifulSoup
with open(self.file_path, 'r', encoding=self.open_encoding) as f:
soup = BeautifulSoup(f, **self.bs_kwargs)
text = soup.get_text(self.get_text_separator)
if soup.title:
title = str(soup.title.string)
else:
title = ''
metadata: Dict[str, Union[str, None]] = {'source': self.file_path,
'title': title}
return [Document(page_content=text, metadata=metadata)]
|
Load HTML document into document objects.
|
from_gitlab_api_wrapper
|
operations: List[Dict] = [{'mode': 'get_issues', 'name': 'Get Issues',
'description': GET_ISSUES_PROMPT}, {'mode': 'get_issue', 'name':
'Get Issue', 'description': GET_ISSUE_PROMPT}, {'mode':
'comment_on_issue', 'name': 'Comment on Issue', 'description':
COMMENT_ON_ISSUE_PROMPT}, {'mode': 'create_pull_request', 'name':
'Create Pull Request', 'description': CREATE_PULL_REQUEST_PROMPT}, {
'mode': 'create_file', 'name': 'Create File', 'description':
CREATE_FILE_PROMPT}, {'mode': 'read_file', 'name': 'Read File',
'description': READ_FILE_PROMPT}, {'mode': 'update_file', 'name':
'Update File', 'description': UPDATE_FILE_PROMPT}, {'mode':
'delete_file', 'name': 'Delete File', 'description': DELETE_FILE_PROMPT}]
tools = [GitLabAction(name=action['name'], description=action['description'
], mode=action['mode'], api_wrapper=gitlab_api_wrapper) for action in
operations]
return cls(tools=tools)
|
@classmethod
def from_gitlab_api_wrapper(cls, gitlab_api_wrapper: GitLabAPIWrapper
) ->'GitLabToolkit':
operations: List[Dict] = [{'mode': 'get_issues', 'name': 'Get Issues',
'description': GET_ISSUES_PROMPT}, {'mode': 'get_issue', 'name':
'Get Issue', 'description': GET_ISSUE_PROMPT}, {'mode':
'comment_on_issue', 'name': 'Comment on Issue', 'description':
COMMENT_ON_ISSUE_PROMPT}, {'mode': 'create_pull_request', 'name':
'Create Pull Request', 'description': CREATE_PULL_REQUEST_PROMPT},
{'mode': 'create_file', 'name': 'Create File', 'description':
CREATE_FILE_PROMPT}, {'mode': 'read_file', 'name': 'Read File',
'description': READ_FILE_PROMPT}, {'mode': 'update_file', 'name':
'Update File', 'description': UPDATE_FILE_PROMPT}, {'mode':
'delete_file', 'name': 'Delete File', 'description':
DELETE_FILE_PROMPT}]
tools = [GitLabAction(name=action['name'], description=action[
'description'], mode=action['mode'], api_wrapper=gitlab_api_wrapper
) for action in operations]
return cls(tools=tools)
| null |
go_to_page
|
self.page.goto(url=url if '://' in url else 'http://' + url)
self.client = self.page.context.new_cdp_session(self.page)
self.page_element_buffer = {}
|
def go_to_page(self, url: str) ->None:
self.page.goto(url=url if '://' in url else 'http://' + url)
self.client = self.page.context.new_cdp_session(self.page)
self.page_element_buffer = {}
| null |
__del__
|
if self._connection:
self._connection.close()
|
def __del__(self) ->None:
if self._connection:
self._connection.close()
| null |
_get_mock_thread
|
return {'thread': {'author_id': 'testing', 'thread_class': 'document',
'owning_company_id': 'ABC', 'id': f'{thread_id}', 'created_usec':
1690873126670055, 'updated_usec': 1690874891638991, 'title':
f'Unit Test Doc {thread_id}', 'link':
f'https://example.quip.com/{thread_id}', 'document_id': 'ABC', 'type':
'document', 'is_template': False, 'is_deleted': False}, 'user_ids': [],
'shared_folder_ids': ['ABC'], 'expanded_user_ids': ['ABCDEFG'],
'invited_user_emails': [], 'access_levels': {'ABCD': {'access_level':
'OWN'}}, 'html': "<h1 id='temp:C:ABCD'>How to write Python Test </h1>"}
|
def _get_mock_thread(self, thread_id: str) ->Dict:
return {'thread': {'author_id': 'testing', 'thread_class': 'document',
'owning_company_id': 'ABC', 'id': f'{thread_id}', 'created_usec':
1690873126670055, 'updated_usec': 1690874891638991, 'title':
f'Unit Test Doc {thread_id}', 'link':
f'https://example.quip.com/{thread_id}', 'document_id': 'ABC',
'type': 'document', 'is_template': False, 'is_deleted': False},
'user_ids': [], 'shared_folder_ids': ['ABC'], 'expanded_user_ids':
['ABCDEFG'], 'invited_user_emails': [], 'access_levels': {'ABCD': {
'access_level': 'OWN'}}, 'html':
"<h1 id='temp:C:ABCD'>How to write Python Test </h1>"}
| null |
test_api_key_masked_when_passed_via_constructor
|
llm = EmbaasEmbeddings(embaas_api_key='secret-api-key')
print(llm.embaas_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture
) ->None:
llm = EmbaasEmbeddings(embaas_api_key='secret-api-key')
print(llm.embaas_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
| null |
metadata_column
|
return ''
|
@property
def metadata_column(self) ->str:
return ''
| null |
_parse_tables
|
result = []
for table in tables:
rc, cc = table.row_count, table.column_count
_table = [['' for _ in range(cc)] for _ in range(rc)]
for cell in table.cells:
_table[cell.row_index][cell.column_index] = cell.content
result.append(_table)
return result
|
def _parse_tables(self, tables: List[Any]) ->List[Any]:
    """Convert table objects into row-major 2-D lists of cell text.

    Each table must expose ``row_count``, ``column_count`` and ``cells``
    (cells carry ``row_index``, ``column_index`` and ``content``).
    Cells absent from ``cells`` are left as empty strings.
    """
    parsed = []
    for table in tables:
        grid = [[''] * table.column_count for _ in range(table.row_count)]
        for cell in table.cells:
            grid[cell.row_index][cell.column_index] = cell.content
        parsed.append(grid)
    return parsed
| null |
test_edenai_call
|
"""Test simple call to edenai's text to speech endpoint."""
text2speech = EdenAiTextToSpeechTool(providers=['amazon'], language='en',
voice='MALE')
output = text2speech('hello')
parsed_url = urlparse(output)
assert text2speech.name == 'edenai_text_to_speech'
assert text2speech.feature == 'audio'
assert text2speech.subfeature == 'text_to_speech'
assert isinstance(output, str)
assert parsed_url.scheme in ['http', 'https']
|
def test_edenai_call() ->None:
    """Test simple call to edenai's text to speech endpoint."""
    tool = EdenAiTextToSpeechTool(providers=['amazon'], language='en',
        voice='MALE')
    result = tool('hello')
    assert tool.name == 'edenai_text_to_speech'
    assert tool.feature == 'audio'
    assert tool.subfeature == 'text_to_speech'
    assert isinstance(result, str)
    # The tool is expected to return a fetchable URL to the audio file.
    assert urlparse(result).scheme in ['http', 'https']
|
Test simple call to edenai's text to speech endpoint.
|
test_anonymize_with_custom_operator
|
"""Test anonymize a name with a custom operator"""
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
custom_operator = {'PERSON': OperatorConfig('replace', {'new_value': 'NAME'})}
anonymizer = PresidioReversibleAnonymizer(operators=custom_operator)
text = 'Jane Doe was here.'
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == 'NAME was here.'
|
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker')
def test_anonymize_with_custom_operator() ->None:
    """Test anonymize a name with a custom operator"""
    from presidio_anonymizer.entities import OperatorConfig
    from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
    operators = {'PERSON': OperatorConfig('replace', {'new_value': 'NAME'})}
    anonymizer = PresidioReversibleAnonymizer(operators=operators)
    assert anonymizer.anonymize('Jane Doe was here.') == 'NAME was here.'
|
Test anonymize a name with a custom operator
|
test_importable_all
|
for path in glob.glob('../langchain/langchain/*'):
relative_path = Path(path).parts[-1]
if relative_path.endswith('.typed'):
continue
module_name = relative_path.split('.')[0]
module = importlib.import_module('langchain.' + module_name)
all_ = getattr(module, '__all__', [])
for cls_ in all_:
getattr(module, cls_)
|
def test_importable_all() ->None:
    """Every name in each langchain submodule's ``__all__`` must resolve."""
    for path in glob.glob('../langchain/langchain/*'):
        leaf = Path(path).parts[-1]
        # Skip the py.typed marker file.
        if leaf.endswith('.typed'):
            continue
        module = importlib.import_module('langchain.' + leaf.split('.')[0])
        for exported_name in getattr(module, '__all__', []):
            getattr(module, exported_name)
| null |
zep_summary
|
"""Retrieve summary from Zep memory"""
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory or not zep_memory.summary:
return None
return zep_memory.summary.content
|
@property
def zep_summary(self) ->Optional[str]:
    """Retrieve summary from Zep memory; ``None`` when no summary exists."""
    memory = self._get_memory()
    if memory and memory.summary:
        return memory.summary.content
    return None
|
Retrieve summary from Zep memory
|
knn_hybrid_search
|
"""
Perform a hybrid k-NN and text search on the Elasticsearch index.
Args:
query (str, optional): The query text to search for.
k (int, optional): The number of nearest neighbors to return.
query_vector (List[float], optional): The query vector to search for.
model_id (str, optional): The ID of the model to use for transforming the
query text into a vector.
size (int, optional): The number of search results to return.
source (bool, optional): Whether to return the source of the search results.
knn_boost (float, optional): The boost value to apply to the k-NN search
results.
query_boost (float, optional): The boost value to apply to the text search
results.
fields (List[Mapping[str, Any]], optional): The fields to return in the
search results.
page_content (str, optional): The name of the field that contains the page
content.
Returns:
A list of tuples, where each tuple contains a Document object and a score.
"""
if not source and (fields is None or not any(page_content in field for
field in fields)):
raise ValueError('If source=False `page_content` field must be in `fields`'
)
knn_query_body = self._default_knn_query(query_vector=query_vector, query=
query, model_id=model_id, k=k)
knn_query_body['boost'] = knn_boost
match_query_body = {'match': {self.query_field: {'query': query, 'boost':
query_boost}}}
response = self.client.search(index=self.index_name, query=match_query_body,
knn=knn_query_body, fields=fields, size=size, source=source)
hits = [hit for hit in response['hits']['hits']]
docs_and_scores = [(Document(page_content=hit['_source'][page_content] if
source else hit['fields'][page_content][0], metadata=hit['fields'] if
fields else {}), hit['_score']) for hit in hits]
return docs_and_scores
|
def knn_hybrid_search(self, query: Optional[str]=None, k: Optional[int]=10,
    query_vector: Optional[List[float]]=None, model_id: Optional[str]=None,
    size: Optional[int]=10, source: Optional[bool]=True, knn_boost:
    Optional[float]=0.9, query_boost: Optional[float]=0.1, fields: Optional
    [Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None]]=
    None, page_content: Optional[str]='text') ->List[Tuple[Document, float]]:
    """
    Perform a hybrid k-NN and text search on the Elasticsearch index.
    Args:
        query (str, optional): The query text to search for.
        k (int, optional): The number of nearest neighbors to return.
        query_vector (List[float], optional): The query vector to search for.
        model_id (str, optional): The ID of the model to use for transforming the
        query text into a vector.
        size (int, optional): The number of search results to return.
        source (bool, optional): Whether to return the source of the search results.
        knn_boost (float, optional): The boost value to apply to the k-NN search
        results.
        query_boost (float, optional): The boost value to apply to the text search
        results.
        fields (List[Mapping[str, Any]], optional): The fields to return in the
        search results.
        page_content (str, optional): The name of the field that contains the page
        content.
    Returns:
        A list of tuples, where each tuple contains a Document object and a score.
    """
    # When `_source` is suppressed, the page-content field must be fetched
    # via `fields`, otherwise the document text would be unrecoverable.
    if not source and (fields is None or not any(page_content in field for
        field in fields)):
        raise ValueError(
            'If source=False `page_content` field must be in `fields`')
    # Approximate-kNN clause; `boost` weighs its score against the text match.
    knn_query_body = self._default_knn_query(query_vector=query_vector,
        query=query, model_id=model_id, k=k)
    knn_query_body['boost'] = knn_boost
    # Lexical match on the configured query field, weighted by query_boost.
    match_query_body = {'match': {self.query_field: {'query': query,
        'boost': query_boost}}}
    response = self.client.search(index=self.index_name, query=
        match_query_body, knn=knn_query_body, fields=fields, size=size,
        source=source)
    hits = [hit for hit in response['hits']['hits']]
    # Page content comes from `_source` when requested, else from `fields`
    # (field values arrive as lists, hence the [0]).
    docs_and_scores = [(Document(page_content=hit['_source'][page_content] if
        source else hit['fields'][page_content][0], metadata=hit['fields'] if
        fields else {}), hit['_score']) for hit in hits]
    return docs_and_scores
|
Perform a hybrid k-NN and text search on the Elasticsearch index.
Args:
query (str, optional): The query text to search for.
k (int, optional): The number of nearest neighbors to return.
query_vector (List[float], optional): The query vector to search for.
model_id (str, optional): The ID of the model to use for transforming the
query text into a vector.
size (int, optional): The number of search results to return.
source (bool, optional): Whether to return the source of the search results.
knn_boost (float, optional): The boost value to apply to the k-NN search
results.
query_boost (float, optional): The boost value to apply to the text search
results.
fields (List[Mapping[str, Any]], optional): The fields to return in the
search results.
page_content (str, optional): The name of the field that contains the page
content.
Returns:
A list of tuples, where each tuple contains a Document object and a score.
|
_deduplicate_in_order
|
"""Deduplicate a list of hashed documents while preserving order."""
seen: Set[str] = set()
for hashed_doc in hashed_documents:
if hashed_doc.hash_ not in seen:
seen.add(hashed_doc.hash_)
yield hashed_doc
|
def _deduplicate_in_order(hashed_documents: Iterable[_HashedDocument]
    ) ->Iterator[_HashedDocument]:
    """Yield each hashed document once, preserving first-seen order."""
    emitted: Set[str] = set()
    for document in hashed_documents:
        if document.hash_ in emitted:
            continue
        emitted.add(document.hash_)
        yield document
|
Deduplicate a list of hashed documents while preserving order.
|
pytest_addoption
|
"""Add custom command line options to pytest."""
parser.addoption('--only-extended', action='store_true', help=
'Only run extended tests. Does not allow skipping any extended tests.')
parser.addoption('--only-core', action='store_true', help=
'Only run core tests. Never runs any extended tests.')
|
def pytest_addoption(parser: Parser) ->None:
    """Add custom command line options to pytest."""
    option_specs = (
        ('--only-extended',
         'Only run extended tests. Does not allow skipping any extended tests.'),
        ('--only-core',
         'Only run core tests. Never runs any extended tests.'),
    )
    for flag, help_text in option_specs:
        parser.addoption(flag, action='store_true', help=help_text)
|
Add custom command line options to pytest.
|
from_texts
|
jagstore = cls(pod, store, vector_index, vector_type, vector_dimension, url,
embedding)
jagstore.login(jaguar_api_key)
jagstore.clear()
jagstore.add_texts(texts, metadatas, **kwargs)
return jagstore
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, url: str, pod:
    str, store: str, vector_index: str, vector_type: str, vector_dimension:
    int, metadatas: Optional[List[dict]]=None, jaguar_api_key: Optional[str
    ]='', **kwargs: Any) ->Jaguar:
    """Build a Jaguar store from raw texts: login, wipe the store, index."""
    instance = cls(pod, store, vector_index, vector_type, vector_dimension,
        url, embedding)
    instance.login(jaguar_api_key)
    # Start from an empty store before ingesting the provided texts.
    instance.clear()
    instance.add_texts(texts, metadatas, **kwargs)
    return instance
| null |
test_anthropic_streaming_callback
|
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = Anthropic(streaming=True, callback_manager=callback_manager, verbose=True
)
llm('Write me a sentence with 100 words.')
assert callback_handler.llm_streams > 1
|
def test_anthropic_streaming_callback() ->None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    handler = FakeCallbackHandler()
    llm = Anthropic(streaming=True, callback_manager=CallbackManager([
        handler]), verbose=True)
    llm('Write me a sentence with 100 words.')
    # Streaming must deliver more than a single token callback.
    assert handler.llm_streams > 1
|
Test that streaming correctly invokes on_llm_new_token callback.
|
program
|
return items
|
def program(self, *items: Any) ->tuple:
    # Pass-through: `items` is already a tuple courtesy of *args, so all
    # positional arguments are returned unchanged as one tuple.
    return items
| null |
test_mhtml_loader
|
"""Test mhtml loader."""
file_path = EXAMPLES / 'example.mht'
loader = MHTMLLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
metadata = docs[0].metadata
content = docs[0].page_content
assert metadata['title'] == 'LangChain'
assert metadata['source'] == str(file_path)
assert 'LANG CHAIN 🦜️🔗Official Home Page' in content
|
@pytest.mark.requires('bs4', 'lxml')
def test_mhtml_loader() ->None:
    """Test mhtml loader."""
    file_path = EXAMPLES / 'example.mht'
    docs = MHTMLLoader(str(file_path)).load()
    assert len(docs) == 1
    doc = docs[0]
    assert doc.metadata['title'] == 'LangChain'
    assert doc.metadata['source'] == str(file_path)
    assert 'LANG CHAIN 🦜️🔗Official Home Page' in doc.page_content
|
Test mhtml loader.
|
test_chat_model_caching_params
|
prompt: List[BaseMessage] = [HumanMessage(content='How are you?')]
response = 'Test response'
cached_response = 'Cached test response'
cached_message = AIMessage(content=cached_response)
llm = FakeListChatModel(responses=[response])
if get_llm_cache():
get_llm_cache().update(prompt=dumps(prompt), llm_string=llm.
_get_llm_string(functions=[]), return_val=[ChatGeneration(message=
cached_message)])
result = llm(prompt, functions=[])
assert isinstance(result, AIMessage)
assert result.content == cached_response
result_no_params = llm(prompt)
assert isinstance(result_no_params, AIMessage)
assert result_no_params.content == response
else:
raise ValueError(
'The cache not set. This should never happen, as the pytest fixture `set_cache_and_teardown` always sets the cache.'
)
|
def test_chat_model_caching_params() ->None:
    """Cache keys must include call params: a value cached with
    ``functions=[]`` must not be served for a call made without it."""
    prompt: List[BaseMessage] = [HumanMessage(content='How are you?')]
    response = 'Test response'
    cached_response = 'Cached test response'
    cached_message = AIMessage(content=cached_response)
    llm = FakeListChatModel(responses=[response])
    if get_llm_cache():
        # Seed the cache under the llm-string that includes functions=[].
        get_llm_cache().update(prompt=dumps(prompt), llm_string=llm.
            _get_llm_string(functions=[]), return_val=[ChatGeneration(
            message=cached_message)])
        # Same params -> cache hit.
        result = llm(prompt, functions=[])
        assert isinstance(result, AIMessage)
        assert result.content == cached_response
        # No functions param -> different llm-string -> cache miss.
        result_no_params = llm(prompt)
        assert isinstance(result_no_params, AIMessage)
        assert result_no_params.content == response
    else:
        raise ValueError(
            'The cache not set. This should never happen, as the pytest fixture `set_cache_and_teardown` always sets the cache.'
            )
| null |
get_next_task
|
"""Get the next task."""
task_names = [t['task_name'] for t in self.task_list]
incomplete_tasks = ', '.join(task_names)
response = self.task_creation_chain.run(result=result, task_description=
task_description, incomplete_tasks=incomplete_tasks, objective=
objective, **kwargs)
new_tasks = response.split('\n')
return [{'task_name': task_name} for task_name in new_tasks if task_name.
strip()]
|
def get_next_task(self, result: str, task_description: str, objective: str,
    **kwargs: Any) ->List[Dict]:
    """Ask the task-creation chain for follow-up tasks.

    Feeds the previous result plus the names of the still-pending tasks to
    the chain and parses its newline-separated reply into task dicts,
    dropping blank lines.
    """
    incomplete = ', '.join(task['task_name'] for task in self.task_list)
    raw_reply = self.task_creation_chain.run(result=result,
        task_description=task_description, incomplete_tasks=incomplete,
        objective=objective, **kwargs)
    return [{'task_name': line} for line in raw_reply.split('\n') if line.
        strip()]
|
Get the next task.
|
similarity_search_with_score
|
"""
Return list of documents most similar to the query
text and cosine distance in float for each.
Lower score represents more similarity.
"""
if self._embedding is None:
raise ValueError(
'_embedding cannot be None for similarity_search_with_score')
content: Dict[str, Any] = {'concepts': [query]}
if kwargs.get('search_distance'):
content['certainty'] = kwargs.get('search_distance')
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get('where_filter'):
query_obj = query_obj.with_where(kwargs.get('where_filter'))
if kwargs.get('tenant'):
query_obj = query_obj.with_tenant(kwargs.get('tenant'))
embedded_query = self._embedding.embed_query(query)
if not self._by_text:
vector = {'vector': embedded_query}
result = query_obj.with_near_vector(vector).with_limit(k).with_additional(
'vector').do()
else:
result = query_obj.with_near_text(content).with_limit(k).with_additional(
'vector').do()
if 'errors' in result:
raise ValueError(f"Error during query: {result['errors']}")
docs_and_scores = []
for res in result['data']['Get'][self._index_name]:
text = res.pop(self._text_key)
score = np.dot(res['_additional']['vector'], embedded_query)
docs_and_scores.append((Document(page_content=text, metadata=res), score))
return docs_and_scores
|
def similarity_search_with_score(self, query: str, k: int=4, **kwargs: Any
    ) ->List[Tuple[Document, float]]:
    """
    Return list of documents most similar to the query
    text and cosine distance in float for each.
    Lower score represents more similarity.
    """
    # NOTE(review): the score below is np.dot(vector, query), i.e. a
    # similarity where HIGHER normally means closer — the docstring's
    # "lower = more similar" looks inverted; confirm against callers.
    if self._embedding is None:
        raise ValueError(
            '_embedding cannot be None for similarity_search_with_score')
    content: Dict[str, Any] = {'concepts': [query]}
    if kwargs.get('search_distance'):
        content['certainty'] = kwargs.get('search_distance')
    query_obj = self._client.query.get(self._index_name, self._query_attrs)
    # Optional Weaviate filters forwarded from kwargs.
    if kwargs.get('where_filter'):
        query_obj = query_obj.with_where(kwargs.get('where_filter'))
    if kwargs.get('tenant'):
        query_obj = query_obj.with_tenant(kwargs.get('tenant'))
    embedded_query = self._embedding.embed_query(query)
    # Search by vector or by text depending on configuration; always fetch
    # the stored vector so the score can be computed locally.
    if not self._by_text:
        vector = {'vector': embedded_query}
        result = query_obj.with_near_vector(vector).with_limit(k
            ).with_additional('vector').do()
    else:
        result = query_obj.with_near_text(content).with_limit(k
            ).with_additional('vector').do()
    if 'errors' in result:
        raise ValueError(f"Error during query: {result['errors']}")
    docs_and_scores = []
    for res in result['data']['Get'][self._index_name]:
        # Remove the text field so the rest of the record becomes metadata.
        text = res.pop(self._text_key)
        score = np.dot(res['_additional']['vector'], embedded_query)
        docs_and_scores.append((Document(page_content=text, metadata=res),
            score))
    return docs_and_scores
|
Return list of documents most similar to the query
text and cosine distance in float for each.
Lower score represents more similarity.
|
_import_google_serper_tool_GoogleSerperRun
|
from langchain_community.tools.google_serper.tool import GoogleSerperRun
return GoogleSerperRun
|
def _import_google_serper_tool_GoogleSerperRun() ->Any:
    """Lazily import and return the GoogleSerperRun tool class."""
    from langchain_community.tools.google_serper.tool import (
        GoogleSerperRun as tool_cls)
    return tool_cls
| null |
_completion_with_retry
|
return _make_request(llm, **_kwargs)
|
@retry_decorator
def _completion_with_retry(**_kwargs: Any) ->Any:
    # Thin wrapper so the retry policy is applied to every request attempt;
    # `retry_decorator`, `_make_request` and `llm` are resolved from the
    # enclosing scope.
    return _make_request(llm, **_kwargs)
| null |
test_timeout_kwargs
|
"""Test that timeout kwarg works."""
chat = AzureMLChatOnlineEndpoint(content_formatter=LlamaContentFormatter())
response = chat(messages=[HumanMessage(content='FOO')], timeout=60)
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
def test_timeout_kwargs() ->None:
    """Test that timeout kwarg works."""
    chat = AzureMLChatOnlineEndpoint(content_formatter=LlamaContentFormatter())
    reply = chat(messages=[HumanMessage(content='FOO')], timeout=60)
    assert isinstance(reply, BaseMessage)
    assert isinstance(reply.content, str)
|
Test that timeout kwarg works.
|
_get_relevant_documents
|
from sklearn.metrics.pairwise import cosine_similarity
query_vec = self.vectorizer.transform([query])
results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,))
return_docs = [self.docs[i] for i in results.argsort()[-self.k:][::-1]]
return return_docs
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Rank stored docs by TF-IDF cosine similarity; return the top k."""
    from sklearn.metrics.pairwise import cosine_similarity
    query_vec = self.vectorizer.transform([query])
    scores = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,))
    # argsort ascending; take the last k indices and reverse for best-first.
    top_indices = scores.argsort()[-self.k:][::-1]
    return [self.docs[i] for i in top_indices]
| null |
_create_retry_decorator
|
import openai
errors = [openai.error.Timeout, openai.error.APIError, openai.error.
APIConnectionError, openai.error.RateLimitError, openai.error.
ServiceUnavailableError]
return create_base_retry_decorator(error_types=errors, max_retries=llm.
max_retries, run_manager=run_manager)
|
def _create_retry_decorator(llm: ChatOpenAI, run_manager: Optional[Union[
    AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]]=None) ->Callable[
    [Any], Any]:
    """Build a retry decorator for transient OpenAI errors.

    Retries up to ``llm.max_retries`` times on timeouts, API/connection
    errors, rate limits and service unavailability. Uses the pre-1.0
    ``openai.error`` exception classes — TODO confirm the installed
    openai version still exposes them.
    """
    import openai
    errors = [openai.error.Timeout, openai.error.APIError, openai.error.
        APIConnectionError, openai.error.RateLimitError, openai.error.
        ServiceUnavailableError]
    return create_base_retry_decorator(error_types=errors, max_retries=llm.
        max_retries, run_manager=run_manager)
| null |
delete_session
|
"""Delete a session"""
requests.delete(f'{self.url}/sessions/{self.session_id}/memory')
|
def delete_session(self) ->None:
    """Delete a session"""
    # Fire-and-forget: the HTTP response is not checked, so a failed delete
    # goes unnoticed — NOTE(review): confirm this is intentional.
    requests.delete(f'{self.url}/sessions/{self.session_id}/memory')
|
Delete a session
|
_search_rows
|
prompt_pd5 = self.get_md5(prompt)
stmt = select(self.cache_schema.response).where(self.cache_schema.
prompt_md5 == prompt_pd5).where(self.cache_schema.llm == llm_string).where(
self.cache_schema.prompt == prompt).order_by(self.cache_schema.idx)
with Session(self.engine) as session:
return session.execute(stmt).fetchall()
|
def _search_rows(self, prompt: str, llm_string: str) ->List[Row]:
    """Fetch cached rows for a prompt/llm pair, ordered by generation index.

    The md5 column narrows the scan cheaply before the exact prompt match.
    """
    digest = self.get_md5(prompt)
    stmt = (select(self.cache_schema.response)
        .where(self.cache_schema.prompt_md5 == digest)
        .where(self.cache_schema.llm == llm_string)
        .where(self.cache_schema.prompt == prompt)
        .order_by(self.cache_schema.idx))
    with Session(self.engine) as session:
        return session.execute(stmt).fetchall()
| null |
get_retriever_with_metadata
|
start_dt = x.get('start_date', None)
end_dt = x.get('end_date', None)
metadata_filter = x.get('metadata_filter', None)
opt = {}
if start_dt is not None:
opt['start_date'] = start_dt
if end_dt is not None:
opt['end_date'] = end_dt
if metadata_filter is not None:
opt['filter'] = metadata_filter
v = vectorstore.as_retriever(search_kwargs=opt)
return RunnableLambda(itemgetter('retriever_query')) | v
|
def get_retriever_with_metadata(x):
    """Build a retriever runnable honoring optional date/metadata filters."""
    search_kwargs = {}
    if x.get('start_date') is not None:
        search_kwargs['start_date'] = x['start_date']
    if x.get('end_date') is not None:
        search_kwargs['end_date'] = x['end_date']
    if x.get('metadata_filter') is not None:
        search_kwargs['filter'] = x['metadata_filter']
    retriever = vectorstore.as_retriever(search_kwargs=search_kwargs)
    # Feed only the retriever_query field of the input into the retriever.
    return RunnableLambda(itemgetter('retriever_query')) | retriever
| null |
_to_snake_case
|
"""Convert a name into snake_case."""
snake_case = ''
for i, char in enumerate(name):
if char.isupper() and i != 0:
snake_case += '_' + char.lower()
else:
snake_case += char.lower()
return snake_case
|
def _to_snake_case(name: str) ->str:
    """Convert a name into snake_case.

    Every uppercase letter after the first character gets an underscore
    prefix, so runs of capitals expand letter by letter
    (e.g. ``'HTTPServer'`` -> ``'h_t_t_p_server'``).
    """
    pieces = []
    for index, char in enumerate(name):
        if char.isupper() and index != 0:
            pieces.append('_')
        pieces.append(char.lower())
    return ''.join(pieces)
|
Convert a name into snake_case.
|
delete
|
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
if ids is None:
raise ValueError('No ids provided to delete.')
for document_id in ids:
self.delete_by_document_id(document_id)
return True
|
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Optional[
    bool]:
    """Delete by vector IDs.

    Args:
        ids: List of ids to delete.

    Returns:
        Optional[bool]: True if deletion is successful,
            False otherwise, None if not implemented.

    Raises:
        ValueError: If ``ids`` is ``None``.
    """
    if ids is None:
        raise ValueError('No ids provided to delete.')
    for identifier in ids:
        self.delete_by_document_id(identifier)
    return True
|
Delete by vector IDs.
Args:
ids: List of ids to delete.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
|
_collection_is_ready
|
"""Checks whether the collection for this message history is ready
to be queried
"""
return self.client.Collections.get(collection=self.collection
).data.status == 'READY'
|
def _collection_is_ready(self) ->bool:
    """Checks whether the collection for this message history is ready
    to be queried
    """
    collection_info = self.client.Collections.get(collection=self.collection)
    return collection_info.data.status == 'READY'
|
Checks whether the collection for this message history is ready
to be queried
|
test_base_tool_inheritance_base_schema
|
"""Test schema is correctly inferred when inheriting from BaseTool."""
class _MockSimpleTool(BaseTool):
name: str = 'simple_tool'
description: str = 'A Simple Tool'
def _run(self, tool_input: str) ->str:
return f'{tool_input}'
async def _arun(self, tool_input: str) ->str:
raise NotImplementedError
simple_tool = _MockSimpleTool()
assert simple_tool.args_schema is None
expected_args = {'tool_input': {'title': 'Tool Input', 'type': 'string'}}
assert simple_tool.args == expected_args
|
def test_base_tool_inheritance_base_schema() ->None:
    """Test schema is correctly inferred when inheriting from BaseTool."""

    class _MockSimpleTool(BaseTool):
        name: str = 'simple_tool'
        description: str = 'A Simple Tool'

        def _run(self, tool_input: str) ->str:
            return f'{tool_input}'

        async def _arun(self, tool_input: str) ->str:
            raise NotImplementedError

    tool = _MockSimpleTool()
    # No explicit args_schema: it must be inferred from _run's signature.
    assert tool.args_schema is None
    assert tool.args == {'tool_input': {'title': 'Tool Input', 'type':
        'string'}}
|
Test schema is correctly inferred when inheriting from BaseTool.
|
distance_strategy
|
if self._distance_strategy == DistanceStrategy.EUCLIDEAN:
return self.EmbeddingStore.embedding.l2_distance
elif self._distance_strategy == DistanceStrategy.COSINE:
return self.EmbeddingStore.embedding.cosine_distance
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self.EmbeddingStore.embedding.max_inner_product
else:
raise ValueError(
f"Got unexpected value for distance: {self._distance_strategy}. Should be one of {', '.join([ds.value for ds in DistanceStrategy])}."
)
|
@property
def distance_strategy(self) ->Any:
    """Return the comparator on the embedding column that matches the
    configured distance strategy.

    Raises:
        ValueError: If ``self._distance_strategy`` is not one of the
            supported ``DistanceStrategy`` members.
    """
    if self._distance_strategy == DistanceStrategy.EUCLIDEAN:
        return self.EmbeddingStore.embedding.l2_distance
    elif self._distance_strategy == DistanceStrategy.COSINE:
        return self.EmbeddingStore.embedding.cosine_distance
    elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
        return self.EmbeddingStore.embedding.max_inner_product
    else:
        raise ValueError(
            f"Got unexpected value for distance: {self._distance_strategy}. Should be one of {', '.join([ds.value for ds in DistanceStrategy])}."
            )
| null |
from_llm
|
"""Create a chain from an LLM."""
critique_chain = LLMChain(llm=llm, prompt=critique_prompt)
revision_chain = LLMChain(llm=llm, prompt=revision_prompt)
return cls(chain=chain, critique_chain=critique_chain, revision_chain=
revision_chain, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, chain: LLMChain, critique_prompt:
    BasePromptTemplate=CRITIQUE_PROMPT, revision_prompt: BasePromptTemplate
    =REVISION_PROMPT, **kwargs: Any) ->'ConstitutionalChain':
    """Create a chain from an LLM."""
    # Build the critique and revision sub-chains around the same LLM.
    return cls(chain=chain, critique_chain=LLMChain(llm=llm, prompt=
        critique_prompt), revision_chain=LLMChain(llm=llm, prompt=
        revision_prompt), **kwargs)
|
Create a chain from an LLM.
|
_run_output_key
|
if len(self.output_keys) != 1:
raise ValueError(
f'`run` not supported when there is not exactly one output key. Got {self.output_keys}.'
)
return self.output_keys[0]
|
@property
def _run_output_key(self) ->str:
    """Return the single output key; ``run`` requires exactly one."""
    if len(self.output_keys) == 1:
        return self.output_keys[0]
    raise ValueError(
        f'`run` not supported when there is not exactly one output key. Got {self.output_keys}.'
        )
| null |
_load_examples
|
"""Load examples if necessary."""
if isinstance(config['examples'], list):
pass
elif isinstance(config['examples'], str):
with open(config['examples']) as f:
if config['examples'].endswith('.json'):
examples = json.load(f)
elif config['examples'].endswith(('.yaml', '.yml')):
examples = yaml.safe_load(f)
else:
raise ValueError(
'Invalid file format. Only json or yaml formats are supported.'
)
config['examples'] = examples
else:
raise ValueError(
'Invalid examples format. Only list or string are supported.')
return config
|
def _load_examples(config: dict) ->dict:
    """Load examples if necessary.

    ``config['examples']`` may be an inline list (kept as-is) or a path to
    a ``.json`` / ``.yaml`` / ``.yml`` file whose parsed contents replace
    it in place.

    Args:
        config: Config dict containing an ``examples`` entry.

    Returns:
        The same config dict with ``examples`` resolved to a list.

    Raises:
        ValueError: If ``examples`` is neither list nor str, or the file
            extension is unsupported.
    """
    examples = config['examples']
    if isinstance(examples, list):
        return config
    if not isinstance(examples, str):
        raise ValueError(
            'Invalid examples format. Only list or string are supported.')
    # Validate the extension BEFORE opening, so an unsupported extension
    # raises ValueError rather than a FileNotFoundError from open().
    if examples.endswith('.json'):
        loader = json.load
    elif examples.endswith(('.yaml', '.yml')):
        loader = yaml.safe_load
    else:
        raise ValueError(
            'Invalid file format. Only json or yaml formats are supported.')
    with open(examples, encoding='utf-8') as f:
        config['examples'] = loader(f)
    return config
|
Load examples if necessary.
|
get
|
res = self.redis_client.getex(f'{self.full_key_prefix}:{key}', ex=self.
recall_ttl) or default or ''
logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'")
return res
|
def get(self, key: str, default: Optional[str]=None) ->Optional[str]:
    """Read a namespaced key, refreshing its TTL; fall back to default/''."""
    full_key = f'{self.full_key_prefix}:{key}'
    # getex refreshes the TTL on read so frequently-recalled keys persist.
    res = self.redis_client.getex(full_key, ex=self.recall_ttl
        ) or default or ''
    logger.debug(f"REDIS MEM get '{full_key}': '{res}'")
    return res
| null |
merge_documents
|
"""
Merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
retriever_docs = [retriever.get_relevant_documents(query, callbacks=
run_manager.get_child('retriever_{}'.format(i + 1))) for i, retriever in
enumerate(self.retrievers)]
merged_documents = []
max_docs = max(len(docs) for docs in retriever_docs)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
|
def merge_documents(self, query: str, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """
    Merge the results of the retrievers.

    Documents are interleaved round-robin: the first hit of every
    retriever, then every second hit, and so on.

    Args:
        query: The query to search for.

    Returns:
        A list of merged documents.
    """
    retriever_docs = [retriever.get_relevant_documents(query, callbacks=
        run_manager.get_child('retriever_{}'.format(i + 1))) for i,
        retriever in enumerate(self.retrievers)]
    merged_documents = []
    # default=0 keeps an empty retriever list from raising ValueError.
    max_docs = max((len(docs) for docs in retriever_docs), default=0)
    for position in range(max_docs):
        for docs in retriever_docs:
            if position < len(docs):
                merged_documents.append(docs[position])
    return merged_documents
|
Merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
|
_get_google_trends
|
return GoogleTrendsQueryRun(api_wrapper=GoogleTrendsAPIWrapper(**kwargs))
|
def _get_google_trends(**kwargs: Any) ->BaseTool:
    """Instantiate the Google Trends tool around a configured API wrapper."""
    wrapper = GoogleTrendsAPIWrapper(**kwargs)
    return GoogleTrendsQueryRun(api_wrapper=wrapper)
| null |
flatten_run
|
"""Utility to flatten a nest run object into a list of runs.
:param run: The base run to flatten.
:return: The flattened list of runs.
"""
def flatten(child_runs: List[Dict[str, Any]]) ->List[Dict[str, Any]]:
"""Utility to recursively flatten a list of child runs in a run.
:param child_runs: The list of child runs to flatten.
:return: The flattened list of runs.
"""
if child_runs is None:
return []
result = []
for item in child_runs:
child_runs = item.pop('child_runs', [])
result.append(item)
result.extend(flatten(child_runs))
return result
return flatten([run])
|
def flatten_run(self, run: Dict[str, Any]) ->List[Dict[str, Any]]:
    """Utility to flatten a nest run object into a list of runs.
    :param run: The base run to flatten.
    :return: The flattened list of runs.

    Note: each run dict is mutated in place (its ``child_runs`` key is
    popped).
    """

    def flatten(runs: List[Dict[str, Any]]) ->List[Dict[str, Any]]:
        """Utility to recursively flatten a list of child runs in a run.
        :param runs: The list of child runs to flatten.
        :return: The flattened list of runs.
        """
        if runs is None:
            return []
        flattened = []
        for run_dict in runs:
            children = run_dict.pop('child_runs', [])
            flattened.append(run_dict)
            flattened.extend(flatten(children))
        return flattened

    return flatten([run])
|
Utility to flatten a nest run object into a list of runs.
:param run: The base run to flatten.
:return: The flattened list of runs.
|
test_functionality_multiline
|
"""Test correct functionality for ChatGPT multiline commands."""
chain = PythonREPL()
tool = PythonREPLTool(python_repl=chain)
output = tool.run(_SAMPLE_CODE)
assert output == '30\n'
|
def test_functionality_multiline() ->None:
"""Test correct functionality for ChatGPT multiline commands."""
chain = PythonREPL()
tool = PythonREPLTool(python_repl=chain)
output = tool.run(_SAMPLE_CODE)
assert output == '30\n'
|
Test correct functionality for ChatGPT multiline commands.
|
update_document
|
"""Update a document in the cluster.
Args:
document_id (str): ID of the document to update.
document (Document): Document to update.
"""
text = document.page_content
metadata = document.metadata
self._cluster.update(ids=[document_id], documents=[text], metadatas=[metadata])
|
def update_document(self, document_id: str, document: Document) ->None:
"""Update a document in the cluster.
Args:
document_id (str): ID of the document to update.
document (Document): Document to update.
"""
text = document.page_content
metadata = document.metadata
self._cluster.update(ids=[document_id], documents=[text], metadatas=[
metadata])
|
Update a document in the cluster.
Args:
document_id (str): ID of the document to update.
document (Document): Document to update.
|
_get_message_metadata
|
"""Create and return metadata for a given message and channel."""
timestamp = message.get('ts', '')
user = message.get('user', '')
source = self._get_message_source(channel_name, user, timestamp)
return {'source': source, 'channel': channel_name, 'timestamp': timestamp,
'user': user}
|
def _get_message_metadata(self, message: dict, channel_name: str) ->dict:
"""Create and return metadata for a given message and channel."""
timestamp = message.get('ts', '')
user = message.get('user', '')
source = self._get_message_source(channel_name, user, timestamp)
return {'source': source, 'channel': channel_name, 'timestamp':
timestamp, 'user': user}
|
Create and return metadata for a given message and channel.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
github_repository = get_from_dict_or_env(values, 'github_repository',
'GITHUB_REPOSITORY')
github_app_id = get_from_dict_or_env(values, 'github_app_id', 'GITHUB_APP_ID')
github_app_private_key = get_from_dict_or_env(values,
'github_app_private_key', 'GITHUB_APP_PRIVATE_KEY')
try:
from github import Auth, GithubIntegration
except ImportError:
raise ImportError(
'PyGithub is not installed. Please install it with `pip install PyGithub`'
)
try:
with open(github_app_private_key, 'r') as f:
private_key = f.read()
except Exception:
private_key = github_app_private_key
auth = Auth.AppAuth(github_app_id, private_key)
gi = GithubIntegration(auth=auth)
installation = gi.get_installations()
if not installation:
raise ValueError(
f'Please make sure to install the created github app with id {github_app_id} on the repo: {github_repository}More instructions can be found at https://docs.github.com/en/apps/using-github-apps/installing-your-own-github-app'
)
try:
installation = installation[0]
except ValueError as e:
raise ValueError(
f'Please make sure to give correct github parameters Error message: {e}'
)
g = installation.get_github_for_installation()
repo = g.get_repo(github_repository)
github_base_branch = get_from_dict_or_env(values, 'github_base_branch',
'GITHUB_BASE_BRANCH', default=repo.default_branch)
active_branch = get_from_dict_or_env(values, 'active_branch',
'ACTIVE_BRANCH', default=repo.default_branch)
values['github'] = g
values['github_repo_instance'] = repo
values['github_repository'] = github_repository
values['github_app_id'] = github_app_id
values['github_app_private_key'] = github_app_private_key
values['active_branch'] = active_branch
values['github_base_branch'] = github_base_branch
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
github_repository = get_from_dict_or_env(values, 'github_repository',
'GITHUB_REPOSITORY')
github_app_id = get_from_dict_or_env(values, 'github_app_id',
'GITHUB_APP_ID')
github_app_private_key = get_from_dict_or_env(values,
'github_app_private_key', 'GITHUB_APP_PRIVATE_KEY')
try:
from github import Auth, GithubIntegration
except ImportError:
raise ImportError(
'PyGithub is not installed. Please install it with `pip install PyGithub`'
)
try:
with open(github_app_private_key, 'r') as f:
private_key = f.read()
except Exception:
private_key = github_app_private_key
auth = Auth.AppAuth(github_app_id, private_key)
gi = GithubIntegration(auth=auth)
installation = gi.get_installations()
if not installation:
raise ValueError(
f'Please make sure to install the created github app with id {github_app_id} on the repo: {github_repository}More instructions can be found at https://docs.github.com/en/apps/using-github-apps/installing-your-own-github-app'
)
try:
installation = installation[0]
except ValueError as e:
raise ValueError(
f'Please make sure to give correct github parameters Error message: {e}'
)
g = installation.get_github_for_installation()
repo = g.get_repo(github_repository)
github_base_branch = get_from_dict_or_env(values, 'github_base_branch',
'GITHUB_BASE_BRANCH', default=repo.default_branch)
active_branch = get_from_dict_or_env(values, 'active_branch',
'ACTIVE_BRANCH', default=repo.default_branch)
values['github'] = g
values['github_repo_instance'] = repo
values['github_repository'] = github_repository
values['github_app_id'] = github_app_id
values['github_app_private_key'] = github_app_private_key
values['active_branch'] = active_branch
values['github_base_branch'] = github_base_branch
return values
|
Validate that api key and python package exists in environment.
|
test_partial_text_json_output_parser_diff
|
def input_iter(_: Any) ->Iterator[str]:
for token in STREAMED_TOKENS:
yield token
chain = input_iter | SimpleJsonOutputParser(diff=True)
assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON_DIFF
|
def test_partial_text_json_output_parser_diff() ->None:
def input_iter(_: Any) ->Iterator[str]:
for token in STREAMED_TOKENS:
yield token
chain = input_iter | SimpleJsonOutputParser(diff=True)
assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON_DIFF
| null |
from_chain_type
|
"""Load chain from chain type."""
_chain_kwargs = chain_type_kwargs or {}
combine_documents_chain = load_qa_with_sources_chain(llm, chain_type=
chain_type, **_chain_kwargs)
return cls(combine_documents_chain=combine_documents_chain, **kwargs)
|
@classmethod
def from_chain_type(cls, llm: BaseLanguageModel, chain_type: str='stuff',
chain_type_kwargs: Optional[dict]=None, **kwargs: Any
) ->BaseQAWithSourcesChain:
"""Load chain from chain type."""
_chain_kwargs = chain_type_kwargs or {}
combine_documents_chain = load_qa_with_sources_chain(llm, chain_type=
chain_type, **_chain_kwargs)
return cls(combine_documents_chain=combine_documents_chain, **kwargs)
|
Load chain from chain type.
|
_get_metric
|
"""Get the metric function for the given metric name.
Args:
metric (EmbeddingDistance): The metric name.
Returns:
Any: The metric function.
"""
metrics = {EmbeddingDistance.COSINE: self._cosine_distance,
EmbeddingDistance.EUCLIDEAN: self._euclidean_distance,
EmbeddingDistance.MANHATTAN: self._manhattan_distance,
EmbeddingDistance.CHEBYSHEV: self._chebyshev_distance,
EmbeddingDistance.HAMMING: self._hamming_distance}
if metric in metrics:
return metrics[metric]
else:
raise ValueError(f'Invalid metric: {metric}')
|
def _get_metric(self, metric: EmbeddingDistance) ->Any:
"""Get the metric function for the given metric name.
Args:
metric (EmbeddingDistance): The metric name.
Returns:
Any: The metric function.
"""
metrics = {EmbeddingDistance.COSINE: self._cosine_distance,
EmbeddingDistance.EUCLIDEAN: self._euclidean_distance,
EmbeddingDistance.MANHATTAN: self._manhattan_distance,
EmbeddingDistance.CHEBYSHEV: self._chebyshev_distance,
EmbeddingDistance.HAMMING: self._hamming_distance}
if metric in metrics:
return metrics[metric]
else:
raise ValueError(f'Invalid metric: {metric}')
|
Get the metric function for the given metric name.
Args:
metric (EmbeddingDistance): The metric name.
Returns:
Any: The metric function.
|
_import_bearly_tool
|
from langchain_community.tools.bearly.tool import BearlyInterpreterTool
return BearlyInterpreterTool
|
def _import_bearly_tool() ->Any:
from langchain_community.tools.bearly.tool import BearlyInterpreterTool
return BearlyInterpreterTool
| null |
__init__
|
self.persist_path = persist_path
|
def __init__(self, persist_path: str) ->None:
self.persist_path = persist_path
| null |
_import_google_palm
|
from langchain_community.llms.google_palm import GooglePalm
return GooglePalm
|
def _import_google_palm() ->Any:
from langchain_community.llms.google_palm import GooglePalm
return GooglePalm
| null |
as_retriever
|
"""Return VectorStoreRetriever initialized from this VectorStore.
Args:
search_type (Optional[str]): Defines the type of search that
the Retriever should perform.
Can be "similarity" (default), "mmr", or
"similarity_score_threshold".
search_kwargs (Optional[Dict]): Keyword arguments to pass to the
search function. Can include things like:
k: Amount of documents to return (Default: 4)
score_threshold: Minimum relevance threshold
for similarity_score_threshold
fetch_k: Amount of documents to pass to MMR algorithm (Default: 20)
lambda_mult: Diversity of results returned by MMR;
1 for minimum diversity and 0 for maximum. (Default: 0.5)
filter: Filter by document metadata
Returns:
VectorStoreRetriever: Retriever class for VectorStore.
Examples:
.. code-block:: python
# Retrieve more documents with higher diversity
# Useful if your dataset has many similar documents
docsearch.as_retriever(
search_type="mmr",
search_kwargs={'k': 6, 'lambda_mult': 0.25}
)
# Fetch more documents for the MMR algorithm to consider
# But only return the top 5
docsearch.as_retriever(
search_type="mmr",
search_kwargs={'k': 5, 'fetch_k': 50}
)
# Only retrieve documents that have a relevance score
# Above a certain threshold
docsearch.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={'score_threshold': 0.8}
)
# Only get the single most similar document from the dataset
docsearch.as_retriever(search_kwargs={'k': 1})
# Use a filter to only retrieve documents from a specific paper
docsearch.as_retriever(
search_kwargs={'filter': {'paper_title':'GPT-4 Technical Report'}}
)
"""
tags = kwargs.pop('tags', None) or []
tags.extend(self._get_retriever_tags())
return VectorStoreRetriever(vectorstore=self, **kwargs, tags=tags)
|
def as_retriever(self, **kwargs: Any) ->VectorStoreRetriever:
"""Return VectorStoreRetriever initialized from this VectorStore.
Args:
search_type (Optional[str]): Defines the type of search that
the Retriever should perform.
Can be "similarity" (default), "mmr", or
"similarity_score_threshold".
search_kwargs (Optional[Dict]): Keyword arguments to pass to the
search function. Can include things like:
k: Amount of documents to return (Default: 4)
score_threshold: Minimum relevance threshold
for similarity_score_threshold
fetch_k: Amount of documents to pass to MMR algorithm (Default: 20)
lambda_mult: Diversity of results returned by MMR;
1 for minimum diversity and 0 for maximum. (Default: 0.5)
filter: Filter by document metadata
Returns:
VectorStoreRetriever: Retriever class for VectorStore.
Examples:
.. code-block:: python
# Retrieve more documents with higher diversity
# Useful if your dataset has many similar documents
docsearch.as_retriever(
search_type="mmr",
search_kwargs={'k': 6, 'lambda_mult': 0.25}
)
# Fetch more documents for the MMR algorithm to consider
# But only return the top 5
docsearch.as_retriever(
search_type="mmr",
search_kwargs={'k': 5, 'fetch_k': 50}
)
# Only retrieve documents that have a relevance score
# Above a certain threshold
docsearch.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={'score_threshold': 0.8}
)
# Only get the single most similar document from the dataset
docsearch.as_retriever(search_kwargs={'k': 1})
# Use a filter to only retrieve documents from a specific paper
docsearch.as_retriever(
search_kwargs={'filter': {'paper_title':'GPT-4 Technical Report'}}
)
"""
tags = kwargs.pop('tags', None) or []
tags.extend(self._get_retriever_tags())
return VectorStoreRetriever(vectorstore=self, **kwargs, tags=tags)
|
Return VectorStoreRetriever initialized from this VectorStore.
Args:
search_type (Optional[str]): Defines the type of search that
the Retriever should perform.
Can be "similarity" (default), "mmr", or
"similarity_score_threshold".
search_kwargs (Optional[Dict]): Keyword arguments to pass to the
search function. Can include things like:
k: Amount of documents to return (Default: 4)
score_threshold: Minimum relevance threshold
for similarity_score_threshold
fetch_k: Amount of documents to pass to MMR algorithm (Default: 20)
lambda_mult: Diversity of results returned by MMR;
1 for minimum diversity and 0 for maximum. (Default: 0.5)
filter: Filter by document metadata
Returns:
VectorStoreRetriever: Retriever class for VectorStore.
Examples:
.. code-block:: python
# Retrieve more documents with higher diversity
# Useful if your dataset has many similar documents
docsearch.as_retriever(
search_type="mmr",
search_kwargs={'k': 6, 'lambda_mult': 0.25}
)
# Fetch more documents for the MMR algorithm to consider
# But only return the top 5
docsearch.as_retriever(
search_type="mmr",
search_kwargs={'k': 5, 'fetch_k': 50}
)
# Only retrieve documents that have a relevance score
# Above a certain threshold
docsearch.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={'score_threshold': 0.8}
)
# Only get the single most similar document from the dataset
docsearch.as_retriever(search_kwargs={'k': 1})
# Use a filter to only retrieve documents from a specific paper
docsearch.as_retriever(
search_kwargs={'filter': {'paper_title':'GPT-4 Technical Report'}}
)
|
parse_to_str
|
"""Parse the details result."""
result = ''
for key, value in details.items():
result += 'The ' + str(key) + ' is: ' + str(value) + '\n'
return result
|
def parse_to_str(self, details: dict) ->str:
"""Parse the details result."""
result = ''
for key, value in details.items():
result += 'The ' + str(key) + ' is: ' + str(value) + '\n'
return result
|
Parse the details result.
|
_get_run
|
try:
run = self.run_map[str(run_id)]
except KeyError as exc:
raise TracerException(f'No indexed run ID {run_id}.') from exc
if run_type is not None and run.run_type != run_type:
raise TracerException(
f'Found {run.run_type} run at ID {run_id}, but expected {run_type} run.'
)
return run
|
def _get_run(self, run_id: UUID, run_type: Optional[str]=None) ->Run:
try:
run = self.run_map[str(run_id)]
except KeyError as exc:
raise TracerException(f'No indexed run ID {run_id}.') from exc
if run_type is not None and run.run_type != run_type:
raise TracerException(
f'Found {run.run_type} run at ID {run_id}, but expected {run_type} run.'
)
return run
| null |
test_pgvector_relevance_score
|
"""Test to make sure the relevance score is scaled to 0-1."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True)
output = docsearch.similarity_search_with_relevance_scores('foo', k=3)
assert output == [(Document(page_content='foo', metadata={'page': '0'}),
1.0), (Document(page_content='bar', metadata={'page': '1'}),
0.9996744261675065), (Document(page_content='baz', metadata={'page':
'2'}), 0.9986996093328621)]
|
def test_pgvector_relevance_score() ->None:
"""Test to make sure the relevance score is scaled to 0-1."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True)
output = docsearch.similarity_search_with_relevance_scores('foo', k=3)
assert output == [(Document(page_content='foo', metadata={'page': '0'}),
1.0), (Document(page_content='bar', metadata={'page': '1'}),
0.9996744261675065), (Document(page_content='baz', metadata={'page':
'2'}), 0.9986996093328621)]
|
Test to make sure the relevance score is scaled to 0-1.
|
_run
|
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if loop.is_closed():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if loop.is_running():
result_container = []
def thread_target() ->None:
nonlocal result_container
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
try:
result_container.append(new_loop.run_until_complete(self._arun(
*args, **kwargs)))
except Exception as e:
result_container.append(e)
finally:
new_loop.close()
thread = threading.Thread(target=thread_target)
thread.start()
thread.join()
result = result_container[0]
if isinstance(result, Exception):
raise result
return result
else:
result = loop.run_until_complete(self._arun(*args, **kwargs))
loop.close()
return result
|
def _run(self, *args: Any, run_manager: Optional[CallbackManagerForToolRun]
=None, **kwargs: Any) ->str:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if loop.is_closed():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if loop.is_running():
result_container = []
def thread_target() ->None:
nonlocal result_container
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
try:
result_container.append(new_loop.run_until_complete(self.
_arun(*args, **kwargs)))
except Exception as e:
result_container.append(e)
finally:
new_loop.close()
thread = threading.Thread(target=thread_target)
thread.start()
thread.join()
result = result_container[0]
if isinstance(result, Exception):
raise result
return result
else:
result = loop.run_until_complete(self._arun(*args, **kwargs))
loop.close()
return result
| null |
create_filter
|
md_filter_expr = self.config.field_name_mapping[md_key]
if md_filter_expr is None:
return ''
expr = md_filter_expr.split(',')
if len(expr) != 2:
logger.error(
f'filter {md_filter_expr} express is not correct, must contain mapping field and operator.'
)
return ''
md_filter_key = expr[0].strip()
md_filter_operator = expr[1].strip()
if isinstance(md_value, numbers.Number):
return f'{md_filter_key} {md_filter_operator} {md_value}'
return f'{md_filter_key}{md_filter_operator}"{md_value}"'
|
def create_filter(md_key: str, md_value: Any) ->str:
md_filter_expr = self.config.field_name_mapping[md_key]
if md_filter_expr is None:
return ''
expr = md_filter_expr.split(',')
if len(expr) != 2:
logger.error(
f'filter {md_filter_expr} express is not correct, must contain mapping field and operator.'
)
return ''
md_filter_key = expr[0].strip()
md_filter_operator = expr[1].strip()
if isinstance(md_value, numbers.Number):
return f'{md_filter_key} {md_filter_operator} {md_value}'
return f'{md_filter_key}{md_filter_operator}"{md_value}"'
| null |
getERC721Tx
|
url = (
f'https://api.etherscan.io/api?module=account&action=tokennfttx&address={self.account_address}&startblock={self.start_block}&endblock={self.end_block}&page={self.page}&offset={self.offset}&sort={self.sort}&apikey={self.api_key}'
)
try:
response = requests.get(url)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print('Error occurred while making the request:', e)
items = response.json()['result']
result = []
if len(items) == 0:
return [Document(page_content='')]
for item in items:
content = str(item)
metadata = {'from': item['from'], 'tx_hash': item['hash'], 'to': item['to']
}
result.append(Document(page_content=content, metadata=metadata))
return result
|
def getERC721Tx(self) ->List[Document]:
url = (
f'https://api.etherscan.io/api?module=account&action=tokennfttx&address={self.account_address}&startblock={self.start_block}&endblock={self.end_block}&page={self.page}&offset={self.offset}&sort={self.sort}&apikey={self.api_key}'
)
try:
response = requests.get(url)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print('Error occurred while making the request:', e)
items = response.json()['result']
result = []
if len(items) == 0:
return [Document(page_content='')]
for item in items:
content = str(item)
metadata = {'from': item['from'], 'tx_hash': item['hash'], 'to':
item['to']}
result.append(Document(page_content=content, metadata=metadata))
return result
| null |
__init__
|
self.generator = generator
|
def __init__(self, generator: Any):
self.generator = generator
| null |
_validate
|
from jsonschema import ValidationError, validate
try:
validate(instance=prediction, schema=schema)
return {'score': True}
except ValidationError as e:
return {'score': False, 'reasoning': repr(e)}
|
def _validate(self, prediction: Any, schema: Any) ->dict:
from jsonschema import ValidationError, validate
try:
validate(instance=prediction, schema=schema)
return {'score': True}
except ValidationError as e:
return {'score': False, 'reasoning': repr(e)}
| null |
__init__
|
"""Initialize the WhatsAppChatLoader.
Args:
path (str): Path to the exported WhatsApp chat
zip directory, folder, or file.
To generate the dump, open the chat, click the three dots in the top
right corner, and select "More". Then select "Export chat" and
choose "Without media".
"""
self.path = path
ignore_lines = ['This message was deleted', '<Media omitted>',
'image omitted',
'Messages and calls are end-to-end encrypted. No one outside of this chat, not even WhatsApp, can read or listen to them.'
]
self._ignore_lines = re.compile('(' + '|'.join([('\\u200E*' + line) for
line in ignore_lines]) + ')', flags=re.IGNORECASE)
self._message_line_regex = re.compile(
'\\u200E*\\[?(\\d{1,2}/\\d{1,2}/\\d{2,4}, \\d{1,2}:\\d{2}:\\d{2} (?:AM|PM))\\]?[ \\u200E]*([^:]+): (.+)'
, flags=re.IGNORECASE)
|
def __init__(self, path: str):
"""Initialize the WhatsAppChatLoader.
Args:
path (str): Path to the exported WhatsApp chat
zip directory, folder, or file.
To generate the dump, open the chat, click the three dots in the top
right corner, and select "More". Then select "Export chat" and
choose "Without media".
"""
self.path = path
ignore_lines = ['This message was deleted', '<Media omitted>',
'image omitted',
'Messages and calls are end-to-end encrypted. No one outside of this chat, not even WhatsApp, can read or listen to them.'
]
self._ignore_lines = re.compile('(' + '|'.join([('\\u200E*' + line) for
line in ignore_lines]) + ')', flags=re.IGNORECASE)
self._message_line_regex = re.compile(
'\\u200E*\\[?(\\d{1,2}/\\d{1,2}/\\d{2,4}, \\d{1,2}:\\d{2}:\\d{2} (?:AM|PM))\\]?[ \\u200E]*([^:]+): (.+)'
, flags=re.IGNORECASE)
|
Initialize the WhatsAppChatLoader.
Args:
path (str): Path to the exported WhatsApp chat
zip directory, folder, or file.
To generate the dump, open the chat, click the three dots in the top
right corner, and select "More". Then select "Export chat" and
choose "Without media".
|
test_similarity_search_with_score_with_limit_distance
|
"""Test similarity search with score with limit score."""
docsearch = Redis.from_texts(texts, ConsistentFakeEmbeddings(), redis_url=
TEST_REDIS_URL)
output = docsearch.similarity_search_with_score(texts[0], k=3,
distance_threshold=0.1, return_metadata=True)
assert len(output) == 2
for out, score in output:
if out.page_content == texts[1]:
score == COSINE_SCORE
assert drop(docsearch.index_name)
|
def test_similarity_search_with_score_with_limit_distance(texts: List[str]
) ->None:
"""Test similarity search with score with limit score."""
docsearch = Redis.from_texts(texts, ConsistentFakeEmbeddings(),
redis_url=TEST_REDIS_URL)
output = docsearch.similarity_search_with_score(texts[0], k=3,
distance_threshold=0.1, return_metadata=True)
assert len(output) == 2
for out, score in output:
if out.page_content == texts[1]:
score == COSINE_SCORE
assert drop(docsearch.index_name)
|
Test similarity search with score with limit score.
|
validate_environment
|
"""Validate that python package exists in environment."""
try:
import text_generation
values['client'] = text_generation.Client(values['inference_server_url'
], timeout=values['timeout'], **values['server_kwargs'])
values['async_client'] = text_generation.AsyncClient(values[
'inference_server_url'], timeout=values['timeout'], **values[
'server_kwargs'])
except ImportError:
raise ImportError(
'Could not import text_generation python package. Please install it with `pip install text_generation`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that python package exists in environment."""
try:
import text_generation
values['client'] = text_generation.Client(values[
'inference_server_url'], timeout=values['timeout'], **values[
'server_kwargs'])
values['async_client'] = text_generation.AsyncClient(values[
'inference_server_url'], timeout=values['timeout'], **values[
'server_kwargs'])
except ImportError:
raise ImportError(
'Could not import text_generation python package. Please install it with `pip install text_generation`.'
)
return values
|
Validate that python package exists in environment.
|
_on_chain_end
|
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(f"{get_colored_text('[chain/end]', color='blue')} " +
get_bolded_text(
f"""[{crumbs}] [{elapsed(run)}] Exiting {run_type} run with output:
"""
) + f"{try_json_stringify(run.outputs, '[outputs]')}")
|
def _on_chain_end(self, run: Run) ->None:
crumbs = self.get_breadcrumbs(run)
run_type = run.run_type.capitalize()
self.function_callback(
f"{get_colored_text('[chain/end]', color='blue')} " +
get_bolded_text(
f"""[{crumbs}] [{elapsed(run)}] Exiting {run_type} run with output:
"""
) + f"{try_json_stringify(run.outputs, '[outputs]')}")
| null |
__init__
|
log_method = getattr(logger, logging.getLevelName(level=log_level).lower())
def callback(text: str) ->None:
log_method(text, extra=extra)
super().__init__(function=callback, **kwargs)
|
def __init__(self, logger: logging.Logger, log_level: int=logging.INFO,
extra: Optional[dict]=None, **kwargs: Any) ->None:
log_method = getattr(logger, logging.getLevelName(level=log_level).lower())
def callback(text: str) ->None:
log_method(text, extra=extra)
super().__init__(function=callback, **kwargs)
| null |
service_url_from_db_params
|
"""Return connection string from database parameters."""
return f'postgresql://{user}:{password}@{host}:{port}/{database}'
|
@classmethod
def service_url_from_db_params(cls, host: str, port: int, database: str,
user: str, password: str) ->str:
"""Return connection string from database parameters."""
return f'postgresql://{user}:{password}@{host}:{port}/{database}'
|
Return connection string from database parameters.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.