method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_import_file_management_FileSearchTool
|
from langchain_community.tools.file_management import FileSearchTool
return FileSearchTool
|
def _import_file_management_FileSearchTool() ->Any:
    """Import FileSearchTool lazily to avoid a hard import-time dependency."""
    from langchain_community.tools.file_management import (
        FileSearchTool as _FileSearchTool,
    )
    return _FileSearchTool
| null |
test_analyticdb_with_engine_args
|
engine_args = {'pool_recycle': 3600, 'pool_size': 50}
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
docsearch = AnalyticDB.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING, pre_delete_collection=True,
engine_args=engine_args)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
def test_analyticdb_with_engine_args() ->None:
    """Test end to end construction and search."""
    # Bug fix: the docstring previously appeared AFTER the first statement,
    # making it a no-op string expression and leaving __doc__ unset.
    # Custom SQLAlchemy engine options forwarded to AnalyticDB.
    engine_args = {'pool_recycle': 3600, 'pool_size': 50}
    texts = ['foo', 'bar', 'baz']
    docsearch = AnalyticDB.from_texts(
        texts=texts,
        collection_name='test_collection',
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
        engine_args=engine_args,
    )
    output = docsearch.similarity_search('foo', k=1)
    assert output == [Document(page_content='foo')]
| null |
_default_params
|
"""Get the default parameters for calling OpenAI API."""
params = {'model': self.model_name, 'stream': self.streaming, 'n': self.n,
'temperature': self.temperature, **self.model_kwargs}
if self.max_tokens is not None:
params['max_tokens'] = self.max_tokens
if self.request_timeout is not None and not is_openai_v1():
params['request_timeout'] = self.request_timeout
return params
|
@property
def _default_params(self) ->Dict[str, Any]:
    """Get the default parameters for calling OpenAI API."""
    params: Dict[str, Any] = {
        'model': self.model_name,
        'stream': self.streaming,
        'n': self.n,
        'temperature': self.temperature,
    }
    # User-supplied model kwargs override the base defaults above.
    params.update(self.model_kwargs)
    if self.max_tokens is not None:
        params['max_tokens'] = self.max_tokens
    # request_timeout moved in openai v1; only forward it on older clients.
    if self.request_timeout is not None and not is_openai_v1():
        params['request_timeout'] = self.request_timeout
    return params
|
Get the default parameters for calling OpenAI API.
|
on_llm_start
|
"""Run when LLM starts running."""
self.answer_reached = False
|
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **
    kwargs: Any) ->None:
    """Run when LLM starts running."""
    # A new generation begins, so the final answer has not been seen yet.
    self.answer_reached = False
|
Run when LLM starts running.
|
similarity_search_with_score
|
"""Run similarity search with distance."""
return self._similarity_search_with_relevance_scores(query, k=k, metadata=
metadata, **kwargs)
|
def similarity_search_with_score(self, query: str, k: int=4, metadata:
    Optional[Dict[str, Any]]=None, **kwargs: Any) ->List[Tuple[Document, float]
    ]:
    """Run similarity search with distance."""
    # Delegate to the relevance-score helper, which yields (doc, score) pairs.
    return self._similarity_search_with_relevance_scores(
        query, k=k, metadata=metadata, **kwargs
    )
|
Run similarity search with distance.
|
test_timescalevector_with_filter_match
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(texts=texts, collection_name=
'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={'page':
'0'})
assert output == [(Document(page_content='foo', metadata={'page': '0'}), 0.0)]
|
def test_timescalevector_with_filter_match() ->None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': str(index)} for index in range(len(texts))]
    docsearch = TimescaleVector.from_texts(
        texts=texts,
        collection_name='test_collection_filter',
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search_with_score('foo', k=1,
        filter={'page': '0'})
    # The exact match should come back with distance 0.0.
    assert output == [(Document(page_content='foo', metadata={'page': '0'}),
        0.0)]
|
Test end to end construction and search.
|
_llm_type
|
"""Return type of LLM.
This is an override of the base class method.
"""
return 'opaqueprompts'
|
@property
def _llm_type(self) ->str:
    """Return type of LLM.
    This is an override of the base class method.
    """
    # Identifier LangChain uses to label this LLM implementation.
    llm_type = 'opaqueprompts'
    return llm_type
|
Return type of LLM.
This is an override of the base class method.
|
_import_llamacpp
|
from langchain_community.llms.llamacpp import LlamaCpp
return LlamaCpp
|
def _import_llamacpp() ->Any:
    """Import LlamaCpp lazily to avoid a hard import-time dependency."""
    from langchain_community.llms.llamacpp import LlamaCpp as _LlamaCpp
    return _LlamaCpp
| null |
_diff
|
return jsonpatch.make_patch(prev, next).patch
|
def _diff(self, prev: Optional[Any], next: Any) ->Any:
    """Return the JSON Patch operation list turning ``prev`` into ``next``."""
    patch = jsonpatch.make_patch(prev, next)
    return patch.patch
| null |
from_llm
|
"""Create a RetryWithErrorOutputParser from an LLM.
Args:
llm: The LLM to use to retry the completion.
parser: The parser to use to parse the output.
prompt: The prompt to use to retry the completion.
max_retries: The maximum number of times to retry the completion.
Returns:
A RetryWithErrorOutputParser.
"""
from langchain.chains.llm import LLMChain
chain = LLMChain(llm=llm, prompt=prompt)
return cls(parser=parser, retry_chain=chain, max_retries=max_retries)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, parser: BaseOutputParser[T],
    prompt: BasePromptTemplate=NAIVE_RETRY_WITH_ERROR_PROMPT, max_retries:
    int=1) ->RetryWithErrorOutputParser[T]:
    """Create a RetryWithErrorOutputParser from an LLM.
    Args:
        llm: The LLM to use to retry the completion.
        parser: The parser to use to parse the output.
        prompt: The prompt to use to retry the completion.
        max_retries: The maximum number of times to retry the completion.
    Returns:
        A RetryWithErrorOutputParser.
    """
    # Imported here to avoid a circular import at module load time.
    from langchain.chains.llm import LLMChain
    retry_chain = LLMChain(llm=llm, prompt=prompt)
    return cls(parser=parser, retry_chain=retry_chain, max_retries=max_retries)
|
Create a RetryWithErrorOutputParser from an LLM.
Args:
llm: The LLM to use to retry the completion.
parser: The parser to use to parse the output.
prompt: The prompt to use to retry the completion.
max_retries: The maximum number of times to retry the completion.
Returns:
A RetryWithErrorOutputParser.
|
similarity_search_by_vector
|
"""Run similarity search between a query vector and the indexed vectors.
Args:
embedding (List[float]): The query vector for which to find similar
documents.
k (int): The number of documents to return. Default is 4.
filter (RedisFilterExpression, optional): Optional metadata filter.
Defaults to None.
return_metadata (bool, optional): Whether to return metadata.
Defaults to True.
distance_threshold (Optional[float], optional): Maximum vector distance
between selected documents and the query vector. Defaults to None.
Returns:
List[Document]: A list of documents that are most similar to the query
text.
"""
try:
import redis
except ImportError as e:
raise ImportError(
'Could not import redis python package. Please install it with `pip install redis`.'
) from e
if 'score_threshold' in kwargs:
logger.warning(
'score_threshold is deprecated. Use distance_threshold instead.' +
'score_threshold should only be used in ' +
'similarity_search_with_relevance_scores.' +
'score_threshold will be removed in a future release.')
redis_query, params_dict = self._prepare_query(embedding, k=k, filter=
filter, distance_threshold=distance_threshold, with_metadata=
return_metadata, with_distance=False)
try:
results = self.client.ft(self.index_name).search(redis_query, params_dict)
except redis.exceptions.ResponseError as e:
if str(e).split(' ')[0] == 'Syntax':
raise ValueError('Query failed with syntax error. ' +
'This is likely due to malformation of ' +
'filter, vector, or query argument') from e
raise e
docs = []
for result in results.docs:
metadata = {}
if return_metadata:
metadata = {'id': result.id}
metadata.update(self._collect_metadata(result))
content_key = self._schema.content_key
docs.append(Document(page_content=getattr(result, content_key),
metadata=metadata))
return docs
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4,
    filter: Optional[RedisFilterExpression]=None, return_metadata: bool=
    True, distance_threshold: Optional[float]=None, **kwargs: Any) ->List[
    Document]:
    """Run similarity search between a query vector and the indexed vectors.
    Args:
        embedding (List[float]): The query vector for which to find similar
            documents.
        k (int): The number of documents to return. Default is 4.
        filter (RedisFilterExpression, optional): Optional metadata filter.
            Defaults to None.
        return_metadata (bool, optional): Whether to return metadata.
            Defaults to True.
        distance_threshold (Optional[float], optional): Maximum vector distance
            between selected documents and the query vector. Defaults to None.
    Returns:
        List[Document]: A list of documents that are most similar to the query
            text.
    Raises:
        ImportError: If the ``redis`` python package is not installed.
        ValueError: If the generated Redis query has a syntax error.
    """
    try:
        import redis
    except ImportError as e:
        raise ImportError(
            'Could not import redis python package. Please install it with `pip install redis`.'
            ) from e
    if 'score_threshold' in kwargs:
        # Bug fix: the previous concatenated literals lacked spaces between
        # sentences, emitting e.g. "instead.score_threshold should ...".
        logger.warning(
            'score_threshold is deprecated. Use distance_threshold instead. '
            'score_threshold should only be used in '
            'similarity_search_with_relevance_scores. '
            'score_threshold will be removed in a future release.')
    # Build the FT.SEARCH query; distances are not needed in the results here.
    redis_query, params_dict = self._prepare_query(embedding, k=k, filter=
        filter, distance_threshold=distance_threshold, with_metadata=
        return_metadata, with_distance=False)
    try:
        results = self.client.ft(self.index_name).search(redis_query,
            params_dict)
    except redis.exceptions.ResponseError as e:
        # Redis reports malformed queries as "Syntax error ..." responses.
        if str(e).split(' ')[0] == 'Syntax':
            raise ValueError('Query failed with syntax error. ' +
                'This is likely due to malformation of ' +
                'filter, vector, or query argument') from e
        raise e
    docs = []
    for result in results.docs:
        metadata = {}
        if return_metadata:
            # Always include the document id alongside stored metadata.
            metadata = {'id': result.id}
            metadata.update(self._collect_metadata(result))
        content_key = self._schema.content_key
        docs.append(Document(page_content=getattr(result, content_key),
            metadata=metadata))
    return docs
|
Run similarity search between a query vector and the indexed vectors.
Args:
embedding (List[float]): The query vector for which to find similar
documents.
k (int): The number of documents to return. Default is 4.
filter (RedisFilterExpression, optional): Optional metadata filter.
Defaults to None.
return_metadata (bool, optional): Whether to return metadata.
Defaults to True.
distance_threshold (Optional[float], optional): Maximum vector distance
between selected documents and the query vector. Defaults to None.
Returns:
List[Document]: A list of documents that are most similar to the query
text.
|
completion_with_retry
|
def _completion_with_retry(**kwargs: Any) ->Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
|
def completion_with_retry(self, run_manager: Optional[
    CallbackManagerForLLMRun]=None, **kwargs: Any) ->Any:
    """Forward ``kwargs`` to the client's ``create`` call."""
    def _do_create(**inner_kwargs: Any) ->Any:
        # Inner wrapper kept so a retry decorator can be layered on later.
        return self.client.create(**inner_kwargs)
    return _do_create(**kwargs)
| null |
setUp
|
self.human_msg = HumanMessage(content='human')
self.ai_msg = AIMessage(content='ai')
self.sys_msg = SystemMessage(content='system')
self.func_msg = FunctionMessage(name='func', content='function')
self.tool_msg = ToolMessage(tool_call_id='tool_id', content='tool')
self.chat_msg = ChatMessage(role='Chat', content='chat')
|
def setUp(self) ->None:
    """Build one fixture message of every supported message type."""
    # One instance per message class exercised by the tests below.
    self.human_msg = HumanMessage(content='human')
    self.ai_msg = AIMessage(content='ai')
    self.sys_msg = SystemMessage(content='system')
    self.func_msg = FunctionMessage(name='func', content='function')
    self.tool_msg = ToolMessage(tool_call_id='tool_id', content='tool')
    self.chat_msg = ChatMessage(role='Chat', content='chat')
| null |
_on_llm_error
|
"""Process the LLM Run upon error."""
|
def _on_llm_error(self, run: Run) ->None:
    """Process the LLM Run upon error."""
    # Intentional no-op hook; subclasses may override to handle failed runs.
|
Process the LLM Run upon error.
|
validate_environment
|
anthropic_api_key = convert_to_secret_str(values.get('anthropic_api_key') or
os.environ.get('ANTHROPIC_API_KEY') or '')
values['anthropic_api_key'] = anthropic_api_key
values['_client'] = anthropic.Client(api_key=anthropic_api_key.
get_secret_value())
values['_async_client'] = anthropic.AsyncClient(api_key=anthropic_api_key.
get_secret_value())
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Resolve the Anthropic API key and construct sync/async clients."""
    # Prefer an explicitly provided key, then the environment variable.
    raw_key = values.get('anthropic_api_key') or os.environ.get(
        'ANTHROPIC_API_KEY') or ''
    api_key = convert_to_secret_str(raw_key)
    values['anthropic_api_key'] = api_key
    secret = api_key.get_secret_value()
    values['_client'] = anthropic.Client(api_key=secret)
    values['_async_client'] = anthropic.AsyncClient(api_key=secret)
    return values
| null |
_invocation_params
|
openai_args = {'model': self.model, 'request_timeout': self.request_timeout,
'headers': self.headers, 'api_key': self.openai_api_key, 'organization':
self.openai_organization, 'api_base': self.openai_api_base,
'api_version': self.openai_api_version, **self.model_kwargs}
if self.openai_proxy:
import openai
openai.proxy = {'http': self.openai_proxy, 'https': self.openai_proxy}
return openai_args
|
@property
def _invocation_params(self) ->Dict:
    """Assemble the keyword arguments for OpenAI API invocations."""
    invocation_args: Dict = {
        'model': self.model,
        'request_timeout': self.request_timeout,
        'headers': self.headers,
        'api_key': self.openai_api_key,
        'organization': self.openai_organization,
        'api_base': self.openai_api_base,
        'api_version': self.openai_api_version,
    }
    # User-supplied model kwargs override the base values above.
    invocation_args.update(self.model_kwargs)
    if self.openai_proxy:
        import openai
        # NOTE(review): mutates module-level openai state; the proxy setting
        # is shared by every subsequent call in the process.
        openai.proxy = {'http': self.openai_proxy, 'https': self.openai_proxy}
    return invocation_args
| null |
output_keys
|
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
|
@property
def output_keys(self) ->List[str]:
    """Return the output keys.
    :meta private:
    """
    # This chain exposes exactly one output key.
    return [self.output_key]
|
Return the output keys.
:meta private:
|
get_format_instructions
|
schema = self.pydantic_object.schema()
reduced_schema = schema
if 'title' in reduced_schema:
del reduced_schema['title']
if 'type' in reduced_schema:
del reduced_schema['type']
schema_str = json.dumps(reduced_schema)
return YAML_FORMAT_INSTRUCTIONS.format(schema=schema_str)
|
def get_format_instructions(self) ->str:
    """Return format instructions embedding the object's JSON schema.

    The ``title`` and ``type`` keys are stripped from the schema before it
    is rendered into the instructions template.
    """
    schema = self.pydantic_object.schema()
    # Bug fix: copy before mutating. ``reduced_schema = schema`` aliased the
    # dict returned by pydantic, which caches it, so deleting keys in place
    # corrupted the cached schema for later callers.
    reduced_schema = dict(schema)
    if 'title' in reduced_schema:
        del reduced_schema['title']
    if 'type' in reduced_schema:
        del reduced_schema['type']
    schema_str = json.dumps(reduced_schema)
    return YAML_FORMAT_INSTRUCTIONS.format(schema=schema_str)
| null |
test_awa_embedding_documents
|
"""Test Awa embeddings for documents."""
documents = ['foo bar', 'test document']
embedding = AwaEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 768
|
def test_awa_embedding_documents() ->None:
    """Test Awa embeddings for documents."""
    docs = ['foo bar', 'test document']
    embedder = AwaEmbeddings()
    vectors = embedder.embed_documents(docs)
    # One 768-dimensional vector per input document.
    assert len(vectors) == 2
    assert len(vectors[0]) == 768
|
Test Awa embeddings for documents.
|
test_visit_comparison
|
comp = Comparison(comparator=Comparator.EQ, attribute='foo', value='10')
expected = {'term': {'metadata.foo.keyword': '10'}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
|
def test_visit_comparison() ->None:
    """EQ comparisons should translate to an Elasticsearch term filter."""
    comparison = Comparison(comparator=Comparator.EQ, attribute='foo',
        value='10')
    expected = {'term': {'metadata.foo.keyword': '10'}}
    assert DEFAULT_TRANSLATOR.visit_comparison(comparison) == expected
| null |
is_lc_serializable
|
"""Return whether this class is serializable."""
return True
|
@classmethod
def is_lc_serializable(cls) ->bool:
    """Return whether this class is serializable."""
    # This class always supports LangChain serialization.
    serializable = True
    return serializable
|
Return whether this class is serializable.
|
parse_dependency_string
|
if dep is not None and dep.startswith('git+'):
if repo is not None or branch is not None:
raise ValueError(
'If a dependency starts with git+, you cannot manually specify a repo or branch.'
)
gitstring = dep[4:]
subdirectory = None
ref = None
if '#subdirectory=' in gitstring:
gitstring, subdirectory = gitstring.split('#subdirectory=')
if '#' in subdirectory or '@' in subdirectory:
raise ValueError(
'#subdirectory must be the last part of the dependency string')
if '://' not in gitstring:
raise ValueError(
'git+ dependencies must start with git+https:// or git+ssh://')
_, find_slash = gitstring.split('://', 1)
if '/' not in find_slash:
post_slash = find_slash
ref = None
else:
_, post_slash = find_slash.split('/', 1)
if '@' in post_slash or '#' in post_slash:
_, ref = re.split('[@#]', post_slash, 1)
gitstring = gitstring[:-len(ref) - 1] if ref is not None else gitstring
return DependencySource(git=gitstring, ref=ref, subdirectory=
subdirectory, api_path=api_path, event_metadata={
'dependency_string': dep})
elif dep is not None and dep.startswith('https://'):
raise ValueError('Only git dependencies are supported')
else:
base_subdir = Path(DEFAULT_GIT_SUBDIRECTORY) if repo is None else Path()
subdir = str(base_subdir / dep) if dep is not None else None
gitstring = (DEFAULT_GIT_REPO if repo is None else
f"https://github.com/{repo.strip('/')}.git")
ref = DEFAULT_GIT_REF if branch is None else branch
return DependencySource(git=gitstring, ref=ref, subdirectory=subdir,
api_path=api_path, event_metadata={'dependency_string': dep,
'used_repo_flag': repo is not None, 'used_branch_flag': branch is not
None})
|
def parse_dependency_string(dep: Optional[str], repo: Optional[str], branch:
    Optional[str], api_path: Optional[str]) ->DependencySource:
    """Resolve a dependency spec into a DependencySource.

    Two forms are supported: an explicit ``git+`` URL (optionally carrying an
    ``@ref``/``#ref`` marker and a trailing ``#subdirectory=``), or a bare
    template name resolved against the default repo or the ``repo``/``branch``
    overrides.

    Args:
        dep: Dependency string (``git+...`` URL or template name), or None.
        repo: Optional ``owner/name`` GitHub shorthand; not allowed together
            with a ``git+`` dep.
        branch: Optional branch override; not allowed together with a
            ``git+`` dep.
        api_path: Optional API path recorded on the result.

    Returns:
        A DependencySource with the git URL, ref, subdirectory, and metadata.

    Raises:
        ValueError: For malformed ``git+`` strings, plain ``https://`` deps,
            or conflicting repo/branch flags.
    """
    if dep is not None and dep.startswith('git+'):
        if repo is not None or branch is not None:
            raise ValueError(
                'If a dependency starts with git+, you cannot manually specify a repo or branch.'
                )
        # Strip the 'git+' prefix; the remainder is a plain git URL.
        gitstring = dep[4:]
        subdirectory = None
        ref = None
        if '#subdirectory=' in gitstring:
            gitstring, subdirectory = gitstring.split('#subdirectory=')
            if '#' in subdirectory or '@' in subdirectory:
                raise ValueError(
                    '#subdirectory must be the last part of the dependency string'
                    )
        if '://' not in gitstring:
            raise ValueError(
                'git+ dependencies must start with git+https:// or git+ssh://')
        # Only look past the scheme for an '@' or '#' ref marker, so that
        # user@host in ssh URLs is not mistaken for a ref.
        _, find_slash = gitstring.split('://', 1)
        if '/' not in find_slash:
            post_slash = find_slash
            ref = None
        else:
            _, post_slash = find_slash.split('/', 1)
            if '@' in post_slash or '#' in post_slash:
                _, ref = re.split('[@#]', post_slash, 1)
        # Drop the ref and its one-character separator from the URL tail.
        gitstring = gitstring[:-len(ref) - 1] if ref is not None else gitstring
        return DependencySource(git=gitstring, ref=ref, subdirectory=
            subdirectory, api_path=api_path, event_metadata={
            'dependency_string': dep})
    elif dep is not None and dep.startswith('https://'):
        raise ValueError('Only git dependencies are supported')
    else:
        # Bare template name: resolve against the default repo/branch (or the
        # provided overrides); the template lives under the default subdir
        # only when using the default repo.
        base_subdir = Path(DEFAULT_GIT_SUBDIRECTORY) if repo is None else Path(
            )
        subdir = str(base_subdir / dep) if dep is not None else None
        gitstring = (DEFAULT_GIT_REPO if repo is None else
            f"https://github.com/{repo.strip('/')}.git")
        ref = DEFAULT_GIT_REF if branch is None else branch
        return DependencySource(git=gitstring, ref=ref, subdirectory=subdir,
            api_path=api_path, event_metadata={'dependency_string': dep,
            'used_repo_flag': repo is not None, 'used_branch_flag': branch
            is not None})
| null |
test_chat_google_genai_single_call_with_history
|
model = ChatGoogleGenerativeAI(model=_MODEL)
text_question1, text_answer1 = 'How much is 2+2?', '4'
text_question2 = 'How much is 3+3?'
message1 = HumanMessage(content=text_question1)
message2 = AIMessage(content=text_answer1)
message3 = HumanMessage(content=text_question2)
response = model([message1, message2, message3])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
|
def test_chat_google_genai_single_call_with_history() ->None:
    """The chat model should accept a multi-turn message history."""
    model = ChatGoogleGenerativeAI(model=_MODEL)
    question1, answer1 = 'How much is 2+2?', '4'
    question2 = 'How much is 3+3?'
    history = [
        HumanMessage(content=question1),
        AIMessage(content=answer1),
        HumanMessage(content=question2),
    ]
    response = model(history)
    assert isinstance(response, AIMessage)
    assert isinstance(response.content, str)
| null |
learn
|
import vowpal_wabbit_next as vw
vw_ex = self.feature_embedder.format(event)
text_parser = vw.TextFormatParser(self.workspace)
multi_ex = parse_lines(text_parser, vw_ex)
self.workspace.learn_one(multi_ex)
|
def learn(self, event: TEvent) ->None:
    """Train the VW workspace on a single event."""
    import vowpal_wabbit_next as vw
    # Embed the event into VW text format, parse, then learn in place.
    formatted = self.feature_embedder.format(event)
    parser = vw.TextFormatParser(self.workspace)
    examples = parse_lines(parser, formatted)
    self.workspace.learn_one(examples)
| null |
load
|
"""Fetch text from one single GitBook page."""
if self.load_all_paths:
soup_info = self.scrape()
relative_paths = self._get_paths(soup_info)
urls = [urljoin(self.base_url, path) for path in relative_paths]
soup_infos = self.scrape_all(urls)
_documents = [self._get_document(soup_info, url) for soup_info, url in
zip(soup_infos, urls)]
else:
soup_info = self.scrape()
_documents = [self._get_document(soup_info, self.web_path)]
documents = [d for d in _documents if d]
return documents
|
def load(self) ->List[Document]:
    """Fetch text from one single GitBook page."""
    if not self.load_all_paths:
        # Single-page mode: scrape just the configured web path.
        soup_info = self.scrape()
        candidates = [self._get_document(soup_info, self.web_path)]
    else:
        # Crawl mode: discover relative paths, then scrape each page.
        soup_info = self.scrape()
        paths = self._get_paths(soup_info)
        urls = [urljoin(self.base_url, path) for path in paths]
        soup_infos = self.scrape_all(urls)
        candidates = [self._get_document(info, url) for info, url in zip(
            soup_infos, urls)]
    # Drop pages that produced no document.
    return [doc for doc in candidates if doc]
|
Fetch text from one single GitBook page.
|
_transform
|
buffer = ''
for chunk in input:
if isinstance(chunk, BaseMessage):
chunk_content = chunk.content
if not isinstance(chunk_content, str):
continue
chunk = chunk_content
buffer += chunk
try:
done_idx = 0
for m in droplastn(self.parse_iter(buffer), 1):
done_idx = m.end()
yield [m.group(1)]
buffer = buffer[done_idx:]
except NotImplementedError:
parts = self.parse(buffer)
if len(parts) > 1:
for part in parts[:-1]:
yield [part]
buffer = parts[-1]
for part in self.parse(buffer):
yield [part]
|
def _transform(self, input: Iterator[Union[str, BaseMessage]]) ->Iterator[List
    [str]]:
    """Incrementally parse streamed chunks into completed list items.

    Buffers incoming text and yields each fully parsed element (as a
    one-item list) as soon as it is complete, keeping any trailing partial
    element buffered until more input arrives or the stream ends.
    """
    buffer = ''
    for chunk in input:
        if isinstance(chunk, BaseMessage):
            chunk_content = chunk.content
            # Non-string message content cannot be buffered; skip it.
            if not isinstance(chunk_content, str):
                continue
            chunk = chunk_content
        buffer += chunk
        try:
            done_idx = 0
            # Yield every completed match except the last one, which may
            # still be growing as more chunks arrive.
            for m in droplastn(self.parse_iter(buffer), 1):
                done_idx = m.end()
                yield [m.group(1)]
            buffer = buffer[done_idx:]
        except NotImplementedError:
            # Fallback for parsers without incremental support: re-parse the
            # whole buffer and keep the final (possibly partial) part.
            parts = self.parse(buffer)
            if len(parts) > 1:
                for part in parts[:-1]:
                    yield [part]
                buffer = parts[-1]
    # End of stream: everything left in the buffer is complete.
    for part in self.parse(buffer):
        yield [part]
| null |
_import_google_trends
|
from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper
return GoogleTrendsAPIWrapper
|
def _import_google_trends() ->Any:
    """Import GoogleTrendsAPIWrapper lazily to defer the dependency."""
    from langchain_community.utilities.google_trends import (
        GoogleTrendsAPIWrapper as _GoogleTrendsAPIWrapper,
    )
    return _GoogleTrendsAPIWrapper
| null |
__init__
|
"""Initialize with API key, connector id, and account id.
Args:
api_key: The Psychic API key.
account_id: The Psychic account id.
connector_id: The Psychic connector id.
"""
try:
from psychicapi import ConnectorId, Psychic
except ImportError:
raise ImportError(
'`psychicapi` package not found, please run `pip install psychicapi`')
self.psychic = Psychic(secret_key=api_key)
self.connector_id = ConnectorId(connector_id)
self.account_id = account_id
|
def __init__(self, api_key: str, account_id: str, connector_id: Optional[
    str]=None):
    """Initialize with API key, connector id, and account id.
    Args:
        api_key: The Psychic API key.
        account_id: The Psychic account id.
        connector_id: The Psychic connector id.
    """
    try:
        from psychicapi import ConnectorId, Psychic
    except ImportError as e:
        # Chain the original error (``from e``) so the root cause stays
        # visible in the traceback, matching the style used elsewhere.
        raise ImportError(
            '`psychicapi` package not found, please run `pip install psychicapi`'
            ) from e
    self.psychic = Psychic(secret_key=api_key)
    self.connector_id = ConnectorId(connector_id)
    self.account_id = account_id
|
Initialize with API key, connector id, and account id.
Args:
api_key: The Psychic API key.
account_id: The Psychic account id.
connector_id: The Psychic connector id.
|
test_run_empty_query
|
"""Test that run gives the correct answer with empty query."""
search = api_client.run(query='', sort='relevance', time_filter='all',
subreddit='all', limit=5)
assert search == 'Searching r/all did not find any posts:'
|
@pytest.mark.requires('praw')
def test_run_empty_query(api_client: RedditSearchAPIWrapper) ->None:
    """Test that run gives the correct answer with empty query."""
    result = api_client.run(query='', sort='relevance', time_filter='all',
        subreddit='all', limit=5)
    # An empty query should report zero posts found.
    assert result == 'Searching r/all did not find any posts:'
|
Test that run gives the correct answer with empty query.
|
clear
|
"""Clear cache."""
with Session(self.engine) as session:
session.query(self.cache_schema).delete()
session.commit()
|
def clear(self, **kwargs: Any) ->None:
    """Clear cache."""
    # Delete every cached row, committing within one session scope.
    with Session(self.engine) as session:
        session.query(self.cache_schema).delete()
        session.commit()
|
Clear cache.
|
is_lc_serializable
|
"""Return whether or not the class is serializable."""
return True
|
@classmethod
def is_lc_serializable(cls) ->bool:
    """Return whether or not the class is serializable."""
    # LangChain serialization is always supported for this class.
    supported = True
    return supported
|
Return whether or not the class is serializable.
|
test_python_ast_repl_one_line_exception
|
program = '[1, 2, 3][4]'
tool = PythonAstREPLTool()
assert tool.run(program) == 'IndexError: list index out of range'
|
@pytest.mark.skipif(sys.version_info < (3, 9), reason=
    'Requires python version >= 3.9 to run.')
def test_python_ast_repl_one_line_exception() ->None:
    """An out-of-range index should surface as a formatted error string."""
    source = '[1, 2, 3][4]'
    repl = PythonAstREPLTool()
    assert repl.run(source) == 'IndexError: list index out of range'
| null |
get_query
|
sql_query = re.sub('\\[([\\w\\s,]+)\\]', replace_brackets, query)
return sql_query
|
def get_query(query):
    """Rewrite bracketed column lists in ``query`` via replace_brackets."""
    # Matches spans like "[col1, col2]" (word chars, whitespace, commas).
    bracket_pattern = '\\[([\\w\\s,]+)\\]'
    return re.sub(bracket_pattern, replace_brackets, query)
| null |
_process_element
|
"""
Traverse through HTML tree recursively to preserve newline and skip
unwanted (code/binary) elements
"""
from bs4 import NavigableString
from bs4.element import Comment, Tag
tag_name = getattr(element, 'name', None)
if isinstance(element, Comment) or tag_name in elements_to_skip:
return ''
elif isinstance(element, NavigableString):
return element
elif tag_name == 'br':
return '\n'
elif tag_name in newline_elements:
return ''.join(_process_element(child, elements_to_skip,
newline_elements) for child in element.children if isinstance(child,
(Tag, NavigableString, Comment))) + '\n'
else:
return ''.join(_process_element(child, elements_to_skip,
newline_elements) for child in element.children if isinstance(child,
(Tag, NavigableString, Comment)))
|
def _process_element(element: Union[Tag, NavigableString, Comment],
    elements_to_skip: List[str], newline_elements: List[str]) ->str:
    """
    Traverse through HTML tree recursively to preserve newline and skip
    unwanted (code/binary) elements
    """
    from bs4 import NavigableString
    from bs4.element import Comment, Tag
    tag_name = getattr(element, 'name', None)
    # Comments and explicitly skipped tags contribute no text.
    if isinstance(element, Comment) or tag_name in elements_to_skip:
        return ''
    if isinstance(element, NavigableString):
        return element
    if tag_name == 'br':
        return '\n'
    # Recurse into child nodes, ignoring anything that is not a tag,
    # string, or comment.
    rendered = ''.join(_process_element(child, elements_to_skip,
        newline_elements) for child in element.children if isinstance(child,
        (Tag, NavigableString, Comment)))
    # Block-level elements get a trailing newline to preserve layout.
    return rendered + '\n' if tag_name in newline_elements else rendered
|
Traverse through HTML tree recursively to preserve newline and skip
unwanted (code/binary) elements
|
construct_full_prompt
|
prompt_start = """Your decisions must always be made independently without seeking user assistance.
Play to your strengths as an LLM and pursue simple strategies with no legal complications.
If you have completed all your tasks, make sure to use the "finish" command."""
full_prompt = f"""You are {self.ai_name}, {self.ai_role}
{prompt_start}
GOALS:
"""
for i, goal in enumerate(goals):
full_prompt += f'{i + 1}. {goal}\n'
full_prompt += f"""
{get_prompt(self.tools)}"""
return full_prompt
|
def construct_full_prompt(self, goals: List[str]) ->str:
    """Build the full agent prompt: identity, constraints, goals, tools."""
    prompt_start = """Your decisions must always be made independently without seeking user assistance.
Play to your strengths as an LLM and pursue simple strategies with no legal complications.
If you have completed all your tasks, make sure to use the "finish" command."""
    # Number the goals 1..n, one per line.
    goal_lines = ''.join(f'{idx + 1}. {goal}\n' for idx, goal in
        enumerate(goals))
    full_prompt = (
        f'You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n'
         + goal_lines + f'\n\n{get_prompt(self.tools)}')
    return full_prompt
| null |
clear_data
|
"""
Clear data in a collection.
Args:
collection_name (Optional[str]): The name of the collection.
If not provided, the default collection will be used.
"""
if not collection_name:
collection_name = self._collection_name
self._client.drop_table(collection_name)
|
def clear_data(self, collection_name: str='') ->None:
    """
    Clear data in a collection by dropping its backing table.
    Args:
        collection_name (str): The name of the collection. If empty,
            the default collection will be used.
    """
    # Fall back to the instance's default collection when none is given.
    if not collection_name:
        collection_name = self._collection_name
    self._client.drop_table(collection_name)
|
Clear data in a collection.
Args:
collection_name (Optional[str]): The name of the collection.
If not provided, the default collection will be used.
|
test_structured_args_decorator_no_infer_schema
|
"""Test functionality with structured arguments parsed as a decorator."""
@tool(infer_schema=False)
def structured_tool_input(arg1: int, arg2: Union[float, datetime], opt_arg:
Optional[dict]=None) ->str:
"""Return the arguments directly."""
return f'{arg1}, {arg2}, {opt_arg}'
assert isinstance(structured_tool_input, BaseTool)
assert structured_tool_input.name == 'structured_tool_input'
args = {'arg1': 1, 'arg2': 0.001, 'opt_arg': {'foo': 'bar'}}
with pytest.raises(ToolException):
assert structured_tool_input.run(args)
|
def test_structured_args_decorator_no_infer_schema() ->None:
    """Test functionality with structured arguments parsed as a decorator."""
    @tool(infer_schema=False)
    def structured_tool_input(arg1: int, arg2: Union[float, datetime],
        opt_arg: Optional[dict]=None) ->str:
        """Return the arguments directly."""
        return f'{arg1}, {arg2}, {opt_arg}'
    assert isinstance(structured_tool_input, BaseTool)
    assert structured_tool_input.name == 'structured_tool_input'
    call_args = {'arg1': 1, 'arg2': 0.001, 'opt_arg': {'foo': 'bar'}}
    # Without an inferred schema, dict input is rejected at runtime.
    with pytest.raises(ToolException):
        assert structured_tool_input.run(call_args)
|
Test functionality with structured arguments parsed as a decorator.
|
update
|
"""Update cache based on prompt and llm_string."""
blob = _dumps_generations(return_val)
self.kv_cache.put(llm_string=_hash(llm_string), prompt=_hash(prompt),
body_blob=blob)
|
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) ->None:
    """Update cache based on prompt and llm_string."""
    serialized = _dumps_generations(return_val)
    # Keys are hashed so arbitrarily long prompts fit the KV store key size.
    self.kv_cache.put(llm_string=_hash(llm_string), prompt=_hash(prompt),
        body_blob=serialized)
|
Update cache based on prompt and llm_string.
|
_import_clickhouse_settings
|
from langchain_community.vectorstores.clickhouse import ClickhouseSettings
return ClickhouseSettings
|
def _import_clickhouse_settings() ->Any:
    """Import ClickhouseSettings lazily to defer the dependency."""
    from langchain_community.vectorstores.clickhouse import (
        ClickhouseSettings as _ClickhouseSettings,
    )
    return _ClickhouseSettings
| null |
lc_secrets
|
return {'everlyai_api_key': 'EVERLYAI_API_KEY'}
|
@property
def lc_secrets(self) ->Dict[str, str]:
    """Map the secret constructor field to its environment variable."""
    secret_map: Dict[str, str] = {'everlyai_api_key': 'EVERLYAI_API_KEY'}
    return secret_map
| null |
default_get_input
|
"""Return the compression chain input."""
return {'question': query, 'context': doc.page_content}
|
def default_get_input(query: str, doc: Document) ->Dict[str, Any]:
    """Build the compressor chain's input mapping from a query and a document."""
    return dict(question=query, context=doc.page_content)
|
Return the compression chain input.
|
to_messages
|
"""Return prompt as a list of messages."""
return list(self.messages)
|
def to_messages(self) ->List[BaseMessage]:
    """Return the prompt's messages as a new list (shallow copy)."""
    copied_messages = list(self.messages)
    return copied_messages
|
Return prompt as a list of messages.
|
test_google_generativeai_call
|
"""Test valid call to Google GenerativeAI text API."""
if model_name:
llm = GooglePalm(max_output_tokens=10, model_name=model_name)
else:
llm = GooglePalm(max_output_tokens=10)
output = llm('Say foo:')
assert isinstance(output, str)
assert llm._llm_type == 'google_palm'
if model_name and 'gemini' in model_name:
assert llm.client.model_name == 'models/gemini-pro'
else:
assert llm.model_name == 'models/text-bison-001'
|
@pytest.mark.parametrize('model_name', model_names)
def test_google_generativeai_call(model_name: str) ->None:
    """Test valid call to Google GenerativeAI text API."""
    # Only pass model_name through when the parametrized value is non-empty.
    init_kwargs: dict = {'max_output_tokens': 10}
    if model_name:
        init_kwargs['model_name'] = model_name
    llm = GooglePalm(**init_kwargs)
    result = llm('Say foo:')
    assert isinstance(result, str)
    assert llm._llm_type == 'google_palm'
    uses_gemini = bool(model_name) and 'gemini' in model_name
    if uses_gemini:
        assert llm.client.model_name == 'models/gemini-pro'
    else:
        assert llm.model_name == 'models/text-bison-001'
|
Test valid call to Google GenerativeAI text API.
|
_call
|
data: Dict[str, Any] = {'prompt': prompt, 'temperature': self.temperature,
'n': self.n, **self.extra_params, **kwargs}
if (stop := self.stop or stop):
data['stop'] = stop
if self.max_tokens is not None:
data['max_tokens'] = self.max_tokens
resp = self._client.predict(endpoint=self.endpoint, inputs=data)
return resp['choices'][0]['text']
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Run one completion request and return the first choice's text."""
    # Build the payload; extra_params override the base fields and kwargs
    # override everything (same precedence as the original dict literal).
    payload: Dict[str, Any] = {'prompt': prompt, 'temperature': self.
        temperature, 'n': self.n}
    payload.update(self.extra_params)
    payload.update(kwargs)
    effective_stop = self.stop or stop
    if effective_stop:
        payload['stop'] = effective_stop
    if self.max_tokens is not None:
        payload['max_tokens'] = self.max_tokens
    response = self._client.predict(endpoint=self.endpoint, inputs=payload)
    return response['choices'][0]['text']
| null |
check_spacy_model
|
import spacy
if not spacy.util.is_package('en_core_web_lg'):
pytest.skip(reason="Spacy model 'en_core_web_lg' not installed")
yield
|
@pytest.fixture(scope='module', autouse=True)
def check_spacy_model() ->Iterator[None]:
    """Skip this module's tests when the required spacy model is missing."""
    import spacy
    model_installed = spacy.util.is_package('en_core_web_lg')
    if not model_installed:
        pytest.skip(reason="Spacy model 'en_core_web_lg' not installed")
    yield
| null |
_transform
|
for chunk in input:
picked = self._pick(chunk)
if picked is not None:
yield picked
|
def _transform(self, input: Iterator[Dict[str, Any]]) ->Iterator[Dict[str, Any]
    ]:
    """Yield the picked projection of each chunk, skipping empty picks."""
    yield from (picked for chunk in input if (picked := self._pick(chunk)
        ) is not None)
| null |
test_neo4jvector_catch_wrong_node_label
|
"""Test if node label is misspelled, but index name is correct."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
Neo4jVector.from_embeddings(text_embeddings=text_embedding_pairs, embedding
=FakeEmbeddingsWithOsDimension(), url=url, username=username, password=
password, pre_delete_collection=True)
existing = Neo4jVector.from_existing_index(embedding=
FakeEmbeddingsWithOsDimension(), url=url, username=username, password=
password, index_name='vector', node_label='test')
output = existing.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
drop_vector_indexes(existing)
|
def test_neo4jvector_catch_wrong_node_label() ->None:
    """A misspelled node label must not break lookup by a correct index name."""
    vectors = FakeEmbeddingsWithOsDimension().embed_documents(texts)
    pairs = list(zip(texts, vectors))
    Neo4jVector.from_embeddings(text_embeddings=pairs, embedding=
        FakeEmbeddingsWithOsDimension(), url=url, username=username,
        password=password, pre_delete_collection=True)
    # node_label='test' is wrong on purpose; index_name='vector' is right.
    existing = Neo4jVector.from_existing_index(embedding=
        FakeEmbeddingsWithOsDimension(), url=url, username=username,
        password=password, index_name='vector', node_label='test')
    hits = existing.similarity_search('foo', k=1)
    assert hits == [Document(page_content='foo')]
    drop_vector_indexes(existing)
|
Test if node label is misspelled, but index name is correct.
|
_generate
|
forced = False
function_call = ''
if 'functions' in kwargs:
if 'function_call' in kwargs:
function_call = kwargs['function_call']
del kwargs['function_call']
else:
function_call = 'auto'
if function_call != 'none':
content = prompt.format(tools=json.dumps(kwargs['functions'], indent=2)
)
system = SystemMessage(content=content)
messages = [system] + messages
if isinstance(function_call, dict):
forced = True
function_call_name = function_call['name']
messages.append(AIMessage(content=f'<tool>{function_call_name}</tool>')
)
del kwargs['functions']
if stop is None:
stop = ['</tool_input>']
else:
stop.append('</tool_input>')
elif 'function_call' in kwargs:
raise ValueError('if `function_call` provided, `functions` must also be')
response = self.model.predict_messages(messages, stop=stop, callbacks=
run_manager, **kwargs)
completion = cast(str, response.content)
if forced:
tag_parser = TagParser()
if '<tool_input>' in completion:
tag_parser.feed(completion.strip() + '</tool_input>')
v1 = tag_parser.parse_data['tool_input'][0]
arguments = json.dumps(_destrip(v1))
else:
v1 = completion
arguments = ''
kwargs = {'function_call': {'name': function_call_name, 'arguments':
arguments}}
message = AIMessage(content='', additional_kwargs=kwargs)
return ChatResult(generations=[ChatGeneration(message=message)])
elif '<tool>' in completion:
tag_parser = TagParser()
tag_parser.feed(completion.strip() + '</tool_input>')
msg = completion.split('<tool>')[0].strip()
v1 = tag_parser.parse_data['tool_input'][0]
kwargs = {'function_call': {'name': tag_parser.parse_data['tool'][0],
'arguments': json.dumps(_destrip(v1))}}
message = AIMessage(content=msg, additional_kwargs=kwargs)
return ChatResult(generations=[ChatGeneration(message=message)])
else:
response.content = cast(str, response.content).strip()
return ChatResult(generations=[ChatGeneration(message=response)])
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->ChatResult:
    """Generate a chat result, emulating OpenAI-style function calling.

    When ``functions`` is present in kwargs, a system message built from the
    module-level ``prompt`` template (describing the tools as JSON) is
    prepended, and the model's ``<tool>``/``<tool_input>`` tagged output is
    parsed back into a ``function_call`` dict in ``additional_kwargs``.
    """
    # True when a specific tool is forced via a dict-valued `function_call`.
    forced = False
    function_call = ''
    if 'functions' in kwargs:
        if 'function_call' in kwargs:
            function_call = kwargs['function_call']
            del kwargs['function_call']
        else:
            # Mirror the OpenAI default: let the model decide.
            function_call = 'auto'
        if function_call != 'none':
            # Advertise the available tools to the model via a system message.
            content = prompt.format(tools=json.dumps(kwargs['functions'],
                indent=2))
            system = SystemMessage(content=content)
            messages = [system] + messages
        if isinstance(function_call, dict):
            # Force the named tool by pre-seeding the assistant turn with an
            # opening <tool> tag; the model then only produces the input.
            forced = True
            function_call_name = function_call['name']
            messages.append(AIMessage(content=
                f'<tool>{function_call_name}</tool>'))
        del kwargs['functions']
        # Stop generation once the tool input is closed.  NOTE(review):
        # `stop.append` mutates the caller-supplied list — confirm intended.
        if stop is None:
            stop = ['</tool_input>']
        else:
            stop.append('</tool_input>')
    elif 'function_call' in kwargs:
        raise ValueError(
            'if `function_call` provided, `functions` must also be')
    response = self.model.predict_messages(messages, stop=stop, callbacks=
        run_manager, **kwargs)
    completion = cast(str, response.content)
    if forced:
        # Forced call: the whole completion is tool input; content stays ''.
        tag_parser = TagParser()
        if '<tool_input>' in completion:
            tag_parser.feed(completion.strip() + '</tool_input>')
            v1 = tag_parser.parse_data['tool_input'][0]
            arguments = json.dumps(_destrip(v1))
        else:
            # No tagged input produced — report an empty arguments string.
            v1 = completion
            arguments = ''
        kwargs = {'function_call': {'name': function_call_name, 'arguments':
            arguments}}
        message = AIMessage(content='', additional_kwargs=kwargs)
        return ChatResult(generations=[ChatGeneration(message=message)])
    elif '<tool>' in completion:
        # Unforced tool use: keep any text before the <tool> tag as content.
        tag_parser = TagParser()
        tag_parser.feed(completion.strip() + '</tool_input>')
        msg = completion.split('<tool>')[0].strip()
        v1 = tag_parser.parse_data['tool_input'][0]
        kwargs = {'function_call': {'name': tag_parser.parse_data['tool'][0
            ], 'arguments': json.dumps(_destrip(v1))}}
        message = AIMessage(content=msg, additional_kwargs=kwargs)
        return ChatResult(generations=[ChatGeneration(message=message)])
    else:
        # Plain text answer — just strip surrounding whitespace.
        response.content = cast(str, response.content).strip()
        return ChatResult(generations=[ChatGeneration(message=response)])
| null |
input_keys
|
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
    """The single input key this chain expects."""
    single_key = self.input_key
    return [single_key]
| null |
test_huggingface_endpoint_call_error
|
"""Test valid call to HuggingFace that errors."""
llm = HuggingFaceEndpoint(model_kwargs={'max_new_tokens': -1})
with pytest.raises(ValueError):
llm('Say foo:')
|
def test_huggingface_endpoint_call_error() ->None:
    """An invalid max_new_tokens value should surface as a ValueError."""
    # -1 is rejected by the endpoint; the wrapper should raise, not swallow.
    endpoint_llm = HuggingFaceEndpoint(model_kwargs={'max_new_tokens': -1})
    with pytest.raises(ValueError):
        endpoint_llm('Say foo:')
|
Test valid call to HuggingFace that errors.
|
get_cassandra_connection
|
contact_points = [cp.strip() for cp in os.environ.get(
'CASSANDRA_CONTACT_POINTS', '').split(',') if cp.strip()]
CASSANDRA_KEYSPACE = os.environ['CASSANDRA_KEYSPACE']
CASSANDRA_USERNAME = os.environ.get('CASSANDRA_USERNAME')
CASSANDRA_PASSWORD = os.environ.get('CASSANDRA_PASSWORD')
if CASSANDRA_USERNAME and CASSANDRA_PASSWORD:
auth_provider = PlainTextAuthProvider(CASSANDRA_USERNAME,
CASSANDRA_PASSWORD)
else:
auth_provider = None
c_cluster = Cluster(contact_points if contact_points else None,
auth_provider=auth_provider)
session = c_cluster.connect()
return session, CASSANDRA_KEYSPACE
|
def get_cassandra_connection():
    """Open a Cassandra session configured from CASSANDRA_* env variables.

    Returns a (session, keyspace) tuple.  Auth is only used when both the
    username and password variables are set.
    """
    raw_points = os.environ.get('CASSANDRA_CONTACT_POINTS', '')
    contact_points = [part.strip() for part in raw_points.split(',') if
        part.strip()]
    keyspace = os.environ['CASSANDRA_KEYSPACE']
    username = os.environ.get('CASSANDRA_USERNAME')
    password = os.environ.get('CASSANDRA_PASSWORD')
    auth_provider = PlainTextAuthProvider(username, password
        ) if username and password else None
    cluster = Cluster(contact_points or None, auth_provider=auth_provider)
    return cluster.connect(), keyspace
| null |
combine_docs
|
"""Combine documents into a single string.
Args:
docs: List[Document], the documents to combine
**kwargs: Other parameters to use in combining documents, often
other inputs to the prompt.
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
|
@abstractmethod
def combine_docs(self, docs: List[Document], **kwargs: Any) ->Tuple[str, dict]:
    """Combine documents into a single string.

    Args:
        docs: List[Document], the documents to combine
        **kwargs: Other parameters to use in combining documents, often
            other inputs to the prompt.

    Returns:
        The first element returned is the single string output. The second
        element returned is a dictionary of other keys to return.
    """
|
Combine documents into a single string.
Args:
docs: List[Document], the documents to combine
**kwargs: Other parameters to use in combining documents, often
other inputs to the prompt.
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
|
_call_eas
|
"""Generate text from the eas service."""
headers = {'Content-Type': 'application/json', 'Authorization':
f'{self.eas_service_token}'}
if self.version == '1.0':
body = {'input_ids': f'{prompt}'}
else:
body = {'prompt': f'{prompt}'}
for key, value in params.items():
body[key] = value
response = requests.post(self.eas_service_url, headers=headers, json=body)
if response.status_code != 200:
raise Exception(
f'Request failed with status code {response.status_code} and message {response.text}'
)
try:
return json.loads(response.text)
except Exception as e:
if isinstance(e, json.decoder.JSONDecodeError):
return response.text
raise e
|
def _call_eas(self, prompt: str='', params: Optional[Dict]=None) ->Any:
    """Generate text from the eas service.

    Args:
        prompt: The prompt text to send to the service.
        params: Extra request parameters merged into the request body.

    Returns:
        The decoded JSON response, or the raw response text if the body
        is not valid JSON.

    Raises:
        Exception: If the service answers with a non-200 status code.
    """
    headers = {'Content-Type': 'application/json', 'Authorization':
        f'{self.eas_service_token}'}
    # The request-body key for the prompt changed between service versions.
    if self.version == '1.0':
        body = {'input_ids': f'{prompt}'}
    else:
        body = {'prompt': f'{prompt}'}
    # `params` used to default to a shared mutable dict ({}); default to
    # None and normalize here to avoid cross-call aliasing.
    for key, value in (params or {}).items():
        body[key] = value
    response = requests.post(self.eas_service_url, headers=headers, json=body)
    if response.status_code != 200:
        raise Exception(
            f'Request failed with status code {response.status_code} and message {response.text}'
            )
    try:
        return json.loads(response.text)
    except json.decoder.JSONDecodeError:
        # Some responses are plain text; fall back to the raw body instead
        # of the previous broad catch-and-isinstance dance.
        return response.text
|
Generate text from the eas service.
|
validate_naming
|
"""Fix backwards compatibility in naming."""
if 'combine_document_chain' in values:
values['combine_documents_chain'] = values.pop('combine_document_chain')
return values
|
@root_validator(pre=True)
def validate_naming(cls, values: Dict) ->Dict:
    """Fix backwards compatibility in naming."""
    # Accept the legacy singular key and migrate it to the current name.
    legacy_key = 'combine_document_chain'
    if legacy_key in values:
        values['combine_documents_chain'] = values.pop(legacy_key)
    return values
|
Fix backwards compatibility in naming.
|
_import_atlas
|
from langchain_community.vectorstores.atlas import AtlasDB
return AtlasDB
|
def _import_atlas() ->Any:
    """Lazily import and return the AtlasDB vector store class."""
    from langchain_community.vectorstores.atlas import AtlasDB as atlas_cls
    return atlas_cls
| null |
get_issues
|
"""
Fetches all open issues from the repo
Returns:
str: A plaintext report containing the number of issues
and each issue's title and number.
"""
issues = self.gitlab_repo_instance.issues.list(state='opened')
if len(issues) > 0:
parsed_issues = self.parse_issues(issues)
parsed_issues_str = 'Found ' + str(len(parsed_issues)
) + ' issues:\n' + str(parsed_issues)
return parsed_issues_str
else:
return 'No open issues available'
|
def get_issues(self) ->str:
    """
    Fetches all open issues from the repo

    Returns:
        str: A plaintext report containing the number of issues
        and each issue's title and number.
    """
    open_issues = self.gitlab_repo_instance.issues.list(state='opened')
    if not len(open_issues):
        return 'No open issues available'
    parsed_issues = self.parse_issues(open_issues)
    report = 'Found ' + str(len(parsed_issues)) + ' issues:\n' + str(
        parsed_issues)
    return report
|
Fetches all open issues from the repo
Returns:
str: A plaintext report containing the number of issues
and each issue's title and number.
|
fetch_memories
|
"""Fetch related memories."""
if now is not None:
with mock_now(now):
return self.memory_retriever.get_relevant_documents(observation)
else:
return self.memory_retriever.get_relevant_documents(observation)
|
def fetch_memories(self, observation: str, now: Optional[datetime]=None
    ) ->List[Document]:
    """Retrieve memories related to *observation*, optionally at a fixed time."""
    if now is None:
        return self.memory_retriever.get_relevant_documents(observation)
    # Pin "now" for time-weighted retrieval while fetching.
    with mock_now(now):
        return self.memory_retriever.get_relevant_documents(observation)
|
Fetch related memories.
|
extend
|
"""Add all nodes and edges from another graph.
Note this doesn't check for duplicates, nor does it connect the graphs."""
self.nodes.update(graph.nodes)
self.edges.extend(graph.edges)
|
def extend(self, graph: Graph) ->None:
    """Merge another graph's nodes and edges into this one.

    Note this doesn't check for duplicates, nor does it connect the graphs.
    """
    self.edges.extend(graph.edges)
    self.nodes.update(graph.nodes)
|
Add all nodes and edges from another graph.
Note this doesn't check for duplicates, nor does it connect the graphs.
|
config_specs
|
return get_unique_config_specs(spec for step in self.steps.values() for
spec in step.config_specs)
|
@property
def config_specs(self) ->List[ConfigurableFieldSpec]:
    """Unique config specs aggregated across every step."""
    all_specs = (spec for step in self.steps.values() for spec in step.
        config_specs)
    return get_unique_config_specs(all_specs)
| null |
embed_query
|
"""Compute query embeddings using a JohnSnowLabs transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
|
def embed_query(self, text: str) ->List[float]:
    """Compute query embeddings using a JohnSnowLabs transformer model.

    Args:
        text: The text to embed.

    Returns:
        Embeddings for the text.
    """
    # Delegate to the batch API with a single-element batch.
    embeddings = self.embed_documents([text])
    return embeddings[0]
|
Compute query embeddings using a JohnSnowLabs transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
|
get_astream
|
"""Call to client astream methods with call scope"""
payload = self.get_payload(inputs=inputs, stream=True, labels=labels, **kwargs)
return self.client.get_req_astream(self.model, stop=stop, payload=payload)
|
def get_astream(self, inputs: Sequence[Dict], labels: Optional[dict]=None,
    stop: Optional[Sequence[str]]=None, **kwargs: Any) ->AsyncIterator:
    """Call to client astream methods with call scope"""
    stream_payload = self.get_payload(inputs=inputs, stream=True, labels=
        labels, **kwargs)
    return self.client.get_req_astream(self.model, stop=stop, payload=
        stream_payload)
|
Call to client astream methods with call scope
|
lc_attributes
|
attributes: Dict[str, Any] = {}
if self.model:
attributes['model'] = self.model
if self.streaming:
attributes['streaming'] = self.streaming
if self.return_type:
attributes['return_type'] = self.return_type
return attributes
|
@property
def lc_attributes(self) ->Dict[str, Any]:
    """Serializable constructor attributes, omitting falsy values."""
    attrs: Dict[str, Any] = {}
    # Same truthiness checks and insertion order as before.
    for name in ('model', 'streaming', 'return_type'):
        value = getattr(self, name)
        if value:
            attrs[name] = value
    return attrs
| null |
_get_entity_from_observation
|
prompt = PromptTemplate.from_template(
'What is the observed entity in the following observation? {observation}' +
'\nEntity=')
return self.chain(prompt).run(observation=observation).strip()
|
def _get_entity_from_observation(self, observation: str) ->str:
    """Ask the LLM chain which entity the observation is about."""
    template = (
        'What is the observed entity in the following observation? {observation}'
         + '\nEntity=')
    entity_prompt = PromptTemplate.from_template(template)
    return self.chain(entity_prompt).run(observation=observation).strip()
| null |
_run
|
"""Use the tool."""
return self.search_wrapper.run(query)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
    =None) ->str:
    """Use the tool."""
    result = self.search_wrapper.run(query)
    return result
|
Use the tool.
|
on_chain_start
|
"""Run when chain starts."""
self.chain_run_id = kwargs.get('run_id', None)
|
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any],
    **kwargs: Any) ->None:
    """Run when chain starts."""
    # Remember the run id so later callbacks can correlate with this run.
    self.chain_run_id = kwargs.get('run_id')
|
Run when chain starts.
|
mocked_requests_post
|
assert url.startswith(_INFINITY_BASE_URL)
assert 'model' in json and _MODEL_ID in json['model']
assert json
assert headers
assert 'input' in json and isinstance(json['input'], list)
embeddings = []
for inp in json['input']:
if 'pizza' in inp:
v = [1.0, 0.0, 0.0]
elif 'document' in inp:
v = [0.0, 0.9, 0.0]
else:
v = [0.0, 0.0, -1.0]
if len(inp) > 10:
v[2] += 0.1
embeddings.append({'embedding': v})
return MockResponse(json_data={'data': embeddings}, status_code=200)
|
def mocked_requests_post(url: str, headers: dict, json: dict) ->MockResponse:
    """Fake the Infinity embeddings endpoint for unit tests."""
    assert url.startswith(_INFINITY_BASE_URL)
    assert 'model' in json and _MODEL_ID in json['model']
    assert json
    assert headers
    assert 'input' in json and isinstance(json['input'], list)
    data = []
    for text in json['input']:
        if 'pizza' in text:
            vector = [1.0, 0.0, 0.0]
        elif 'document' in text:
            vector = [0.0, 0.9, 0.0]
        else:
            vector = [0.0, 0.0, -1.0]
        # Nudge the last component for long inputs so length is observable.
        if len(text) > 10:
            vector[2] += 0.1
        data.append({'embedding': vector})
    return MockResponse(json_data={'data': data}, status_code=200)
| null |
test_chat_valid_with_partial_variables
|
messages = [HumanMessagePromptTemplate.from_template(
'Do something with {question} using {context} giving it like {formatins}')]
prompt = ChatPromptTemplate(messages=messages, input_variables=['question',
'context'], partial_variables={'formatins': 'some structure'})
assert set(prompt.input_variables) == {'question', 'context'}
assert prompt.partial_variables == {'formatins': 'some structure'}
|
def test_chat_valid_with_partial_variables() ->None:
    """Partial variables are excluded from the prompt's input variables."""
    template_messages = [HumanMessagePromptTemplate.from_template(
        'Do something with {question} using {context} giving it like {formatins}'
        )]
    chat_prompt = ChatPromptTemplate(messages=template_messages,
        input_variables=['question', 'context'], partial_variables={
        'formatins': 'some structure'})
    assert set(chat_prompt.input_variables) == {'question', 'context'}
    assert chat_prompt.partial_variables == {'formatins': 'some structure'}
| null |
output_schema
|
"""The type of output this runnable produces specified as a pydantic model."""
return self.get_output_schema()
|
@property
def output_schema(self) ->Type[BaseModel]:
    """The type of output this runnable produces specified as a pydantic model."""
    schema = self.get_output_schema()
    return schema
|
The type of output this runnable produces specified as a pydantic model.
|
test_test_group_dependencies
|
"""Check if someone is attempting to add additional test dependencies.
Only dependencies associated with test running infrastructure should be added
to the test group; e.g., pytest, pytest-cov etc.
Examples of dependencies that should NOT be included: boto3, azure, postgres, etc.
"""
test_group_deps = sorted(poetry_conf['group']['test']['dependencies'])
assert test_group_deps == sorted(['duckdb-engine', 'freezegun',
'langchain-core', 'lark', 'pandas', 'pytest', 'pytest-asyncio',
'pytest-cov', 'pytest-dotenv', 'pytest-mock', 'pytest-socket',
'pytest-watcher', 'responses', 'syrupy', 'requests-mock'])
|
def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) ->None:
"""Check if someone is attempting to add additional test dependencies.
Only dependencies associated with test running infrastructure should be added
to the test group; e.g., pytest, pytest-cov etc.
Examples of dependencies that should NOT be included: boto3, azure, postgres, etc.
"""
test_group_deps = sorted(poetry_conf['group']['test']['dependencies'])
assert test_group_deps == sorted(['duckdb-engine', 'freezegun',
'langchain-core', 'lark', 'pandas', 'pytest', 'pytest-asyncio',
'pytest-cov', 'pytest-dotenv', 'pytest-mock', 'pytest-socket',
'pytest-watcher', 'responses', 'syrupy', 'requests-mock'])
|
Check if someone is attempting to add additional test dependencies.
Only dependencies associated with test running infrastructure should be added
to the test group; e.g., pytest, pytest-cov etc.
Examples of dependencies that should NOT be included: boto3, azure, postgres, etc.
|
_fix_text
|
"""Fix the text."""
raise ValueError('fix_text not implemented for this agent.')
|
def _fix_text(self, text: str) ->str:
    """Fix the text; unsupported for this agent."""
    message = 'fix_text not implemented for this agent.'
    raise ValueError(message)
|
Fix the text.
|
test_stream
|
"""Test streaming tokens from NVIDIA TRT."""
llm = TritonTensorRTLLM(model_name=_MODEL_NAME)
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token, str)
|
@pytest.mark.skip(reason='Need a working Triton server')
def test_stream() ->None:
    """Every streamed chunk from NVIDIA TRT should be a string."""
    llm = TritonTensorRTLLM(model_name=_MODEL_NAME)
    chunks = llm.stream("I'm Pickle Rick")
    for chunk in chunks:
        assert isinstance(chunk, str)
|
Test streaming tokens from NVIDIA TRT.
|
_load_refine_chain
|
initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
_refine_llm = refine_llm or llm
refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose)
return RefineDocumentsChain(initial_llm_chain=initial_chain,
refine_llm_chain=refine_chain, document_variable_name=
document_variable_name, initial_response_name=initial_response_name,
verbose=verbose, **kwargs)
|
def _load_refine_chain(llm: BaseLanguageModel, question_prompt:
    BasePromptTemplate=refine_prompts.PROMPT, refine_prompt:
    BasePromptTemplate=refine_prompts.REFINE_PROMPT, document_variable_name:
    str='text', initial_response_name: str='existing_answer', refine_llm:
    Optional[BaseLanguageModel]=None, verbose: Optional[bool]=None, **
    kwargs: Any) ->RefineDocumentsChain:
    """Assemble a RefineDocumentsChain, optionally with a separate refine LLM."""
    first_pass = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
    # Fall back to the primary LLM when no dedicated refine LLM is given.
    refine_pass = LLMChain(llm=refine_llm or llm, prompt=refine_prompt,
        verbose=verbose)
    return RefineDocumentsChain(initial_llm_chain=first_pass,
        refine_llm_chain=refine_pass, document_variable_name=
        document_variable_name, initial_response_name=initial_response_name,
        verbose=verbose, **kwargs)
| null |
llm_with_fallbacks
|
error_llm = FakeListLLM(responses=['foo'], i=1)
pass_llm = FakeListLLM(responses=['bar'])
return error_llm.with_fallbacks([pass_llm])
|
@pytest.fixture()
def llm_with_fallbacks() ->RunnableWithFallbacks:
    """An LLM that errors on its first call, backed by one that succeeds."""
    failing = FakeListLLM(responses=['foo'], i=1)
    fallback = FakeListLLM(responses=['bar'])
    return failing.with_fallbacks([fallback])
| null |
test_similarity_search_without_metadata
|
"""Test end to end construction and search without metadata."""
texts = ['foo', 'bar', 'baz']
docsearch = Weaviate.from_texts(texts, embedding_openai, weaviate_url=
weaviate_url)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
@pytest.mark.vcr(ignore_localhost=True)
def test_similarity_search_without_metadata(self, weaviate_url: str,
    embedding_openai: OpenAIEmbeddings) ->None:
    """End-to-end construction and search with no metadata attached."""
    corpus = ['foo', 'bar', 'baz']
    store = Weaviate.from_texts(corpus, embedding_openai, weaviate_url=
        weaviate_url)
    hits = store.similarity_search('foo', k=1)
    assert hits == [Document(page_content='foo')]
|
Test end to end construction and search without metadata.
|
_create_index
|
from google.api_core.exceptions import ClientError
if self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
distance_type = 'EUCLIDEAN'
elif self.distance_strategy == DistanceStrategy.COSINE:
distance_type = 'COSINE'
else:
distance_type = 'EUCLIDEAN'
index_name = f'{self.table_name}_langchain_index'
try:
sql = f"""
CREATE VECTOR INDEX IF NOT EXISTS
`{index_name}`
ON `{self.full_table_id}`({self.text_embedding_field})
OPTIONS(distance_type="{distance_type}", index_type="IVF")
"""
self.bq_client.query(sql).result()
self._have_index = True
except ClientError as ex:
self._logger.debug('Vector index creation failed (%s).', ex.args[0])
finally:
self._creating_index = False
|
def _create_index(self):
    """Create the BigQuery vector index, recording success in _have_index."""
    from google.api_core.exceptions import ClientError
    # Map the configured strategy onto BigQuery's distance_type; anything
    # other than COSINE falls back to EUCLIDEAN (same as before).
    if self.distance_strategy == DistanceStrategy.COSINE:
        distance_type = 'COSINE'
    else:
        distance_type = 'EUCLIDEAN'
    index_name = f'{self.table_name}_langchain_index'
    try:
        sql = f"""
            CREATE VECTOR INDEX IF NOT EXISTS
            `{index_name}`
            ON `{self.full_table_id}`({self.text_embedding_field})
            OPTIONS(distance_type="{distance_type}", index_type="IVF")
        """
        self.bq_client.query(sql).result()
        self._have_index = True
    except ClientError as ex:
        # Index creation is best-effort; log and continue without it.
        self._logger.debug('Vector index creation failed (%s).', ex.args[0])
    finally:
        self._creating_index = False
| null |
_format_messages
|
"""Format messages for anthropic."""
"""
[
{
"role": _message_type_lookups[m.type],
"content": [_AnthropicMessageContent(text=m.content).dict()],
}
for m in messages
]
"""
system = None
formatted_messages = []
for i, message in enumerate(messages):
if not isinstance(message.content, str):
raise ValueError(
'Anthropic Messages API only supports text generation.')
if message.type == 'system':
if i != 0:
raise ValueError(
'System message must be at beginning of message list.')
system = message.content
else:
formatted_messages.append({'role': _message_type_lookups[message.
type], 'content': message.content})
return system, formatted_messages
|
def _format_messages(messages: List[BaseMessage]) ->Tuple[Optional[str],
    List[Dict]]:
    """Format messages for anthropic.

    Returns a (system, messages) pair: the content of a leading system
    message (if any) plus the remaining messages as role/content dicts.
    """
    system: Optional[str] = None
    formatted: List[Dict] = []
    for index, message in enumerate(messages):
        if not isinstance(message.content, str):
            raise ValueError(
                'Anthropic Messages API only supports text generation.')
        if message.type != 'system':
            formatted.append({'role': _message_type_lookups[message.type],
                'content': message.content})
            continue
        # A system message is only honored in the leading position.
        if index != 0:
            raise ValueError(
                'System message must be at beginning of message list.')
        system = message.content
    return system, formatted
|
Format messages for anthropic.
|
predict
|
num_items = len(event.to_select_from)
return [(i, 1.0 / num_items) for i in range(num_items)]
|
def predict(self, event: PickBestEvent) ->List[Tuple[int, float]]:
    """Assign a uniform probability to every candidate in the event."""
    count = len(event.to_select_from)
    uniform = 1.0 / count
    return [(index, uniform) for index in range(count)]
| null |
_import_annoy
|
from langchain_community.vectorstores.annoy import Annoy
return Annoy
|
def _import_annoy() ->Any:
    """Lazily import and return the Annoy vector store class."""
    from langchain_community.vectorstores.annoy import Annoy as annoy_cls
    return annoy_cls
| null |
test_placeholder
|
"""Used for compiling integration tests without running any real tests."""
pass
|
@pytest.mark.compile
def test_placeholder() ->None:
    """Used for compiling integration tests without running any real tests."""
    return None
|
Used for compiling integration tests without running any real tests.
|
__init__
|
self._approve = approve
self._should_check = should_check
|
def __init__(self, approve: Callable[[Any], Awaitable[bool]]=
    _adefault_approve, should_check: Callable[[Dict[str, Any]], bool]=
    _default_true):
    """Store the approval coroutine and the predicate deciding what to check."""
    self._should_check = should_check
    self._approve = approve
| null |
test_cloudflare_workersai_call
|
responses.add(responses.POST,
'https://api.cloudflare.com/client/v4/accounts/my_account_id/ai/run/@cf/meta/llama-2-7b-chat-int8'
, json={'result': {'response': '4'}}, status=200)
llm = CloudflareWorkersAI(account_id='my_account_id', api_token=
'my_api_token', model='@cf/meta/llama-2-7b-chat-int8')
output = llm('What is 2 + 2?')
assert output == '4'
|
@responses.activate
def test_cloudflare_workersai_call() ->None:
    """A mocked Workers AI call should return the stubbed model response."""
    responses.add(responses.POST,
        'https://api.cloudflare.com/client/v4/accounts/my_account_id/ai/run/@cf/meta/llama-2-7b-chat-int8'
        , json={'result': {'response': '4'}}, status=200)
    workers_ai = CloudflareWorkersAI(account_id='my_account_id', api_token=
        'my_api_token', model='@cf/meta/llama-2-7b-chat-int8')
    answer = workers_ai('What is 2 + 2?')
    assert answer == '4'
| null |
_prepare_evaluator_output
|
feedback: EvaluationResult = output['feedback']
if RUN_KEY not in feedback.evaluator_info:
feedback.evaluator_info[RUN_KEY] = output[RUN_KEY]
return feedback
|
def _prepare_evaluator_output(self, output: Dict[str, Any]) ->EvaluationResult:
    """Attach the run info to the feedback's evaluator_info if missing."""
    result: EvaluationResult = output['feedback']
    if RUN_KEY not in result.evaluator_info:
        result.evaluator_info[RUN_KEY] = output[RUN_KEY]
    return result
| null |
test_exists
|
"""Test checking if keys exist in the database."""
keys = ['key1', 'key2', 'key3']
manager.update(keys)
exists = manager.exists(keys)
assert len(exists) == len(keys)
assert exists == [True, True, True]
exists = manager.exists(['key1', 'key4'])
assert len(exists) == 2
assert exists == [True, False]
|
def test_exists(manager: SQLRecordManager) ->None:
    """Test checking if keys exist in the database."""
    tracked_keys = ['key1', 'key2', 'key3']
    manager.update(tracked_keys)
    present = manager.exists(tracked_keys)
    assert len(present) == len(tracked_keys)
    assert present == [True, True, True]
    mixed = manager.exists(['key1', 'key4'])
    assert len(mixed) == 2
    assert mixed == [True, False]
|
Test checking if keys exist in the database.
|
test_tiledb_ivf_flat_updates
|
"""Test end to end construction and search."""
dimensions = 10
index_uri = str(tmp_path)
embedding = ConsistentFakeEmbeddings(dimensionality=dimensions)
TileDB.create(index_uri=index_uri, index_type='IVF_FLAT', dimensions=
dimensions, vector_type=np.dtype('float32'), metadatas=False)
docsearch = TileDB.load(index_uri=index_uri, embedding=embedding)
output = docsearch.similarity_search('foo', k=2)
assert output == []
docsearch.add_texts(texts=['foo', 'bar', 'baz'], ids=['1', '2', '3'])
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
docsearch.delete(['1', '3'])
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='bar')]
output = docsearch.similarity_search('baz', k=1)
assert output == [Document(page_content='bar')]
docsearch.add_texts(texts=['fooo', 'bazz'], ids=['4', '5'])
output = docsearch.similarity_search('fooo', k=1)
assert output == [Document(page_content='fooo')]
output = docsearch.similarity_search('bazz', k=1)
assert output == [Document(page_content='bazz')]
docsearch.consolidate_updates()
output = docsearch.similarity_search('fooo', k=1)
assert output == [Document(page_content='fooo')]
output = docsearch.similarity_search('bazz', k=1)
assert output == [Document(page_content='bazz')]
|
@pytest.mark.requires('tiledb-vector-search')
def test_tiledb_ivf_flat_updates(tmp_path: Path) ->None:
    """Exercise add/delete/consolidate cycles against an IVF_FLAT index."""
    dims = 10
    uri = str(tmp_path)
    TileDB.create(index_uri=uri, index_type='IVF_FLAT', dimensions=dims,
        vector_type=np.dtype('float32'), metadatas=False)
    store = TileDB.load(index_uri=uri, embedding=ConsistentFakeEmbeddings(
        dimensionality=dims))

    def top_hit(query: str) ->list:
        # Convenience wrapper: single nearest neighbour for `query`.
        return store.similarity_search(query, k=1)
    assert store.similarity_search('foo', k=2) == []
    store.add_texts(texts=['foo', 'bar', 'baz'], ids=['1', '2', '3'])
    assert top_hit('foo') == [Document(page_content='foo')]
    store.delete(['1', '3'])
    assert top_hit('foo') == [Document(page_content='bar')]
    assert top_hit('baz') == [Document(page_content='bar')]
    store.add_texts(texts=['fooo', 'bazz'], ids=['4', '5'])
    assert top_hit('fooo') == [Document(page_content='fooo')]
    assert top_hit('bazz') == [Document(page_content='bazz')]
    store.consolidate_updates()
    assert top_hit('fooo') == [Document(page_content='fooo')]
    assert top_hit('bazz') == [Document(page_content='bazz')]
|
Test end to end construction and search.
|
_convert_message_to_dict
|
if isinstance(message, ChatMessage):
message_dict = {'role': message.role, 'content': message.content}
elif isinstance(message, HumanMessage):
message_dict = {'role': 'user', 'content': message.content}
elif isinstance(message, AIMessage):
message_dict = {'role': 'assistant', 'content': message.content}
elif isinstance(message, SystemMessage):
message_dict = {'role': 'system', 'content': message.content}
elif isinstance(message, FunctionMessage):
raise ValueError(
'Function messages are not supported by Databricks. Please create a feature request at https://github.com/mlflow/mlflow/issues.'
)
else:
raise ValueError(f'Got unknown message type: {message}')
if 'function_call' in message.additional_kwargs:
ChatMlflow._raise_functions_not_supported()
if message.additional_kwargs:
logger.warning(
'Additional message arguments are unsupported by Databricks and will be ignored: %s'
, message.additional_kwargs)
return message_dict
|
@staticmethod
def _convert_message_to_dict(message: BaseMessage) ->dict:
    """Translate a LangChain message into the ``role``/``content`` dict the
    endpoint consumes.

    Raises:
        ValueError: if the message is a ``FunctionMessage`` (unsupported) or
            of an unrecognized type.
    """
    unknown = object()
    role = unknown
    if isinstance(message, ChatMessage):
        role = message.role
    elif isinstance(message, HumanMessage):
        role = 'user'
    elif isinstance(message, AIMessage):
        role = 'assistant'
    elif isinstance(message, SystemMessage):
        role = 'system'
    elif isinstance(message, FunctionMessage):
        raise ValueError(
            'Function messages are not supported by Databricks. Please create a feature request at https://github.com/mlflow/mlflow/issues.'
            )
    if role is unknown:
        raise ValueError(f'Got unknown message type: {message}')
    message_dict = {'role': role, 'content': message.content}
    if 'function_call' in message.additional_kwargs:
        ChatMlflow._raise_functions_not_supported()
    if message.additional_kwargs:
        logger.warning(
            'Additional message arguments are unsupported by Databricks and will be ignored: %s'
            , message.additional_kwargs)
    return message_dict
| null |
_get_resource
|
endpoint = IUGU_ENDPOINTS.get(self.resource)
if endpoint is None:
return []
return self._make_request(endpoint)
|
def _get_resource(self) ->List[Document]:
    """Load documents from the endpoint mapped to ``self.resource``.

    Returns an empty list when the resource has no known endpoint.
    """
    target = IUGU_ENDPOINTS.get(self.resource)
    return self._make_request(target) if target is not None else []
| null |
from_llm
|
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
gremlin_generation_chain = LLMChain(llm=llm, prompt=gremlin_prompt)
return cls(qa_chain=qa_chain, gremlin_generation_chain=
gremlin_generation_chain, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate=
    CYPHER_QA_PROMPT, gremlin_prompt: BasePromptTemplate=
    GREMLIN_GENERATION_PROMPT, **kwargs: Any) ->HugeGraphQAChain:
    """Create a ``HugeGraphQAChain`` wired up from a single LLM.

    The QA chain and the Gremlin-generation chain share ``llm`` but each
    uses its own prompt.
    """
    qa = LLMChain(llm=llm, prompt=qa_prompt)
    gremlin_gen = LLMChain(llm=llm, prompt=gremlin_prompt)
    return cls(qa_chain=qa, gremlin_generation_chain=gremlin_gen, **kwargs)
|
Initialize from LLM.
|
_call
|
"""Execute the chain.
This is a private method that is not user-facing. It is only called within
`Chain.__call__`, which is the user-facing wrapper method that handles
callbacks configuration and some input/output processing.
Args:
inputs: A dict of named inputs to the chain. Assumed to contain all inputs
specified in `Chain.input_keys`, including any inputs added by memory.
run_manager: The callbacks manager that contains the callback handlers for
this run of the chain.
Returns:
A dict of named outputs. Should contain all outputs specified in
`Chain.output_keys`.
"""
|
@abstractmethod
def _call(self, inputs: Dict[str, Any], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, Any]:
    """Execute the chain.

    This is a private method that is not user-facing. It is only called within
    `Chain.__call__`, which is the user-facing wrapper method that handles
    callbacks configuration and some input/output processing.

    Args:
        inputs: A dict of named inputs to the chain. Assumed to contain all
            inputs specified in `Chain.input_keys`, including any inputs
            added by memory.
        run_manager: The callbacks manager that contains the callback handlers
            for this run of the chain.

    Returns:
        A dict of named outputs. Should contain all outputs specified in
        `Chain.output_keys`.
    """
|
Execute the chain.
This is a private method that is not user-facing. It is only called within
`Chain.__call__`, which is the user-facing wrapper method that handles
callbacks configuration and some input/output processing.
Args:
inputs: A dict of named inputs to the chain. Assumed to contain all inputs
specified in `Chain.input_keys`, including any inputs added by memory.
run_manager: The callbacks manager that contains the callback handlers for
this run of the chain.
Returns:
A dict of named outputs. Should contain all outputs specified in
`Chain.output_keys`.
|
test_agent_iterator_failing_tool
|
"""Test AgentExecutorIterator with a tool that raises an exception."""
bad_action_name = 'FailingTool'
responses = [
f"""I'm turning evil
Action: {bad_action_name}
Action Input: misalignment"""
, """Oh well
Final Answer: curses foiled again"""]
fake_llm = FakeListLLM(responses=responses)
tools = [Tool(name='FailingTool', func=lambda x: 1 / 0, description=
'A tool that fails')]
agent = initialize_agent(tools, fake_llm, agent=AgentType.
ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
agent_iter = agent.iter(inputs='when was langchain made')
assert isinstance(agent_iter, AgentExecutorIterator)
iterator = iter(agent_iter)
with pytest.raises(ZeroDivisionError):
next(iterator)
|
def test_agent_iterator_failing_tool() ->None:
    """Test AgentExecutorIterator with a tool that raises an exception."""
    bad_action_name = 'FailingTool'
    scripted_outputs = [
        f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment"
        , 'Oh well\nFinal Answer: curses foiled again']
    scripted_llm = FakeListLLM(responses=scripted_outputs)
    failing_tools = [Tool(name='FailingTool', func=lambda x: 1 / 0,
        description='A tool that fails')]
    executor = initialize_agent(failing_tools, scripted_llm, agent=
        AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
    it = executor.iter(inputs='when was langchain made')
    assert isinstance(it, AgentExecutorIterator)
    # The ZeroDivisionError from the tool must propagate out of the iterator.
    with pytest.raises(ZeroDivisionError):
        next(iter(it))
|
Test AgentExecutorIterator with a tool that raises an exception.
|
__init__
|
"""Initialize Redis vector store with necessary components."""
self._check_deprecated_kwargs(kwargs)
try:
import redis
except ImportError as e:
raise ImportError(
'Could not import redis python package. Please install it with `pip install redis`.'
) from e
self.index_name = index_name
self._embeddings = embedding
try:
redis_client = get_client(redis_url=redis_url, **kwargs)
check_redis_module_exist(redis_client, REDIS_REQUIRED_MODULES)
except ValueError as e:
raise ValueError(f'Redis failed to connect: {e}')
self.client = redis_client
self.relevance_score_fn = relevance_score_fn
self._schema = self._get_schema_with_defaults(index_schema, vector_schema)
self.key_prefix = key_prefix if key_prefix is not None else f'doc:{index_name}'
|
def __init__(self, redis_url: str, index_name: str, embedding: Embeddings,
    index_schema: Optional[Union[Dict[str, str], str, os.PathLike]]=None,
    vector_schema: Optional[Dict[str, Union[str, int]]]=None,
    relevance_score_fn: Optional[Callable[[float], float]]=None, key_prefix:
    Optional[str]=None, **kwargs: Any):
    """Initialize Redis vector store with necessary components."""
    # Surface any deprecated keyword arguments before they are consumed.
    self._check_deprecated_kwargs(kwargs)
    try:
        import redis
    except ImportError as e:
        raise ImportError(
            'Could not import redis python package. Please install it with `pip install redis`.'
            ) from e
    self.index_name = index_name
    self._embeddings = embedding
    # Establish the connection and confirm the required Redis modules exist.
    try:
        connection = get_client(redis_url=redis_url, **kwargs)
        check_redis_module_exist(connection, REDIS_REQUIRED_MODULES)
    except ValueError as e:
        raise ValueError(f'Redis failed to connect: {e}')
    self.client = connection
    self.relevance_score_fn = relevance_score_fn
    # Merge the user-provided index/vector schema with defaults.
    self._schema = self._get_schema_with_defaults(index_schema, vector_schema)
    if key_prefix is None:
        key_prefix = f'doc:{index_name}'
    self.key_prefix = key_prefix
|
Initialize Redis vector store with necessary components.
|
_import_vearch
|
from langchain_community.vectorstores.vearch import Vearch
return Vearch
|
def _import_vearch() ->Any:
    """Lazily import and return the ``Vearch`` vector store class."""
    from langchain_community.vectorstores.vearch import Vearch as _Vearch
    return _Vearch
| null |
_post
|
return self.request('POST', url, request)
|
def _post(self, url: str, request: Any) ->Any:
    """Issue a POST request to ``url`` with ``request`` as the payload."""
    method = 'POST'
    return self.request(method, url, request)
| null |
from_texts
|
"""Construct Annoy wrapper from raw documents.
Args:
texts: List of documents to index.
embedding: Embedding function to use.
metadatas: List of metadata dictionaries to associate with documents.
metric: Metric to use for indexing. Defaults to "angular".
trees: Number of trees to use for indexing. Defaults to 100.
n_jobs: Number of jobs to use for indexing. Defaults to -1.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the Annoy database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import Annoy
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
index = Annoy.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
return cls.__from(texts, embeddings, embedding, metadatas, metric, trees,
n_jobs, **kwargs)
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
    Optional[List[dict]]=None, metric: str=DEFAULT_METRIC, trees: int=100,
    n_jobs: int=-1, **kwargs: Any) ->Annoy:
    """Build an Annoy vector store directly from raw texts.

    Embeds every text, stores the documents in an in-memory docstore, and
    initializes the Annoy index -- a convenient one-call way to get started.

    Args:
        texts: List of documents to index.
        embedding: Embedding function to use.
        metadatas: List of metadata dictionaries to associate with documents.
        metric: Metric to use for indexing. Defaults to "angular".
        trees: Number of trees to use for indexing. Defaults to 100.
        n_jobs: Number of jobs to use for indexing. Defaults to -1.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import Annoy
            from langchain_community.embeddings import OpenAIEmbeddings

            embeddings = OpenAIEmbeddings()
            index = Annoy.from_texts(texts, embeddings)
    """
    vectors = embedding.embed_documents(texts)
    return cls.__from(texts, vectors, embedding, metadatas, metric, trees,
        n_jobs, **kwargs)
|
Construct Annoy wrapper from raw documents.
Args:
texts: List of documents to index.
embedding: Embedding function to use.
metadatas: List of metadata dictionaries to associate with documents.
metric: Metric to use for indexing. Defaults to "angular".
trees: Number of trees to use for indexing. Defaults to 100.
n_jobs: Number of jobs to use for indexing. Defaults to -1.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the Annoy database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import Annoy
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
index = Annoy.from_texts(texts, embeddings)
|
test_openai_modelname_to_contextsize_invalid
|
"""Test model name to context size on an invalid model."""
with pytest.raises(ValueError):
OpenAI().modelname_to_contextsize('foobar')
|
def test_openai_modelname_to_contextsize_invalid() ->None:
    """An unrecognized model name must raise ``ValueError``."""
    with pytest.raises(ValueError):
        OpenAI().modelname_to_contextsize('foobar')
|
Test model name to context size on an invalid model.
|
_get_relevant_documents
|
"""Get relevant documents given a user query.
Args:
question: user query
Returns:
Unique union of relevant documents from all generated queries
"""
queries = self.generate_queries(query, run_manager)
if self.include_original:
queries.append(query)
documents = self.retrieve_documents(queries, run_manager)
return self.unique_union(documents)
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Retrieve the unique union of documents across generated query variants.

    Args:
        query: user query

    Returns:
        Unique union of relevant documents from all generated queries
    """
    variants = self.generate_queries(query, run_manager)
    if self.include_original:
        variants.append(query)
    fetched = self.retrieve_documents(variants, run_manager)
    return self.unique_union(fetched)
|
Get relevant documents given a user query.
Args:
    query: user query
Returns:
Unique union of relevant documents from all generated queries
|
_identifying_params
|
return {'responses': self.responses}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Parameters that uniquely identify this instance for caching/tracing."""
    return dict(responses=self.responses)
| null |
requires_reference
|
"""Whether this evaluator requires a reference label."""
return False
|
@property
def requires_reference(self) ->bool:
    """This evaluator does not need a reference label."""
    return False
|
Whether this evaluator requires a reference label.
|
_invocation_params
|
params = {**self._default_params, **kwargs}
if stop is not None:
params['stop'] = stop
if params.get('stream'):
params['incremental_output'] = True
return params
|
def _invocation_params(self, stop: Any, **kwargs: Any) ->Dict[str, Any]:
    """Merge default parameters with call-time overrides for one invocation."""
    merged = dict(self._default_params)
    merged.update(kwargs)
    if stop is not None:
        merged['stop'] = stop
    # Streaming responses require incremental output from the service.
    if merged.get('stream'):
        merged['incremental_output'] = True
    return merged
| null |
test_similarity_search_with_score
|
"""Test similarity search with score."""
output, score = deeplake_datastore.similarity_search_with_score('foo', k=1,
distance_metric=distance_metric)[0]
assert output == Document(page_content='foo', metadata={'page': '0'})
if distance_metric == 'cos':
assert score == 1.0
else:
assert score == 0.0
deeplake_datastore.delete_dataset()
|
def test_similarity_search_with_score(deeplake_datastore: DeepLake,
    distance_metric: str) ->None:
    """Top hit for 'foo' must be the 'foo' document with the metric's best score."""
    hits = deeplake_datastore.similarity_search_with_score('foo', k=1,
        distance_metric=distance_metric)
    doc, score = hits[0]
    assert doc == Document(page_content='foo', metadata={'page': '0'})
    expected = 1.0 if distance_metric == 'cos' else 0.0
    assert score == expected
    deeplake_datastore.delete_dataset()
|
Test similarity search with score.
|
test_model
|
"""Test model kwarg works."""
chat = ChatZhipuAI(model='chatglm_turbo')
response = chat(messages=[HumanMessage(content='Hello')])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
def test_model() ->None:
    """Test model kwarg works."""
    llm = ChatZhipuAI(model='chatglm_turbo')
    reply = llm(messages=[HumanMessage(content='Hello')])
    assert isinstance(reply, BaseMessage)
    assert isinstance(reply.content, str)
|
Test model kwarg works.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.