method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_llm_type
|
return 'chat-google-generative-ai'
|
@property
def _llm_type(self) ->str:
return 'chat-google-generative-ai'
| null |
_process_llm_result
|
run_manager.on_text(llm_output, color='green', verbose=self.verbose)
llm_output = llm_output.strip()
text_match = re.search('^```text(.*?)```', llm_output, re.DOTALL)
if text_match:
expression = text_match.group(1)
output = self._evaluate_expression(expression)
run_manager.on_text('\nAnswer: ', verbose=self.verbose)
run_manager.on_text(output, color='yellow', verbose=self.verbose)
answer = 'Answer: ' + output
elif llm_output.startswith('Answer:'):
answer = llm_output
elif 'Answer:' in llm_output:
answer = 'Answer: ' + llm_output.split('Answer:')[-1]
else:
raise ValueError(f'unknown format from LLM: {llm_output}')
return {self.output_key: answer}
|
def _process_llm_result(self, llm_output: str, run_manager:
CallbackManagerForChainRun) ->Dict[str, str]:
run_manager.on_text(llm_output, color='green', verbose=self.verbose)
llm_output = llm_output.strip()
text_match = re.search('^```text(.*?)```', llm_output, re.DOTALL)
if text_match:
expression = text_match.group(1)
output = self._evaluate_expression(expression)
run_manager.on_text('\nAnswer: ', verbose=self.verbose)
run_manager.on_text(output, color='yellow', verbose=self.verbose)
answer = 'Answer: ' + output
elif llm_output.startswith('Answer:'):
answer = llm_output
elif 'Answer:' in llm_output:
answer = 'Answer: ' + llm_output.split('Answer:')[-1]
else:
raise ValueError(f'unknown format from LLM: {llm_output}')
return {self.output_key: answer}
| null |
_construct_run_evaluator
|
if isinstance(eval_config, (EvaluatorType, str)):
if not isinstance(eval_config, EvaluatorType):
eval_config = EvaluatorType(eval_config)
evaluator_ = load_evaluator(eval_config, llm=eval_llm)
eval_type_tag = eval_config.value
else:
kwargs = {'llm': eval_llm, **eval_config.get_kwargs()}
evaluator_ = load_evaluator(eval_config.evaluator_type, **kwargs)
eval_type_tag = eval_config.evaluator_type.value
if isinstance(eval_config, smith_eval_config.SingleKeyEvalConfig):
input_key = eval_config.input_key or input_key
prediction_key = eval_config.prediction_key or prediction_key
reference_key = eval_config.reference_key or reference_key
if isinstance(evaluator_, StringEvaluator):
if evaluator_.requires_reference and reference_key is None:
raise ValueError(
f'Must specify reference_key in smith_eval.RunEvalConfig to use evaluator of type {eval_type_tag} with dataset with multiple output keys: {example_outputs}.'
)
run_evaluator = smith_eval.StringRunEvaluatorChain.from_run_and_data_type(
evaluator_, run_type, data_type, input_key=input_key,
prediction_key=prediction_key, reference_key=reference_key, tags=[
eval_type_tag])
elif isinstance(evaluator_, PairwiseStringEvaluator):
raise NotImplementedError(
f"""Run evaluator for {eval_type_tag} is not implemented. PairwiseStringEvaluators compare the outputs of two different models rather than the output of a single model. Did you mean to use a StringEvaluator instead?
See: https://python.langchain.com/docs/guides/evaluation/string/"""
)
else:
raise NotImplementedError(
f'Run evaluator for {eval_type_tag} is not implemented')
return run_evaluator
|
def _construct_run_evaluator(eval_config: Union[EvaluatorType, str,
smith_eval_config.EvalConfig], eval_llm: Optional[BaseLanguageModel],
run_type: str, data_type: DataType, example_outputs: Optional[List[str]
], reference_key: Optional[str], input_key: Optional[str],
prediction_key: Optional[str]) ->RunEvaluator:
if isinstance(eval_config, (EvaluatorType, str)):
if not isinstance(eval_config, EvaluatorType):
eval_config = EvaluatorType(eval_config)
evaluator_ = load_evaluator(eval_config, llm=eval_llm)
eval_type_tag = eval_config.value
else:
kwargs = {'llm': eval_llm, **eval_config.get_kwargs()}
evaluator_ = load_evaluator(eval_config.evaluator_type, **kwargs)
eval_type_tag = eval_config.evaluator_type.value
if isinstance(eval_config, smith_eval_config.SingleKeyEvalConfig):
input_key = eval_config.input_key or input_key
prediction_key = eval_config.prediction_key or prediction_key
reference_key = eval_config.reference_key or reference_key
if isinstance(evaluator_, StringEvaluator):
if evaluator_.requires_reference and reference_key is None:
raise ValueError(
f'Must specify reference_key in smith_eval.RunEvalConfig to use evaluator of type {eval_type_tag} with dataset with multiple output keys: {example_outputs}.'
)
run_evaluator = (smith_eval.StringRunEvaluatorChain.
from_run_and_data_type(evaluator_, run_type, data_type,
input_key=input_key, prediction_key=prediction_key,
reference_key=reference_key, tags=[eval_type_tag]))
elif isinstance(evaluator_, PairwiseStringEvaluator):
raise NotImplementedError(
f"""Run evaluator for {eval_type_tag} is not implemented. PairwiseStringEvaluators compare the outputs of two different models rather than the output of a single model. Did you mean to use a StringEvaluator instead?
See: https://python.langchain.com/docs/guides/evaluation/string/"""
)
else:
raise NotImplementedError(
f'Run evaluator for {eval_type_tag} is not implemented')
return run_evaluator
| null |
add_constraint
|
"""
Add a constraint to the constraints list.
Args:
constraint (str): The constraint to be added.
"""
self.constraints.append(constraint)
|
def add_constraint(self, constraint: str) ->None:
"""
Add a constraint to the constraints list.
Args:
constraint (str): The constraint to be added.
"""
self.constraints.append(constraint)
|
Add a constraint to the constraints list.
Args:
constraint (str): The constraint to be added.
|
_persist_run
|
"""Run the evaluator on the run.
Parameters
----------
run : Run
The run to be evaluated.
"""
if self.skip_unfinished and not run.outputs:
logger.debug(f'Skipping unfinished run {run.id}')
return
run_ = run.copy()
run_.reference_example_id = self.example_id
for evaluator in self.evaluators:
if self.executor is None:
self._evaluate_in_project(run_, evaluator)
else:
self.futures.add(self.executor.submit(self._evaluate_in_project,
run_, evaluator))
|
def _persist_run(self, run: Run) ->None:
"""Run the evaluator on the run.
Parameters
----------
run : Run
The run to be evaluated.
"""
if self.skip_unfinished and not run.outputs:
logger.debug(f'Skipping unfinished run {run.id}')
return
run_ = run.copy()
run_.reference_example_id = self.example_id
for evaluator in self.evaluators:
if self.executor is None:
self._evaluate_in_project(run_, evaluator)
else:
self.futures.add(self.executor.submit(self._evaluate_in_project,
run_, evaluator))
|
Run the evaluator on the run.
Parameters
----------
run : Run
The run to be evaluated.
|
embed_query
|
"""Return constant query embeddings.
Embeddings are identical to embed_documents(texts)[0].
Distance to each text will be that text's index,
as it was passed to embed_documents."""
return [float(1.0)] * 9 + [float(0.0)]
|
def embed_query(self, text: str) ->List[float]:
"""Return constant query embeddings.
Embeddings are identical to embed_documents(texts)[0].
Distance to each text will be that text's index,
as it was passed to embed_documents."""
return [float(1.0)] * 9 + [float(0.0)]
|
Return constant query embeddings.
Embeddings are identical to embed_documents(texts)[0].
Distance to each text will be that text's index,
as it was passed to embed_documents.
|
from_github_api_wrapper
|
operations: List[Dict] = [{'mode': 'get_issues', 'name': 'Get Issues',
'description': GET_ISSUES_PROMPT, 'args_schema': NoInput}, {'mode':
'get_issue', 'name': 'Get Issue', 'description': GET_ISSUE_PROMPT,
'args_schema': GetIssue}, {'mode': 'comment_on_issue', 'name':
'Comment on Issue', 'description': COMMENT_ON_ISSUE_PROMPT,
'args_schema': CommentOnIssue}, {'mode': 'list_open_pull_requests',
'name': 'List open pull requests (PRs)', 'description': LIST_PRS_PROMPT,
'args_schema': NoInput}, {'mode': 'get_pull_request', 'name':
'Get Pull Request', 'description': GET_PR_PROMPT, 'args_schema': GetPR},
{'mode': 'list_pull_request_files', 'name':
'Overview of files included in PR', 'description':
LIST_PULL_REQUEST_FILES, 'args_schema': GetPR}, {'mode':
'create_pull_request', 'name': 'Create Pull Request', 'description':
CREATE_PULL_REQUEST_PROMPT, 'args_schema': CreatePR}, {'mode':
'list_pull_request_files', 'name': "List Pull Requests' Files",
'description': LIST_PULL_REQUEST_FILES, 'args_schema': GetPR}, {'mode':
'create_file', 'name': 'Create File', 'description': CREATE_FILE_PROMPT,
'args_schema': CreateFile}, {'mode': 'read_file', 'name': 'Read File',
'description': READ_FILE_PROMPT, 'args_schema': ReadFile}, {'mode':
'update_file', 'name': 'Update File', 'description': UPDATE_FILE_PROMPT,
'args_schema': UpdateFile}, {'mode': 'delete_file', 'name':
'Delete File', 'description': DELETE_FILE_PROMPT, 'args_schema':
DeleteFile}, {'mode': 'list_files_in_main_branch', 'name':
'Overview of existing files in Main branch', 'description':
OVERVIEW_EXISTING_FILES_IN_MAIN, 'args_schema': NoInput}, {'mode':
'list_files_in_bot_branch', 'name':
'Overview of files in current working branch', 'description':
OVERVIEW_EXISTING_FILES_BOT_BRANCH, 'args_schema': NoInput}, {'mode':
'list_branches_in_repo', 'name': 'List branches in this repository',
'description': LIST_BRANCHES_IN_REPO_PROMPT, 'args_schema': NoInput}, {
'mode': 'set_active_branch', 'name': 'Set active branch', 'description':
SET_ACTIVE_BRANCH_PROMPT, 'args_schema': BranchName}, {'mode':
'create_branch', 'name': 'Create a new branch', 'description':
CREATE_BRANCH_PROMPT, 'args_schema': BranchName}, {'mode':
'get_files_from_directory', 'name': 'Get files from a directory',
'description': GET_FILES_FROM_DIRECTORY_PROMPT, 'args_schema':
DirectoryPath}, {'mode': 'search_issues_and_prs', 'name':
'Search issues and pull requests', 'description':
SEARCH_ISSUES_AND_PRS_PROMPT, 'args_schema': SearchIssuesAndPRs}, {
'mode': 'search_code', 'name': 'Search code', 'description':
SEARCH_CODE_PROMPT, 'args_schema': SearchCode}, {'mode':
'create_review_request', 'name': 'Create review request', 'description':
CREATE_REVIEW_REQUEST_PROMPT, 'args_schema': CreateReviewRequest}]
tools = [GitHubAction(name=action['name'], description=action['description'
], mode=action['mode'], api_wrapper=github_api_wrapper, args_schema=
action.get('args_schema', None)) for action in operations]
return cls(tools=tools)
|
@classmethod
def from_github_api_wrapper(cls, github_api_wrapper: GitHubAPIWrapper
) ->'GitHubToolkit':
operations: List[Dict] = [{'mode': 'get_issues', 'name': 'Get Issues',
'description': GET_ISSUES_PROMPT, 'args_schema': NoInput}, {'mode':
'get_issue', 'name': 'Get Issue', 'description': GET_ISSUE_PROMPT,
'args_schema': GetIssue}, {'mode': 'comment_on_issue', 'name':
'Comment on Issue', 'description': COMMENT_ON_ISSUE_PROMPT,
'args_schema': CommentOnIssue}, {'mode': 'list_open_pull_requests',
'name': 'List open pull requests (PRs)', 'description':
LIST_PRS_PROMPT, 'args_schema': NoInput}, {'mode':
'get_pull_request', 'name': 'Get Pull Request', 'description':
GET_PR_PROMPT, 'args_schema': GetPR}, {'mode':
'list_pull_request_files', 'name':
'Overview of files included in PR', 'description':
LIST_PULL_REQUEST_FILES, 'args_schema': GetPR}, {'mode':
'create_pull_request', 'name': 'Create Pull Request', 'description':
CREATE_PULL_REQUEST_PROMPT, 'args_schema': CreatePR}, {'mode':
'list_pull_request_files', 'name': "List Pull Requests' Files",
'description': LIST_PULL_REQUEST_FILES, 'args_schema': GetPR}, {
'mode': 'create_file', 'name': 'Create File', 'description':
CREATE_FILE_PROMPT, 'args_schema': CreateFile}, {'mode':
'read_file', 'name': 'Read File', 'description': READ_FILE_PROMPT,
'args_schema': ReadFile}, {'mode': 'update_file', 'name':
'Update File', 'description': UPDATE_FILE_PROMPT, 'args_schema':
UpdateFile}, {'mode': 'delete_file', 'name': 'Delete File',
'description': DELETE_FILE_PROMPT, 'args_schema': DeleteFile}, {
'mode': 'list_files_in_main_branch', 'name':
'Overview of existing files in Main branch', 'description':
OVERVIEW_EXISTING_FILES_IN_MAIN, 'args_schema': NoInput}, {'mode':
'list_files_in_bot_branch', 'name':
'Overview of files in current working branch', 'description':
OVERVIEW_EXISTING_FILES_BOT_BRANCH, 'args_schema': NoInput}, {
'mode': 'list_branches_in_repo', 'name':
'List branches in this repository', 'description':
LIST_BRANCHES_IN_REPO_PROMPT, 'args_schema': NoInput}, {'mode':
'set_active_branch', 'name': 'Set active branch', 'description':
SET_ACTIVE_BRANCH_PROMPT, 'args_schema': BranchName}, {'mode':
'create_branch', 'name': 'Create a new branch', 'description':
CREATE_BRANCH_PROMPT, 'args_schema': BranchName}, {'mode':
'get_files_from_directory', 'name': 'Get files from a directory',
'description': GET_FILES_FROM_DIRECTORY_PROMPT, 'args_schema':
DirectoryPath}, {'mode': 'search_issues_and_prs', 'name':
'Search issues and pull requests', 'description':
SEARCH_ISSUES_AND_PRS_PROMPT, 'args_schema': SearchIssuesAndPRs}, {
'mode': 'search_code', 'name': 'Search code', 'description':
SEARCH_CODE_PROMPT, 'args_schema': SearchCode}, {'mode':
'create_review_request', 'name': 'Create review request',
'description': CREATE_REVIEW_REQUEST_PROMPT, 'args_schema':
CreateReviewRequest}]
tools = [GitHubAction(name=action['name'], description=action[
'description'], mode=action['mode'], api_wrapper=github_api_wrapper,
args_schema=action.get('args_schema', None)) for action in operations]
return cls(tools=tools)
| null |
get_allowed_tools
|
"""Get allowed tools."""
return [t.name for t in self.tools]
|
def get_allowed_tools(self) ->List[str]:
"""Get allowed tools."""
return [t.name for t in self.tools]
|
Get allowed tools.
|
on_text
|
"""Run when agent ends."""
print_text(text, color=color or self.color, end=end, file=self.file)
|
def on_text(self, text: str, color: Optional[str]=None, end: str='', **
kwargs: Any) ->None:
"""Run when agent ends."""
print_text(text, color=color or self.color, end=end, file=self.file)
|
Run when agent ends.
|
test_bs_html_loader
|
"""Test unstructured loader."""
file_path = EXAMPLES / 'example.html'
loader = BSHTMLLoader(str(file_path), get_text_separator='|')
docs = loader.load()
assert len(docs) == 1
metadata = docs[0].metadata
content = docs[0].page_content
assert metadata['title'] == "Chew dad's slippers"
assert metadata['source'] == str(file_path)
assert content[:2] == '\n|'
|
@pytest.mark.requires('bs4', 'lxml')
def test_bs_html_loader() ->None:
"""Test unstructured loader."""
file_path = EXAMPLES / 'example.html'
loader = BSHTMLLoader(str(file_path), get_text_separator='|')
docs = loader.load()
assert len(docs) == 1
metadata = docs[0].metadata
content = docs[0].page_content
assert metadata['title'] == "Chew dad's slippers"
assert metadata['source'] == str(file_path)
assert content[:2] == '\n|'
|
Test unstructured loader.
|
output_keys
|
"""Return output key.
:meta private:
"""
return [self.output_key]
|
@property
def output_keys(self) ->List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
|
Return output key.
:meta private:
|
test_delete_by_path
|
"""Test delete dataset."""
import deeplake
path = deeplake_datastore.dataset_path
DeepLake.force_delete_by_path(path)
assert not deeplake.exists(path)
|
def test_delete_by_path(deeplake_datastore: DeepLake) ->None:
"""Test delete dataset."""
import deeplake
path = deeplake_datastore.dataset_path
DeepLake.force_delete_by_path(path)
assert not deeplake.exists(path)
|
Test delete dataset.
|
test_singlestoredb_as_retriever
|
table_name = 'test_singlestoredb_8'
drop(table_name)
docsearch = SingleStoreDB.from_texts(texts, FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name=
table_name, host=TEST_SINGLESTOREDB_URL)
retriever = docsearch.as_retriever(search_kwargs={'k': 2})
output = retriever.get_relevant_documents('foo')
assert output == [Document(page_content='foo'), Document(page_content='bar')]
drop(table_name)
|
@pytest.mark.skipif(not singlestoredb_installed, reason=
'singlestoredb not installed')
def test_singlestoredb_as_retriever(texts: List[str]) ->None:
table_name = 'test_singlestoredb_8'
drop(table_name)
docsearch = SingleStoreDB.from_texts(texts, FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name=
table_name, host=TEST_SINGLESTOREDB_URL)
retriever = docsearch.as_retriever(search_kwargs={'k': 2})
output = retriever.get_relevant_documents('foo')
assert output == [Document(page_content='foo'), Document(page_content=
'bar')]
drop(table_name)
| null |
__from
|
scann = dependable_scann_import()
distance_strategy = kwargs.get('distance_strategy', DistanceStrategy.
EUCLIDEAN_DISTANCE)
scann_config = kwargs.get('scann_config', None)
vector = np.array(embeddings, dtype=np.float32)
if normalize_L2:
vector = normalize(vector)
if scann_config is not None:
index = scann.scann_ops_pybind.create_searcher(vector, scann_config)
elif distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
index = scann.scann_ops_pybind.builder(vector, 1, 'dot_product'
).score_brute_force().build()
else:
index = scann.scann_ops_pybind.builder(vector, 1, 'squared_l2'
).score_brute_force().build()
documents = []
if ids is None:
ids = [str(uuid.uuid4()) for _ in texts]
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
index_to_id = dict(enumerate(ids))
if len(index_to_id) != len(documents):
raise Exception(
f'{len(index_to_id)} ids provided for {len(documents)} documents. Each document should have an id.'
)
docstore = InMemoryDocstore(dict(zip(index_to_id.values(), documents)))
return cls(embedding, index, docstore, index_to_id, normalize_L2=
normalize_L2, **kwargs)
|
@classmethod
def __from(cls, texts: List[str], embeddings: List[List[float]], embedding:
Embeddings, metadatas: Optional[List[dict]]=None, ids: Optional[List[
str]]=None, normalize_L2: bool=False, **kwargs: Any) ->ScaNN:
scann = dependable_scann_import()
distance_strategy = kwargs.get('distance_strategy', DistanceStrategy.
EUCLIDEAN_DISTANCE)
scann_config = kwargs.get('scann_config', None)
vector = np.array(embeddings, dtype=np.float32)
if normalize_L2:
vector = normalize(vector)
if scann_config is not None:
index = scann.scann_ops_pybind.create_searcher(vector, scann_config)
elif distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
index = scann.scann_ops_pybind.builder(vector, 1, 'dot_product'
).score_brute_force().build()
else:
index = scann.scann_ops_pybind.builder(vector, 1, 'squared_l2'
).score_brute_force().build()
documents = []
if ids is None:
ids = [str(uuid.uuid4()) for _ in texts]
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
index_to_id = dict(enumerate(ids))
if len(index_to_id) != len(documents):
raise Exception(
f'{len(index_to_id)} ids provided for {len(documents)} documents. Each document should have an id.'
)
docstore = InMemoryDocstore(dict(zip(index_to_id.values(), documents)))
return cls(embedding, index, docstore, index_to_id, normalize_L2=
normalize_L2, **kwargs)
| null |
embed_documents
|
return [self.embed_query(txt) for txt in texts]
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
return [self.embed_query(txt) for txt in texts]
| null |
lazy_load
|
yield from self._tfds_client.lazy_load()
|
def lazy_load(self) ->Iterator[Document]:
yield from self._tfds_client.lazy_load()
| null |
add_example
|
"""Add new example to store."""
|
@abstractmethod
def add_example(self, example: Dict[str, str]) ->Any:
"""Add new example to store."""
|
Add new example to store.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
test_unstructured_rst_loader
|
"""Test unstructured loader."""
file_path = os.path.join(EXAMPLE_DIRECTORY, 'README.rst')
loader = UnstructuredRSTLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
|
def test_unstructured_rst_loader() ->None:
"""Test unstructured loader."""
file_path = os.path.join(EXAMPLE_DIRECTORY, 'README.rst')
loader = UnstructuredRSTLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
|
Test unstructured loader.
|
test_color_question_2
|
"""Test simple question."""
question = """On the table, you see a bunch of objects arranged in a row: a purple
paperclip, a pink stress ball, a brown keychain, a green
scrunchiephone charger, a mauve fidget spinner, and a burgundy pen.
What is the color of the object directly to the right of
the stress ball?"""
prompt = COLORED_OBJECT_PROMPT.format(question=question)
queries = {prompt: _COLORED_OBJECT_SOLUTION_2}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_colored_object_prompt(fake_llm, timeout=None)
output = fake_pal_chain.run(question)
assert output == 'brown'
|
def test_color_question_2() ->None:
"""Test simple question."""
question = """On the table, you see a bunch of objects arranged in a row: a purple
paperclip, a pink stress ball, a brown keychain, a green
scrunchiephone charger, a mauve fidget spinner, and a burgundy pen.
What is the color of the object directly to the right of
the stress ball?"""
prompt = COLORED_OBJECT_PROMPT.format(question=question)
queries = {prompt: _COLORED_OBJECT_SOLUTION_2}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_colored_object_prompt(fake_llm, timeout=None
)
output = fake_pal_chain.run(question)
assert output == 'brown'
|
Test simple question.
|
_evaluate_strings
|
result = self({'query': input, 'context': reference, 'result': prediction},
callbacks=callbacks, include_run_info=include_run_info)
return self._prepare_output(result)
|
def _evaluate_strings(self, *, prediction: str, reference: Optional[str]=
None, input: Optional[str]=None, callbacks: Callbacks=None,
include_run_info: bool=False, **kwargs: Any) ->dict:
result = self({'query': input, 'context': reference, 'result':
prediction}, callbacks=callbacks, include_run_info=include_run_info)
return self._prepare_output(result)
| null |
_import_modal
|
from langchain_community.llms.modal import Modal
return Modal
|
def _import_modal() ->Any:
from langchain_community.llms.modal import Modal
return Modal
| null |
test_tongyi_generate
|
"""Test valid call to tongyi."""
llm = Tongyi()
output = llm.generate(['who are you'])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
|
def test_tongyi_generate() ->None:
"""Test valid call to tongyi."""
llm = Tongyi()
output = llm.generate(['who are you'])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
|
Test valid call to tongyi.
|
_is_b64
|
return s.startswith('data:image')
|
def _is_b64(s: str) ->bool:
return s.startswith('data:image')
| null |
test_search_mmr
|
r = zep_vectorstore.search(query='Test Document', search_type='mmr', k=1)
assert len(r) == 1
assert r[0].page_content == 'Test Document'
assert r[0].metadata == {'key': 'value'}
|
@pytest.mark.requires('zep_python')
def test_search_mmr(zep_vectorstore: ZepVectorStore) ->None:
r = zep_vectorstore.search(query='Test Document', search_type='mmr', k=1)
assert len(r) == 1
assert r[0].page_content == 'Test Document'
assert r[0].metadata == {'key': 'value'}
| null |
__init__
|
"""Initialize with Xata client."""
try:
from xata.client import XataClient
except ImportError:
raise ValueError(
'Could not import xata python package. Please install it with `pip install xata`.'
)
self._client = XataClient(api_key=api_key, db_url=db_url, branch_name=
branch_name)
self._table_name = table_name
self._session_id = session_id
if create_table:
self._create_table_if_not_exists()
|
def __init__(self, session_id: str, db_url: str, api_key: str, branch_name:
str='main', table_name: str='messages', create_table: bool=True) ->None:
"""Initialize with Xata client."""
try:
from xata.client import XataClient
except ImportError:
raise ValueError(
'Could not import xata python package. Please install it with `pip install xata`.'
)
self._client = XataClient(api_key=api_key, db_url=db_url, branch_name=
branch_name)
self._table_name = table_name
self._session_id = session_id
if create_table:
self._create_table_if_not_exists()
|
Initialize with Xata client.
|
test_chat_fireworks_system_message
|
"""Test ChatFireworks wrapper with system message."""
system_message = SystemMessage(content='You are to chat with the user.')
human_message = HumanMessage(content='Hello')
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
@pytest.mark.scheduled
def test_chat_fireworks_system_message(chat: ChatFireworks) ->None:
"""Test ChatFireworks wrapper with system message."""
system_message = SystemMessage(content='You are to chat with the user.')
human_message = HumanMessage(content='Hello')
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
Test ChatFireworks wrapper with system message.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
update_task_assignees
|
"""
Add or remove assignees of a specified task.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {'Error': error}
for user in query_dict['users']:
if not isinstance(user, int):
return {'Error':
f"""All users must be integers, not strings!
"Got user {user} if type {type(user)}"""
}
url = f"{DEFAULT_URL}/task/{query_dict['task_id']}"
headers = self.get_headers()
if query_dict['operation'] == 'add':
assigne_payload = {'add': query_dict['users'], 'rem': []}
elif query_dict['operation'] == 'rem':
assigne_payload = {'add': [], 'rem': query_dict['users']}
else:
raise ValueError(f"Invalid operation ({query_dict['operation']}). ",
"Valid options ['add', 'rem'].")
params = {'custom_task_ids': 'true', 'team_id': self.team_id,
'include_subtasks': 'true'}
payload = {'assignees': assigne_payload}
response = requests.put(url, headers=headers, params=params, json=payload)
return {'response': response}
|
def update_task_assignees(self, query: str) ->Dict:
"""
Add or remove assignees of a specified task.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {'Error': error}
for user in query_dict['users']:
if not isinstance(user, int):
return {'Error':
f"""All users must be integers, not strings!
"Got user {user} if type {type(user)}"""
}
url = f"{DEFAULT_URL}/task/{query_dict['task_id']}"
headers = self.get_headers()
if query_dict['operation'] == 'add':
assigne_payload = {'add': query_dict['users'], 'rem': []}
elif query_dict['operation'] == 'rem':
assigne_payload = {'add': [], 'rem': query_dict['users']}
else:
raise ValueError(f"Invalid operation ({query_dict['operation']}). ",
"Valid options ['add', 'rem'].")
params = {'custom_task_ids': 'true', 'team_id': self.team_id,
'include_subtasks': 'true'}
payload = {'assignees': assigne_payload}
response = requests.put(url, headers=headers, params=params, json=payload)
return {'response': response}
|
Add or remove assignees of a specified task.
|
lazy_parse
|
"""Lazily parse the blob."""
try:
from speechkit import configure_credentials, creds, model_repository
from speechkit.stt import AudioProcessingType
except ImportError:
raise ImportError(
'yandex-speechkit package not found, please install it with `pip install yandex-speechkit`'
)
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
'pydub package not found, please install it with `pip install pydub`')
if self.api_key:
configure_credentials(yandex_credentials=creds.YandexCredentials(
api_key=self.api_key))
else:
configure_credentials(yandex_credentials=creds.YandexCredentials(
iam_token=self.iam_token))
audio = AudioSegment.from_file(blob.path)
model = model_repository.recognition_model()
model.model = self.model
model.language = self.language
model.audio_processing_type = AudioProcessingType.Full
result = model.transcribe(audio)
for res in result:
yield Document(page_content=res.normalized_text, metadata={'source':
blob.source})
|
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Lazily parse the blob."""
try:
from speechkit import configure_credentials, creds, model_repository
from speechkit.stt import AudioProcessingType
except ImportError:
raise ImportError(
'yandex-speechkit package not found, please install it with `pip install yandex-speechkit`'
)
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
'pydub package not found, please install it with `pip install pydub`'
)
if self.api_key:
configure_credentials(yandex_credentials=creds.YandexCredentials(
api_key=self.api_key))
else:
configure_credentials(yandex_credentials=creds.YandexCredentials(
iam_token=self.iam_token))
audio = AudioSegment.from_file(blob.path)
model = model_repository.recognition_model()
model.model = self.model
model.language = self.language
model.audio_processing_type = AudioProcessingType.Full
result = model.transcribe(audio)
for res in result:
yield Document(page_content=res.normalized_text, metadata={'source':
blob.source})
|
Lazily parse the blob.
|
on_llm_start
|
"""Store the prompts"""
self.prompts = prompts
|
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **
kwargs: Any) ->None:
"""Store the prompts"""
self.prompts = prompts
|
Store the prompts
|
get_id_link_price
|
"""The response may contain more than one game, so we need to choose the right
one and return the id."""
game_info = {}
for app in games['apps']:
game_info['id'] = app['id']
game_info['link'] = app['link']
game_info['price'] = app['price']
break
return game_info
|
def get_id_link_price(self, games: dict) ->dict:
"""The response may contain more than one game, so we need to choose the right
one and return the id."""
game_info = {}
for app in games['apps']:
game_info['id'] = app['id']
game_info['link'] = app['link']
game_info['price'] = app['price']
break
return game_info
|
The response may contain more than one game, so we need to choose the right
one and return the id.
|
_construct_documents_from_results_with_score
|
"""Helper to convert Marqo results into documents.
Args:
results (List[dict]): A marqo results object with the 'hits'.
include_scores (bool, optional): Include scores alongside documents.
Defaults to False.
Returns:
Union[List[Document], List[Tuple[Document, float]]]: The documents or
document score pairs if `include_scores` is true.
"""
documents: List[Tuple[Document, Any]] = []
for res in results['hits']:
if self.page_content_builder is None:
text = res['text']
else:
text = self.page_content_builder(res)
metadata = json.loads(res.get('metadata', '{}'))
documents.append((Document(page_content=text, metadata=metadata), res[
'_score']))
return documents
|
def _construct_documents_from_results_with_score(self, results: Dict[str,
List[Dict[str, str]]]) ->List[Tuple[Document, Any]]:
"""Helper to convert Marqo results into documents.
Args:
results (List[dict]): A marqo results object with the 'hits'.
include_scores (bool, optional): Include scores alongside documents.
Defaults to False.
Returns:
Union[List[Document], List[Tuple[Document, float]]]: The documents or
document score pairs if `include_scores` is true.
"""
documents: List[Tuple[Document, Any]] = []
for res in results['hits']:
if self.page_content_builder is None:
text = res['text']
else:
text = self.page_content_builder(res)
metadata = json.loads(res.get('metadata', '{}'))
documents.append((Document(page_content=text, metadata=metadata),
res['_score']))
return documents
|
Helper to convert Marqo results into documents.
Args:
results (List[dict]): A marqo results object with the 'hits'.
include_scores (bool, optional): Include scores alongside documents.
Defaults to False.
Returns:
Union[List[Document], List[Tuple[Document, float]]]: The documents or
document score pairs if `include_scores` is true.
|
_call
|
"""Generate text from a prompt.
Args:
prompt: The prompt to generate text from.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import DeepSparse
llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none")
llm("Tell me a joke.")
"""
if self.streaming:
combined_output = ''
for chunk in self._stream(prompt=prompt, stop=stop, run_manager=
run_manager, **kwargs):
combined_output += chunk.text
text = combined_output
else:
text = self.pipeline(sequences=prompt, **self.generation_config
).generations[0].text
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Generate text from a prompt.

    Args:
        prompt: The prompt to generate text from.
        stop: A list of strings to stop generation when encountered.

    Returns:
        The generated text.

    Example:
        .. code-block:: python

            from langchain_community.llms import DeepSparse
            llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none")
            llm("Tell me a joke.")
    """
    if self.streaming:
        # Stream the completion and join the chunks into one string.
        text = ''.join(chunk.text for chunk in self._stream(prompt=prompt,
            stop=stop, run_manager=run_manager, **kwargs))
    else:
        text = self.pipeline(sequences=prompt, **self.generation_config
            ).generations[0].text
    # Stop sequences are applied as post-processing either way.
    return enforce_stop_tokens(text, stop) if stop is not None else text
|
Generate text from a prompt.
Args:
prompt: The prompt to generate text from.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import DeepSparse
llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none")
llm("Tell me a joke.")
|
get_thread_ids_by_folder_id
|
"""Get thread ids by folder id and update in thread_ids"""
from quip_api.quip import HTTPError, QuipError
try:
folder = self.quip_client.get_folder(folder_id)
except QuipError as e:
if e.code == 403:
logging.warning(
f'depth {depth}, Skipped over restricted folder {folder_id}, {e}')
else:
logging.warning(
f'depth {depth}, Skipped over folder {folder_id} due to unknown error {e.code}'
)
return
except HTTPError as e:
logging.warning(
f'depth {depth}, Skipped over folder {folder_id} due to HTTP error {e.code}'
)
return
title = folder['folder'].get('title', 'Folder %s' % folder_id)
logging.info(f'depth {depth}, Processing folder {title}')
for child in folder['children']:
if 'folder_id' in child:
self.get_thread_ids_by_folder_id(child['folder_id'], depth + 1,
thread_ids)
elif 'thread_id' in child:
thread_ids.append(child['thread_id'])
|
def get_thread_ids_by_folder_id(self, folder_id: str, depth: int,
    thread_ids: List[str]) ->None:
    """Get thread ids by folder id and update in thread_ids.

    Recursively walks the Quip folder tree, appending every thread id found
    to ``thread_ids`` (mutated in place). Folders that cannot be read are
    logged and skipped rather than aborting the traversal.

    Args:
        folder_id: Id of the Quip folder to traverse.
        depth: Current recursion depth (used only in log messages).
        thread_ids: Accumulator list of thread ids, mutated in place.
    """
    # Imported lazily so quip_api is only required when this path is used.
    from quip_api.quip import HTTPError, QuipError
    try:
        folder = self.quip_client.get_folder(folder_id)
    except QuipError as e:
        # 403 means the token lacks access to this folder; any other code
        # is unexpected, but both cases are skipped with a warning.
        if e.code == 403:
            logging.warning(
                f'depth {depth}, Skipped over restricted folder {folder_id}, {e}'
                )
        else:
            logging.warning(
                f'depth {depth}, Skipped over folder {folder_id} due to unknown error {e.code}'
                )
        return
    except HTTPError as e:
        logging.warning(
            f'depth {depth}, Skipped over folder {folder_id} due to HTTP error {e.code}'
            )
        return
    title = folder['folder'].get('title', 'Folder %s' % folder_id)
    logging.info(f'depth {depth}, Processing folder {title}')
    for child in folder['children']:
        # Child folders recurse one level deeper; threads are collected.
        if 'folder_id' in child:
            self.get_thread_ids_by_folder_id(child['folder_id'], depth + 1,
                thread_ids)
        elif 'thread_id' in child:
            thread_ids.append(child['thread_id'])
|
Get thread ids by folder id and update in thread_ids
|
_assert_dependency_equals
|
assert dep['git'] == git
assert dep['ref'] == ref
assert dep['subdirectory'] == subdirectory
if event_metadata is not None:
assert dep['event_metadata'] == event_metadata
|
def _assert_dependency_equals(dep: DependencySource, *, git: Optional[str]=
    None, ref: Optional[str]=None, subdirectory: Optional[str]=None,
    event_metadata: Optional[Dict]=None) ->None:
    """Assert that a dependency source matches the expected field values.

    ``event_metadata`` is only compared when an expected value is supplied.
    """
    expected = {'git': git, 'ref': ref, 'subdirectory': subdirectory}
    for field, value in expected.items():
        assert dep[field] == value
    if event_metadata is not None:
        assert dep['event_metadata'] == event_metadata
| null |
invoke
|
return self._call_with_config(self._invoke, input, config, **kwargs)
|
def invoke(self, input: List[Input], config: Optional[RunnableConfig]=None,
    **kwargs: Any) ->List[Output]:
    """Run ``self._invoke`` on the input list via the config-aware call path."""
    return self._call_with_config(self._invoke, input, config, **kwargs)
| null |
add_prefix
|
if prefix is None:
return element_tag
return f'{prefix}.{element_tag}'
|
def add_prefix(element_tag: str) ->str:
    """Return the tag qualified with the enclosing ``prefix``, if any."""
    # `prefix` comes from the enclosing scope; None means no namespacing.
    return element_tag if prefix is None else f'{prefix}.{element_tag}'
| null |
test_redis
|
"""Test end to end construction and search."""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
output = docsearch.similarity_search('foo', k=1, return_metadata=False)
assert output == TEST_SINGLE_RESULT
assert drop(docsearch.index_name)
|
def test_redis(texts: List[str]) ->None:
    """Test end to end construction and search."""
    store = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
    result = store.similarity_search('foo', k=1, return_metadata=False)
    assert result == TEST_SINGLE_RESULT
    # Remove the index created by from_texts so the test leaves no state.
    assert drop(store.index_name)
|
Test end to end construction and search.
|
lc_serializable
|
return True
|
@property
def lc_serializable(self) ->bool:
    """Serializability flag; this class always reports True."""
    return True
| null |
get_output_schema
|
return create_model(self.get_name('Output'), **{k: (v.OutputType, None) for
k, v in self.steps.items()}, __config__=_SchemaConfig)
|
def get_output_schema(self, config: Optional[RunnableConfig]=None) ->Type[
    BaseModel]:
    """Build a pydantic model with one field per step.

    Each key of ``self.steps`` becomes a field typed with that step's
    ``OutputType`` and a ``None`` default.
    """
    return create_model(self.get_name('Output'), **{k: (v.OutputType, None) for
        k, v in self.steps.items()}, __config__=_SchemaConfig)
| null |
test_chat_openai_generate
|
"""Test ChatOpenAI wrapper with generate."""
chat = ChatOpenAI(max_tokens=10, n=2)
message = HumanMessage(content='Hello')
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
assert response.llm_output
for generations in response.generations:
assert len(generations) == 2
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
|
@pytest.mark.scheduled
def test_chat_openai_generate() ->None:
    """Test ChatOpenAI wrapper with generate."""
    chat = ChatOpenAI(max_tokens=10, n=2)
    message = HumanMessage(content='Hello')
    response = chat.generate([[message], [message]])
    assert isinstance(response, LLMResult)
    assert len(response.generations) == 2
    assert response.llm_output
    # n=2 means every prompt yields two candidate generations.
    for candidates in response.generations:
        assert len(candidates) == 2
        for candidate in candidates:
            assert isinstance(candidate, ChatGeneration)
            assert isinstance(candidate.text, str)
            assert candidate.text == candidate.message.content
|
Test ChatOpenAI wrapper with generate.
|
_import_edenai_EdenAiTextToSpeechTool
|
from langchain_community.tools.edenai import EdenAiTextToSpeechTool
return EdenAiTextToSpeechTool
|
def _import_edenai_EdenAiTextToSpeechTool() ->Any:
    """Lazily import and return the ``EdenAiTextToSpeechTool`` class."""
    from langchain_community.tools.edenai import EdenAiTextToSpeechTool
    return EdenAiTextToSpeechTool
| null |
test_invalid_filter
|
account_address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b'
with pytest.raises(ValueError) as error_invalid_filter:
EtherscanLoader(account_address, filter='internal_saction')
assert str(error_invalid_filter.value) == 'Invalid filter internal_saction'
|
@pytest.mark.skipif(not etherscan_key_set, reason=
    'Etherscan API key not provided.')
def test_invalid_filter() ->None:
    """An unrecognized filter name raises a descriptive ValueError."""
    # 'internal_saction' is a deliberately misspelled filter value.
    address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b'
    with pytest.raises(ValueError) as excinfo:
        EtherscanLoader(address, filter='internal_saction')
    assert str(excinfo.value) == 'Invalid filter internal_saction'
| null |
batch_support_chroma_version
|
try:
import chromadb
except Exception:
return False
major, minor, patch = chromadb.__version__.split('.')
if int(major) == 0 and int(minor) >= 4 and int(patch) >= 10:
return True
return False
|
def batch_support_chroma_version() ->bool:
    """Return True if the installed chromadb supports batching (>= 0.4.10).

    Returns:
        True when chromadb is importable and its version is at least
        0.4.10; False when it is missing or the version cannot be parsed.
    """
    try:
        import chromadb
    except Exception:
        return False
    try:
        # Compare as an integer tuple. The previous per-component check
        # (major == 0 and minor >= 4 and patch >= 10) wrongly rejected
        # newer releases such as 0.5.0 and 1.0.0.
        major, minor, patch = chromadb.__version__.split('.')[:3]
        return (int(major), int(minor), int(patch)) >= (0, 4, 10)
    except (ValueError, TypeError):
        return False
| null |
test_ignore_links
|
html2text_transformer = Html2TextTransformer(ignore_links=False)
multiple_tags_html = (
"<h1>First heading.</h1><p>First paragraph with an <a href='http://example.com'>example</a></p>"
)
documents = [Document(page_content=multiple_tags_html)]
docs_transformed = html2text_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == """# First heading.
First paragraph with an [example](http://example.com)
"""
html2text_transformer = Html2TextTransformer(ignore_links=True)
docs_transformed = html2text_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == """# First heading.
First paragraph with an example
"""
|
@pytest.mark.requires('html2text')
def test_ignore_links() ->None:
    """ignore_links toggles markdown link rendering in transformed docs."""
    html = (
        "<h1>First heading.</h1><p>First paragraph with an <a href='http://example.com'>example</a></p>"
    )
    docs = [Document(page_content=html)]
    # With links kept, the anchor is rendered as a markdown link.
    keep_links = Html2TextTransformer(ignore_links=False)
    transformed = keep_links.transform_documents(docs)
    assert transformed[0].page_content == """# First heading.
First paragraph with an [example](http://example.com)
"""
    # With ignore_links=True only the anchor text survives.
    strip_links = Html2TextTransformer(ignore_links=True)
    transformed = strip_links.transform_documents(docs)
    assert transformed[0].page_content == """# First heading.
First paragraph with an example
"""
| null |
web_path
|
if len(self.web_paths) > 1:
raise ValueError('Multiple webpaths found.')
return self.web_paths[0]
|
@property
def web_path(self) ->str:
    """Return the single configured web path.

    Raises:
        ValueError: If more than one web path is configured.
    """
    paths = self.web_paths
    if len(paths) > 1:
        raise ValueError('Multiple webpaths found.')
    return paths[0]
| null |
_stream
|
"""Call out to Titan Takeoff (Pro) stream endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Yields:
A dictionary like object containing a string token.
Example:
.. code-block:: python
prompt = "What is the capital of the United Kingdom?"
response = model(prompt)
"""
url = f'{self.base_url}/generate_stream'
params = {'text': prompt, **self._default_params}
response = requests.post(url, json=params, stream=True)
response.encoding = 'utf-8'
buffer = ''
for text in response.iter_content(chunk_size=1, decode_unicode=True):
buffer += text
if 'data:' in buffer:
if buffer.startswith('data:'):
buffer = ''
if len(buffer.split('data:', 1)) == 2:
content, _ = buffer.split('data:', 1)
buffer = content.rstrip('\n')
if buffer:
chunk = GenerationChunk(text=buffer)
buffer = ''
yield chunk
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
if buffer:
chunk = GenerationChunk(text=buffer.replace('</s>', ''))
yield chunk
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
|
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[
    GenerationChunk]:
    """Call out to Titan Takeoff (Pro) stream endpoint.

    Args:
        prompt: The prompt to pass into the model.
        stop: Optional list of stop words to use when generating.

    Returns:
        The string generated by the model.

    Yields:
        A dictionary like object containing a string token.

    Example:
        .. code-block:: python

            prompt = "What is the capital of the United Kingdom?"
            response = model(prompt)
    """
    url = f'{self.base_url}/generate_stream'
    params = {'text': prompt, **self._default_params}
    # The endpoint streams 'data:'-delimited tokens (SSE-style). Characters
    # are read one at a time and re-assembled into whole tokens.
    response = requests.post(url, json=params, stream=True)
    response.encoding = 'utf-8'
    buffer = ''
    for text in response.iter_content(chunk_size=1, decode_unicode=True):
        buffer += text
        if 'data:' in buffer:
            # A leading 'data:' marker has no token before it — discard it.
            if buffer.startswith('data:'):
                buffer = ''
            # When a 'data:' marker appears mid-buffer, everything before
            # it is a complete token; trailing newlines are stripped.
            if len(buffer.split('data:', 1)) == 2:
                content, _ = buffer.split('data:', 1)
                buffer = content.rstrip('\n')
            if buffer:
                chunk = GenerationChunk(text=buffer)
                buffer = ''
                yield chunk
                if run_manager:
                    run_manager.on_llm_new_token(token=chunk.text)
    # Flush whatever remains after the stream ends, stripping the model's
    # end-of-sequence marker.
    if buffer:
        chunk = GenerationChunk(text=buffer.replace('</s>', ''))
        yield chunk
        if run_manager:
            run_manager.on_llm_new_token(token=chunk.text)
|
Call out to Titan Takeoff (Pro) stream endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Yields:
A dictionary like object containing a string token.
Example:
.. code-block:: python
prompt = "What is the capital of the United Kingdom?"
response = model(prompt)
|
add_texts
|
"""
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = None
if self.embedding_func is not None:
embeddings = self.embedding_func.embed_documents(list(texts))
if embeddings is None:
raise ValueError('embeddings is None')
if self.flag:
dbs_list = self.vearch.list_dbs()
if self.using_db_name not in dbs_list:
create_db_code = self.vearch.create_db(self.using_db_name)
if not create_db_code:
raise ValueError('create db failed!!!')
space_list = self.vearch.list_spaces(self.using_db_name)
if self.using_table_name not in space_list:
create_space_code = self._create_space(len(embeddings[0]))
if not create_space_code:
raise ValueError('create space failed!!!')
docid = []
if embeddings is not None and metadatas is not None:
for text, metadata, embed in zip(texts, metadatas, embeddings):
profiles: dict[str, Any] = {}
profiles['text'] = text
profiles['metadata'] = metadata['source']
embed_np = np.array(embed)
profiles['text_embedding'] = {'feature': (embed_np / np.linalg.
norm(embed_np)).tolist()}
insert_res = self.vearch.insert_one(self.using_db_name, self.
using_table_name, profiles)
if insert_res['status'] == 200:
docid.append(insert_res['_id'])
continue
else:
retry_insert = self.vearch.insert_one(self.using_db_name,
self.using_table_name, profiles)
docid.append(retry_insert['_id'])
continue
else:
table_path = os.path.join(self.using_metapath, self.using_table_name +
'.schema')
if not os.path.exists(table_path):
dim = len(embeddings[0])
response_code = self._create_table(dim)
if response_code:
raise ValueError('create table failed!!!')
if embeddings is not None and metadatas is not None:
doc_items = []
for text, metadata, embed in zip(texts, metadatas, embeddings):
profiles_v: dict[str, Any] = {}
profiles_v['text'] = text
profiles_v['metadata'] = metadata['source']
embed_np = np.array(embed)
profiles_v['text_embedding'] = embed_np / np.linalg.norm(embed_np)
doc_items.append(profiles_v)
docid = self.vearch.add(doc_items)
t_time = 0
while len(docid) != len(embeddings):
time.sleep(0.5)
if t_time > 6:
break
t_time += 1
self.vearch.dump()
return docid
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, **kwargs: Any) ->List[str]:
    """Embed ``texts`` and insert them into Vearch.

    Args:
        texts: Texts to embed and store.
        metadatas: Per-text metadata dicts; each is expected to contain a
            'source' key (only that key is stored).

    Returns:
        List of ids from adding the texts into the vectorstore.

    Raises:
        ValueError: If no embedding function is configured, or if creating
            the db/space/table fails.
    """
    embeddings = None
    if self.embedding_func is not None:
        embeddings = self.embedding_func.embed_documents(list(texts))
    if embeddings is None:
        raise ValueError('embeddings is None')
    if self.flag:
        # Cluster mode: ensure the database and space (table) exist, then
        # insert documents one by one with a single retry on failure.
        dbs_list = self.vearch.list_dbs()
        if self.using_db_name not in dbs_list:
            create_db_code = self.vearch.create_db(self.using_db_name)
            if not create_db_code:
                raise ValueError('create db failed!!!')
        space_list = self.vearch.list_spaces(self.using_db_name)
        if self.using_table_name not in space_list:
            create_space_code = self._create_space(len(embeddings[0]))
            if not create_space_code:
                raise ValueError('create space failed!!!')
        docid = []
        if embeddings is not None and metadatas is not None:
            for text, metadata, embed in zip(texts, metadatas, embeddings):
                profiles: dict[str, Any] = {}
                profiles['text'] = text
                profiles['metadata'] = metadata['source']
                embed_np = np.array(embed)
                # Embeddings are L2-normalized before insertion.
                profiles['text_embedding'] = {'feature': (embed_np / np.
                    linalg.norm(embed_np)).tolist()}
                insert_res = self.vearch.insert_one(self.using_db_name,
                    self.using_table_name, profiles)
                if insert_res['status'] == 200:
                    docid.append(insert_res['_id'])
                    continue
                else:
                    # One retry on a non-200 status; the retry result is
                    # trusted without a second status check.
                    retry_insert = self.vearch.insert_one(self.
                        using_db_name, self.using_table_name, profiles)
                    docid.append(retry_insert['_id'])
                    continue
    else:
        # Standalone mode: create the table on first use, bulk-add, then
        # poll briefly until the reported ids match the inserted count.
        table_path = os.path.join(self.using_metapath, self.
            using_table_name + '.schema')
        if not os.path.exists(table_path):
            dim = len(embeddings[0])
            response_code = self._create_table(dim)
            if response_code:
                raise ValueError('create table failed!!!')
        # NOTE(review): if metadatas is None on this branch, `docid` is
        # never bound and the final return raises NameError — confirm
        # callers always pass metadatas.
        if embeddings is not None and metadatas is not None:
            doc_items = []
            for text, metadata, embed in zip(texts, metadatas, embeddings):
                profiles_v: dict[str, Any] = {}
                profiles_v['text'] = text
                profiles_v['metadata'] = metadata['source']
                embed_np = np.array(embed)
                profiles_v['text_embedding'] = embed_np / np.linalg.norm(
                    embed_np)
                doc_items.append(profiles_v)
            docid = self.vearch.add(doc_items)
            t_time = 0
            # Wait up to ~3.5s for all ids to be reported.
            while len(docid) != len(embeddings):
                time.sleep(0.5)
                if t_time > 6:
                    break
                t_time += 1
            self.vearch.dump()
    return docid
|
Returns:
List of ids from adding the texts into the vectorstore.
|
_get_embeddings_with_retry
|
"""Makes a Vertex AI model request with retry logic."""
from google.api_core.exceptions import Aborted, DeadlineExceeded, ResourceExhausted, ServiceUnavailable
errors = [ResourceExhausted, ServiceUnavailable, Aborted, DeadlineExceeded]
retry_decorator = create_base_retry_decorator(error_types=errors,
max_retries=self.max_retries)
@retry_decorator
def _completion_with_retry(texts_to_process: List[str]) ->Any:
if embeddings_type and self.instance['embeddings_task_type_supported']:
from vertexai.language_models import TextEmbeddingInput
requests = [TextEmbeddingInput(text=t, task_type=embeddings_type) for
t in texts_to_process]
else:
requests = texts_to_process
embeddings = self.client.get_embeddings(requests)
return [embs.values for embs in embeddings]
return _completion_with_retry(texts)
|
def _get_embeddings_with_retry(self, texts: List[str], embeddings_type:
    Optional[str]=None) ->List[List[float]]:
    """Makes a Vertex AI model request with retry logic.

    Args:
        texts: Texts to embed.
        embeddings_type: Optional task type forwarded to the model when the
            instance reports task-type support.

    Returns:
        One embedding vector (list of floats) per input text.
    """
    from google.api_core.exceptions import Aborted, DeadlineExceeded, ResourceExhausted, ServiceUnavailable
    # Retry only on transient server-side failures.
    errors = [ResourceExhausted, ServiceUnavailable, Aborted, DeadlineExceeded]
    retry_decorator = create_base_retry_decorator(error_types=errors,
        max_retries=self.max_retries)
    @retry_decorator
    def _completion_with_retry(texts_to_process: List[str]) ->Any:
        # Wrap texts in TextEmbeddingInput only when a task type is given
        # and the backing model supports it; otherwise send raw strings.
        if embeddings_type and self.instance['embeddings_task_type_supported']:
            from vertexai.language_models import TextEmbeddingInput
            requests = [TextEmbeddingInput(text=t, task_type=
                embeddings_type) for t in texts_to_process]
        else:
            requests = texts_to_process
        embeddings = self.client.get_embeddings(requests)
        return [embs.values for embs in embeddings]
    return _completion_with_retry(texts)
|
Makes a Vertex AI model request with retry logic.
|
test_json_distance_evaluator_evaluate_strings_list_same
|
prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]'
reference = '[{"b": 2, "a": 1}, {"b": 3, "a": 2}]'
result = json_distance_evaluator._evaluate_strings(prediction=prediction,
reference=reference)
assert result['score'] == 0
|
@pytest.mark.requires('rapidfuzz')
def test_json_distance_evaluator_evaluate_strings_list_same(
    json_distance_evaluator: JsonEditDistanceEvaluator) ->None:
    """Lists whose dicts differ only in key order have zero edit distance."""
    pred = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]'
    ref = '[{"b": 2, "a": 1}, {"b": 3, "a": 2}]'
    outcome = json_distance_evaluator._evaluate_strings(prediction=pred,
        reference=ref)
    assert outcome['score'] == 0
| null |
_describe_image
|
headers = {'x-api-key': f'token {self.scenex_api_key}', 'content-type':
'application/json'}
payload = {'data': [{'image': image, 'algorithm': 'Ember', 'languages': [
'en']}]}
response = requests.post(self.scenex_api_url, headers=headers, json=payload)
response.raise_for_status()
result = response.json().get('result', [])
img = result[0] if result else {}
return img.get('text', '')
|
def _describe_image(self, image: str) ->str:
    """POST the image to the SceneXplain API and return its text description.

    Returns an empty string when the API reports no results.
    """
    headers = {'x-api-key': f'token {self.scenex_api_key}', 'content-type':
        'application/json'}
    body = {'data': [{'image': image, 'algorithm': 'Ember', 'languages':
        ['en']}]}
    resp = requests.post(self.scenex_api_url, headers=headers, json=body)
    resp.raise_for_status()
    # The API returns a list under 'result'; use the first entry if any.
    hits = resp.json().get('result', [])
    first = hits[0] if hits else {}
    return first.get('text', '')
| null |
test_mime_type_inference
|
"""Tests mimetype inference based on options and path."""
blob = Blob.from_path(path, mime_type=mime_type, guess_type=guess_type)
assert blob.mimetype == expected_mime_type
|
@pytest.mark.parametrize('path, mime_type, guess_type, expected_mime_type',
    [('test.txt', None, True, 'text/plain'), ('test.txt', None, False, None
    ), ('test.html', None, True, 'text/html'), ('test.html', None, False,
    None), ('test.html', 'user_forced_value', True, 'user_forced_value'), (
    Path('test.html'), 'user_forced_value', True, 'user_forced_value'), (
    Path('test.html'), None, True, 'text/html')])
def test_mime_type_inference(path: PathLike, mime_type: str, guess_type:
    bool, expected_mime_type: Optional[str]) ->None:
    """Tests mimetype inference based on options and path.

    An explicit ``mime_type`` always wins; otherwise the type is guessed
    from the path suffix only when ``guess_type`` is True.
    """
    blob = Blob.from_path(path, mime_type=mime_type, guess_type=guess_type)
    assert blob.mimetype == expected_mime_type
|
Tests mimetype inference based on options and path.
|
output_keys
|
"""Expect input key.
:meta private:
"""
_output_keys = super().output_keys
if self.return_intermediate_steps:
_output_keys = _output_keys + ['intermediate_steps']
return _output_keys
|
@property
def output_keys(self) ->List[str]:
    """Expect input key.

    :meta private:
    """
    # Extend the parent's output keys when intermediate steps are exposed.
    keys = super().output_keys
    if self.return_intermediate_steps:
        keys = keys + ['intermediate_steps']
    return keys
|
Expect input key.
:meta private:
|
test_empty_board
|
"""
Test loading a board with no cards.
"""
trello_loader = TrelloLoader.from_credentials('Research', api_key='API_KEY',
token='API_TOKEN')
documents = trello_loader.load()
self.assertEqual(len(documents), 0, 'Empty board returns an empty list.')
|
def test_empty_board(self) ->None:
    """
    Test loading a board with no cards.
    """
    loader = TrelloLoader.from_credentials('Research', api_key='API_KEY',
        token='API_TOKEN')
    docs = loader.load()
    self.assertEqual(len(docs), 0, 'Empty board returns an empty list.')
|
Test loading a board with no cards.
|
validate_environment
|
"""Validate that python package exists in environment."""
try:
from manifest import Manifest
if not isinstance(values['client'], Manifest):
raise ValueError
except ImportError:
raise ImportError(
'Could not import manifest python package. Please install it with `pip install manifest-ml`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that python package exists in environment.

    Raises:
        ImportError: If the ``manifest`` package is not installed.
        ValueError: If ``values['client']`` is not a Manifest instance.
    """
    try:
        from manifest import Manifest
    except ImportError:
        raise ImportError(
            'Could not import manifest python package. Please install it with `pip install manifest-ml`.'
            )
    # Moved out of the try block so a type error cannot be confused with
    # an import failure; the bare `raise ValueError` previously escaped
    # with no message at all.
    if not isinstance(values['client'], Manifest):
        raise ValueError('`client` must be an instance of manifest.Manifest')
    return values
|
Validate that python package exists in environment.
|
flatten
|
"""Utility to recursively flatten a list of child runs in a run.
:param child_runs: The list of child runs to flatten.
:return: The flattened list of runs.
"""
if child_runs is None:
return []
result = []
for item in child_runs:
child_runs = item.pop('child_runs', [])
result.append(item)
result.extend(flatten(child_runs))
return result
|
def flatten(child_runs: List[Dict[str, Any]]) ->List[Dict[str, Any]]:
    """Utility to recursively flatten a list of child runs in a run.

    :param child_runs: The list of child runs to flatten.
    :return: The flattened list of runs.
    """
    if child_runs is None:
        return []
    flattened: List[Dict[str, Any]] = []
    for run in child_runs:
        # Detach nested children before appending the parent, then flatten
        # them right after it so the ordering is depth-first.
        nested = run.pop('child_runs', [])
        flattened.append(run)
        flattened.extend(flatten(nested))
    return flattened
|
Utility to recursively flatten a list of child runs in a run.
:param child_runs: The list of child runs to flatten.
:return: The flattened list of runs.
|
evaluation_name
|
return 'json_equality'
|
@property
def evaluation_name(self) ->str:
    """Name under which this evaluator's results are reported."""
    return 'json_equality'
| null |
on_chat_model_start
|
"""Run when LLM starts running."""
values = serialized.get('id')
if values:
for value in values:
if value == 'ChatOpenAI':
self.is_chat_openai_model = True
break
if self.is_chat_openai_model:
invocation_params = kwargs.get('invocation_params')
if invocation_params:
model_name = invocation_params.get('model_name')
if model_name:
self.chat_openai_model_name = model_name
prompt_tokens = 0
for message_list in messages:
message_string = ' '.join(cast(str, msg.content) for msg in
message_list)
num_tokens = get_num_tokens(message_string,
openai_model_name=self.chat_openai_model_name)
prompt_tokens += num_tokens
self._send_to_infino('prompt_tokens', prompt_tokens)
if self.verbose:
print(
f'on_chat_model_start: is_chat_openai_model= {self.is_chat_openai_model}, chat_openai_model_name={self.chat_openai_model_name}'
)
prompt = ' '.join(cast(str, msg.content) for sublist in messages for msg in
sublist)
self._send_to_infino('prompt', prompt, is_ts=False)
self.error = 0
self.start_time = time.time()
|
def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[
    List[BaseMessage]], **kwargs: Any) ->None:
    """Run when LLM starts running.

    Records the prompt text to Infino, estimates prompt tokens for
    ChatOpenAI models, and starts the latency timer.
    """
    # Detect ChatOpenAI from the serialized id path so token counting can
    # use the matching tokenizer.
    values = serialized.get('id')
    if values:
        for value in values:
            if value == 'ChatOpenAI':
                self.is_chat_openai_model = True
                break
    if self.is_chat_openai_model:
        invocation_params = kwargs.get('invocation_params')
        if invocation_params:
            model_name = invocation_params.get('model_name')
            # Tokens are only counted when the model name is known.
            if model_name:
                self.chat_openai_model_name = model_name
                prompt_tokens = 0
                for message_list in messages:
                    message_string = ' '.join(cast(str, msg.content) for
                        msg in message_list)
                    num_tokens = get_num_tokens(message_string,
                        openai_model_name=self.chat_openai_model_name)
                    prompt_tokens += num_tokens
                # NOTE(review): uses _send_to_infino's default is_ts,
                # unlike the explicit is_ts=False for the prompt below.
                self._send_to_infino('prompt_tokens', prompt_tokens)
        if self.verbose:
            print(
                f'on_chat_model_start: is_chat_openai_model= {self.is_chat_openai_model}, chat_openai_model_name={self.chat_openai_model_name}'
                )
    # Store the full flattened prompt as a non-time-series record.
    prompt = ' '.join(cast(str, msg.content) for sublist in messages for
        msg in sublist)
    self._send_to_infino('prompt', prompt, is_ts=False)
    self.error = 0
    self.start_time = time.time()
|
Run when LLM starts running.
|
test_human_ai_dialogue
|
messages = [HumanMessage(content='usr-msg-1'), AIMessage(content='ai-msg-1'
), HumanMessage(content='usr-msg-2'), AIMessage(content='ai-msg-2'),
HumanMessage(content='usr-msg-3')]
actual = model_cfg_sys_msg.predict_messages(messages).content
expected = """<s>[INST] <<SYS>>
sys-msg
<</SYS>>
usr-msg-1 [/INST] ai-msg-1 </s><s>[INST] usr-msg-2 [/INST] ai-msg-2 </s><s>[INST] usr-msg-3 [/INST]"""
assert actual == expected
|
def test_human_ai_dialogue(model_cfg_sys_msg: Llama2Chat) ->None:
    """A multi-turn human/AI dialogue renders in Llama-2 [INST] format.

    The system message is wrapped in <<SYS>> markers inside the first
    [INST] block and completed AI turns are closed with </s>.
    """
    messages = [HumanMessage(content='usr-msg-1'), AIMessage(content=
        'ai-msg-1'), HumanMessage(content='usr-msg-2'), AIMessage(content=
        'ai-msg-2'), HumanMessage(content='usr-msg-3')]
    actual = model_cfg_sys_msg.predict_messages(messages).content
    expected = """<s>[INST] <<SYS>>
sys-msg
<</SYS>>
usr-msg-1 [/INST] ai-msg-1 </s><s>[INST] usr-msg-2 [/INST] ai-msg-2 </s><s>[INST] usr-msg-3 [/INST]"""
    assert actual == expected
| null |
test_qdrant_embedding_interface_raises_value_error
|
"""Test Qdrant requires only one method for embeddings."""
from qdrant_client import QdrantClient
client = QdrantClient(':memory:')
collection_name = uuid.uuid4().hex
with pytest.raises(ValueError):
Qdrant(client, collection_name, embeddings=embeddings,
embedding_function=embedding_function)
|
@pytest.mark.parametrize(['embeddings', 'embedding_function'], [(
    ConsistentFakeEmbeddings(), ConsistentFakeEmbeddings().embed_query), (
    None, None)])
def test_qdrant_embedding_interface_raises_value_error(embeddings: Optional
    [Embeddings], embedding_function: Optional[Callable]) ->None:
    """Test Qdrant requires only one method for embeddings."""
    from qdrant_client import QdrantClient
    # Both cases are invalid: both providers set, or neither.
    local_client = QdrantClient(':memory:')
    name = uuid.uuid4().hex
    with pytest.raises(ValueError):
        Qdrant(local_client, name, embeddings=embeddings,
            embedding_function=embedding_function)
|
Test Qdrant requires only one method for embeddings.
|
is_supported_location
|
"""Return whether the provided location is supported."""
try:
return APIPropertyLocation.from_str(location) in SUPPORTED_LOCATIONS
except ValueError:
return False
|
@staticmethod
def is_supported_location(location: str) ->bool:
    """Return whether the provided location is supported."""
    try:
        parsed = APIPropertyLocation.from_str(location)
    except ValueError:
        # Unknown location strings are simply unsupported.
        return False
    return parsed in SUPPORTED_LOCATIONS
|
Return whether the provided location is supported.
|
__init__
|
self.connection_string = connection_string
self.ndims = ndims
self.table_name = table_name
self.embedding_function = embedding_function
self.pre_delete_table = pre_delete_table
self.logger = logger or logging.getLogger(__name__)
self.__post_init__()
|
def __init__(self, connection_string: str, embedding_function: Embeddings,
    ndims: int=ADA_TOKEN_COUNT, table_name: str=
    _LANGCHAIN_DEFAULT_TABLE_NAME, pre_delete_table: bool=False, logger:
    Optional[logging.Logger]=None) ->None:
    """Store connection settings and run post-init setup.

    Args:
        connection_string: Database connection string.
        embedding_function: Embeddings implementation used for vectors.
        ndims: Dimensionality of the stored embedding vectors.
        table_name: Name of the table backing this vectorstore.
        pre_delete_table: Stored flag consumed by post-init setup.
        logger: Optional logger; defaults to this module's logger.
    """
    self.connection_string = connection_string
    self.embedding_function = embedding_function
    self.ndims = ndims
    self.table_name = table_name
    self.pre_delete_table = pre_delete_table
    # Fall back to a module-level logger when none is supplied.
    self.logger = logger or logging.getLogger(__name__)
    self.__post_init__()
| null |
actual_decorator
|
if condition:
return decorator(func)
return func
|
def actual_decorator(func: Callable[[Any], Any]) ->Callable[[Any], Any]:
    """Apply ``decorator`` only when ``condition`` (enclosing scope) holds."""
    return decorator(func) if condition else func
| null |
max_marginal_relevance_search_by_vector
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter on metadata properties, e.g.
{
"str_property": "foo",
"int_property": 123
}
brute_force: Whether to use brute force search. Defaults to False.
fraction_lists_to_search: Optional percentage of lists to search,
must be in range 0.0 and 1.0, exclusive.
            If None, uses the service's default, which is 0.05.
Returns:
List of Documents selected by maximal marginal relevance.
"""
doc_tuples = self._search_with_score_and_embeddings_by_vector(embedding,
fetch_k, filter, brute_force, fraction_lists_to_search)
doc_embeddings = [d[1] for d in doc_tuples]
mmr_doc_indexes = maximal_marginal_relevance(np.array(embedding),
doc_embeddings, lambda_mult=lambda_mult, k=k)
return [doc_tuples[i][0] for i in mmr_doc_indexes]
|
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k:
    int=DEFAULT_TOP_K, fetch_k: int=DEFAULT_TOP_K * 5, lambda_mult: float=
    0.5, filter: Optional[Dict[str, Any]]=None, brute_force: bool=False,
    fraction_lists_to_search: Optional[float]=None, **kwargs: Any) ->List[
    Document]:
    """Return docs selected using the maximal marginal relevance.
    Maximal marginal relevance optimizes for similarity to query AND diversity
    among selected documents.
    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to DEFAULT_TOP_K.
        fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            Defaults to DEFAULT_TOP_K * 5.
        lambda_mult: Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5.
        filter: Filter on metadata properties, e.g.
            {
                "str_property": "foo",
                "int_property": 123
            }
        brute_force: Whether to use brute force search. Defaults to False.
        fraction_lists_to_search: Optional percentage of lists to search,
            must be in range 0.0 and 1.0, exclusive.
            If None, uses service's default which is 0.05.
    Returns:
        List of Documents selected by maximal marginal relevance.
    """
    # Over-fetch fetch_k candidates (with their embeddings) so the MMR
    # re-ranking has room to trade similarity for diversity.
    doc_tuples = self._search_with_score_and_embeddings_by_vector(embedding,
        fetch_k, filter, brute_force, fraction_lists_to_search)
    doc_embeddings = [d[1] for d in doc_tuples]
    # Pick k diverse candidates, then map chosen indexes back to documents.
    mmr_doc_indexes = maximal_marginal_relevance(np.array(embedding),
        doc_embeddings, lambda_mult=lambda_mult, k=k)
    return [doc_tuples[i][0] for i in mmr_doc_indexes]
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter on metadata properties, e.g.
{
"str_property": "foo",
"int_property": 123
}
brute_force: Whether to use brute force search. Defaults to False.
fraction_lists_to_search: Optional percentage of lists to search,
must be in range 0.0 and 1.0, exclusive.
        If None, uses service's default which is 0.05.
Returns:
List of Documents selected by maximal marginal relevance.
|
_resolve_prompt
|
expected_input_vars = {'input', 'output', 'criteria'}
prompt_ = prompt or PROMPT
if expected_input_vars != set(prompt_.input_variables):
raise ValueError(
f'Input variables should be {expected_input_vars}, but got {prompt_.input_variables}'
)
return prompt_
|
@classmethod
def _resolve_prompt(cls, prompt: Optional[BasePromptTemplate]=None
    ) ->BasePromptTemplate:
    """Return the given prompt (or the module default) after checking its inputs."""
    resolved = prompt or PROMPT
    required = {'input', 'output', 'criteria'}
    if set(resolved.input_variables) != required:
        raise ValueError(
            f'Input variables should be {required}, but got {resolved.input_variables}'
        )
    return resolved
| null |
get_format_instructions
|
return FORMAT_INSTRUCTIONS
|
def get_format_instructions(self) ->str:
    """Return the module-level ``FORMAT_INSTRUCTIONS`` constant."""
    return FORMAT_INSTRUCTIONS
| null |
_import_self_hosted_hugging_face
|
from langchain_community.llms.self_hosted_hugging_face import SelfHostedHuggingFaceLLM
return SelfHostedHuggingFaceLLM
|
def _import_self_hosted_hugging_face() ->Any:
    """Lazily import and return the ``SelfHostedHuggingFaceLLM`` class.

    The import is deferred to call time so the cost is not paid at module load.
    """
    from langchain_community.llms.self_hosted_hugging_face import SelfHostedHuggingFaceLLM
    return SelfHostedHuggingFaceLLM
| null |
test_dolly_call
|
"""Test valid call to dolly-v2."""
llm = AzureMLOnlineEndpoint(endpoint_api_key=os.getenv(
'DOLLY_ENDPOINT_API_KEY'), endpoint_url=os.getenv('DOLLY_ENDPOINT_URL'),
deployment_name=os.getenv('DOLLY_DEPLOYMENT_NAME'), content_formatter=
DollyContentFormatter())
output = llm('Foo')
assert isinstance(output, str)
|
def test_dolly_call() ->None:
    """Test valid call to dolly-v2."""
    formatter = DollyContentFormatter()
    llm = AzureMLOnlineEndpoint(
        endpoint_api_key=os.getenv('DOLLY_ENDPOINT_API_KEY'),
        endpoint_url=os.getenv('DOLLY_ENDPOINT_URL'),
        deployment_name=os.getenv('DOLLY_DEPLOYMENT_NAME'),
        content_formatter=formatter,
    )
    output = llm('Foo')
    assert isinstance(output, str)
|
Test valid call to dolly-v2.
|
_get_metadata
|
return {'repo': self.repo, 'ref': self.ref, 'path': self.path}
|
def _get_metadata(self) ->dict:
return {'repo': self.repo, 'ref': self.ref, 'path': self.path}
| null |
test_function_message_chunks
|
assert FunctionMessageChunk(name='hello', content='I am'
) + FunctionMessageChunk(name='hello', content=' indeed.'
) == FunctionMessageChunk(name='hello', content='I am indeed.'
), 'FunctionMessageChunk + FunctionMessageChunk should be a FunctionMessageChunk'
with pytest.raises(ValueError):
FunctionMessageChunk(name='hello', content='I am') + FunctionMessageChunk(
name='bye', content=' indeed.')
|
def test_function_message_chunks() ->None:
    """Chunks with matching names concatenate; mismatched names raise."""
    left = FunctionMessageChunk(name='hello', content='I am')
    right = FunctionMessageChunk(name='hello', content=' indeed.')
    merged = FunctionMessageChunk(name='hello', content='I am indeed.')
    assert left + right == merged, 'FunctionMessageChunk + FunctionMessageChunk should be a FunctionMessageChunk'
    with pytest.raises(ValueError):
        _ = FunctionMessageChunk(name='hello', content='I am'
            ) + FunctionMessageChunk(name='bye', content=' indeed.')
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
values['konko_api_key'] = convert_to_secret_str(get_from_dict_or_env(values,
'konko_api_key', 'KONKO_API_KEY'))
try:
import konko
except ImportError:
raise ValueError(
'Could not import konko python package. Please install it with `pip install konko`.'
)
try:
values['client'] = konko.ChatCompletion
except AttributeError:
raise ValueError(
'`konko` has no `ChatCompletion` attribute, this is likely due to an old version of the konko package. Try upgrading it with `pip install --upgrade konko`.'
)
if values['n'] < 1:
raise ValueError('n must be at least 1.')
if values['n'] > 1 and values['streaming']:
raise ValueError('n must be 1 when streaming.')
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment.

    Resolves the Konko API key (field or KONKO_API_KEY env var) into a
    secret string, binds the ``ChatCompletion`` client onto ``values``,
    and sanity-checks ``n`` against streaming.
    """
    # Pull the key from the values dict or the environment, wrapped as a secret.
    values['konko_api_key'] = convert_to_secret_str(get_from_dict_or_env(
        values, 'konko_api_key', 'KONKO_API_KEY'))
    try:
        import konko
    except ImportError:
        raise ValueError(
            'Could not import konko python package. Please install it with `pip install konko`.'
            )
    try:
        # A missing attribute indicates an outdated konko release.
        values['client'] = konko.ChatCompletion
    except AttributeError:
        raise ValueError(
            '`konko` has no `ChatCompletion` attribute, this is likely due to an old version of the konko package. Try upgrading it with `pip install --upgrade konko`.'
            )
    if values['n'] < 1:
        raise ValueError('n must be at least 1.')
    if values['n'] > 1 and values['streaming']:
        raise ValueError('n must be 1 when streaming.')
    return values
|
Validate that api key and python package exists in environment.
|
_process_response
|
"""General-purpose response processing for single responses and streams"""
if hasattr(response, 'json'):
try:
return [response.json()]
except json.JSONDecodeError:
response = str(response.__dict__)
if isinstance(response, str):
msg_list = []
for msg in response.split('\n\n'):
if '{' not in msg:
continue
msg_list += [json.loads(msg[msg.find('{'):])]
return msg_list
raise ValueError(f'Received ill-formed response: {response}')
|
def _process_response(self, response: Union[str, Response]) ->List[dict]:
    """General-purpose response processing for single responses and streams"""
    # Response-like object: prefer its parsed JSON body.
    if hasattr(response, 'json'):
        try:
            return [response.json()]
        except json.JSONDecodeError:
            # Fall back to the raw attribute dict and parse it like a stream.
            response = str(response.__dict__)
    if isinstance(response, str):
        # Stream format: blank-line-separated messages, each carrying a JSON
        # payload that starts at the first '{'.
        return [json.loads(chunk[chunk.find('{'):]) for chunk in
            response.split('\n\n') if '{' in chunk]
    raise ValueError(f'Received ill-formed response: {response}')
|
General-purpose response processing for single responses and streams
|
parse
|
stripped_request_params = None
splitted_request = request.strip().split(':')
if len(splitted_request) != 2:
raise OutputParserException(
f"Request '{request}' is not correctly formatted. Please refer to the format instructions."
)
result = {}
try:
request_type, request_params = splitted_request
if request_type in {'Invalid column', 'Invalid operation'}:
raise OutputParserException(
f'{request}. Please check the format instructions.')
array_exists = re.search('(\\[.*?\\])', request_params)
if array_exists:
parsed_array, stripped_request_params = self.parse_array(array_exists
.group(1), request_params)
if request_type == 'column':
filtered_df = self.dataframe[self.dataframe.index.isin(
parsed_array)]
if len(parsed_array) == 1:
result[stripped_request_params] = filtered_df[
stripped_request_params].iloc[parsed_array[0]]
else:
result[stripped_request_params] = filtered_df[
stripped_request_params]
elif request_type == 'row':
filtered_df = self.dataframe[self.dataframe.columns.
intersection(parsed_array)]
if len(parsed_array) == 1:
result[stripped_request_params] = filtered_df.iloc[int(
stripped_request_params)][parsed_array[0]]
else:
result[stripped_request_params] = filtered_df.iloc[int(
stripped_request_params)]
else:
filtered_df = self.dataframe[self.dataframe.index.isin(
parsed_array)]
result[request_type] = getattr(filtered_df[
stripped_request_params], request_type)()
elif request_type == 'column':
result[request_params] = self.dataframe[request_params]
elif request_type == 'row':
result[request_params] = self.dataframe.iloc[int(request_params)]
else:
result[request_type] = getattr(self.dataframe[request_params],
request_type)()
except (AttributeError, IndexError, KeyError):
if request_type not in {'column', 'row'}:
raise OutputParserException(
f"Unsupported request type '{request_type}'. Please check the format instructions."
)
raise OutputParserException(
f'Requested index {request_params if stripped_request_params is None else stripped_request_params} is out of bounds.'
)
return result
|
def parse(self, request: str) ->Dict[str, Any]:
    """Parse a ``'<type>:<params>'`` request against ``self.dataframe``.

    ``type`` is 'column', 'row', or a DataFrame operation name (e.g.
    'mean'); ``params`` may carry an optional '[...]' array restricting
    the rows/columns considered. Returns a single-entry dict mapping the
    request back to the resulting value or Series.

    Raises:
        OutputParserException: on malformed requests, unknown request
            types, or out-of-bounds indexes/columns.
    """
    stripped_request_params = None
    splitted_request = request.strip().split(':')
    if len(splitted_request) != 2:
        raise OutputParserException(
            f"Request '{request}' is not correctly formatted. Please refer to the format instructions."
            )
    result = {}
    try:
        request_type, request_params = splitted_request
        if request_type in {'Invalid column', 'Invalid operation'}:
            # Pre-flagged invalid requests are surfaced as-is.
            raise OutputParserException(
                f'{request}. Please check the format instructions.')
        # An optional '[...]' array in the params narrows the lookup.
        array_exists = re.search('(\\[.*?\\])', request_params)
        if array_exists:
            parsed_array, stripped_request_params = self.parse_array(
                array_exists.group(1), request_params)
            if request_type == 'column':
                # Restrict rows to the parsed index values, then select column.
                filtered_df = self.dataframe[self.dataframe.index.isin(
                    parsed_array)]
                if len(parsed_array) == 1:
                    result[stripped_request_params] = filtered_df[
                        stripped_request_params].iloc[parsed_array[0]]
                else:
                    result[stripped_request_params] = filtered_df[
                        stripped_request_params]
            elif request_type == 'row':
                # Restrict columns to the parsed names, then select the row.
                filtered_df = self.dataframe[self.dataframe.columns.
                    intersection(parsed_array)]
                if len(parsed_array) == 1:
                    result[stripped_request_params] = filtered_df.iloc[int(
                        stripped_request_params)][parsed_array[0]]
                else:
                    result[stripped_request_params] = filtered_df.iloc[int(
                        stripped_request_params)]
            else:
                # Operation (e.g. 'mean') over the restricted rows.
                filtered_df = self.dataframe[self.dataframe.index.isin(
                    parsed_array)]
                result[request_type] = getattr(filtered_df[
                    stripped_request_params], request_type)()
        elif request_type == 'column':
            result[request_params] = self.dataframe[request_params]
        elif request_type == 'row':
            result[request_params] = self.dataframe.iloc[int(request_params)]
        else:
            # Operation over a whole column, e.g. 'mean:age'.
            result[request_type] = getattr(self.dataframe[request_params],
                request_type)()
    except (AttributeError, IndexError, KeyError):
        # AttributeError -> unknown operation; Index/KeyError -> bad index.
        if request_type not in {'column', 'row'}:
            raise OutputParserException(
                f"Unsupported request type '{request_type}'. Please check the format instructions."
                )
        raise OutputParserException(
            f'Requested index {request_params if stripped_request_params is None else stripped_request_params} is out of bounds.'
            )
    return result
| null |
__init__
|
"""Initialize by passing in init function (default: `None`).
Args:
init_func (Optional[Callable[[Any], None]]): init `GPTCache` function
(default: `None`)
Example:
.. code-block:: python
# Initialize GPTCache with a custom init function
import gptcache
from gptcache.processor.pre import get_prompt
from gptcache.manager.factory import get_data_manager
from langchain_community.globals import set_llm_cache
# Avoid multiple caches using the same file,
causing different llm model caches to affect each other
            def init_gptcache(cache_obj: gptcache.Cache, llm: str):
cache_obj.init(
pre_embedding_func=get_prompt,
data_manager=manager_factory(
manager="map",
data_dir=f"map_cache_{llm}"
),
)
set_llm_cache(GPTCache(init_gptcache))
"""
try:
import gptcache
except ImportError:
raise ImportError(
'Could not import gptcache python package. Please install it with `pip install gptcache`.'
)
self.init_gptcache_func: Union[Callable[[Any, str], None], Callable[[Any],
None], None] = init_func
self.gptcache_dict: Dict[str, Any] = {}
|
def __init__(self, init_func: Union[Callable[[Any, str], None], Callable[[
    Any], None], None]=None):
    """Initialize by passing in init function (default: `None`).

    Args:
        init_func (Optional[Callable[..., None]]): `GPTCache` initialization
            function taking either ``(cache_obj,)`` or ``(cache_obj, llm)``
            (default: `None`).

    Example:
    .. code-block:: python

        # Initialize GPTCache with a custom init function
        import gptcache
        from gptcache.processor.pre import get_prompt
        from gptcache.manager.factory import manager_factory
        from langchain_community.globals import set_llm_cache

        # Avoid multiple caches using the same file,
        # causing different llm model caches to affect each other
        def init_gptcache(cache_obj: gptcache.Cache, llm: str):
            cache_obj.init(
                pre_embedding_func=get_prompt,
                data_manager=manager_factory(
                    manager="map",
                    data_dir=f"map_cache_{llm}"
                ),
            )

        set_llm_cache(GPTCache(init_gptcache))
    """
    # Fail fast with install guidance if gptcache is absent.
    try:
        import gptcache
    except ImportError:
        raise ImportError(
            'Could not import gptcache python package. Please install it with `pip install gptcache`.'
            )
    # Kept for later per-LLM cache creation; may take one or two arguments.
    self.init_gptcache_func: Union[Callable[[Any, str], None], Callable[[
        Any], None], None] = init_func
    # Maps an LLM identifier string to its initialized cache object.
    self.gptcache_dict: Dict[str, Any] = {}
|
Initialize by passing in init function (default: `None`).
Args:
init_func (Optional[Callable[[Any], None]]): init `GPTCache` function
(default: `None`)
Example:
.. code-block:: python
# Initialize GPTCache with a custom init function
import gptcache
from gptcache.processor.pre import get_prompt
from gptcache.manager.factory import get_data_manager
from langchain_community.globals import set_llm_cache
# Avoid multiple caches using the same file,
causing different llm model caches to affect each other
        def init_gptcache(cache_obj: gptcache.Cache, llm: str):
cache_obj.init(
pre_embedding_func=get_prompt,
data_manager=manager_factory(
manager="map",
data_dir=f"map_cache_{llm}"
),
)
set_llm_cache(GPTCache(init_gptcache))
|
embed_query
|
"""Return random floats."""
return list(np.random.uniform(0, 1, 10))
|
def embed_query(self, text: str) ->List[float]:
    """Return a fake 10-dimensional embedding of uniform random floats."""
    samples = np.random.uniform(0, 1, 10)
    return list(samples)
|
Return random floats.
|
format_docs
|
loaded_docs = [load(doc) for doc in docs]
return '\n'.join([f"""<Document id={i}>
{doc.page_content}
</Document>""" for
i, doc in enumerate(loaded_docs)])
|
def format_docs(docs: list) ->str:
    """Render each (deserialized) doc as an id-tagged <Document> section."""
    sections = []
    for i, doc in enumerate(load(d) for d in docs):
        sections.append(f'<Document id={i}>\n{doc.page_content}\n</Document>')
    return '\n'.join(sections)
| null |
test_promptlayer_openai_call
|
"""Test valid call to promptlayer openai."""
llm = PromptLayerOpenAI(max_tokens=10)
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_promptlayer_openai_call() ->None:
    """Test valid call to promptlayer openai."""
    output = PromptLayerOpenAI(max_tokens=10)('Say foo:')
    assert isinstance(output, str)
|
Test valid call to promptlayer openai.
|
from_colored_object_prompt
|
"""Load PAL from colored object prompt.
Args:
llm (BaseLanguageModel): The language model to use for generating code.
Returns:
PALChain: An instance of PALChain.
"""
llm_chain = LLMChain(llm=llm, prompt=COLORED_OBJECT_PROMPT)
code_validations = PALValidation(solution_expression_name='answer',
solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_VARIABLE)
return cls(llm_chain=llm_chain, stop='\n\n\n', get_answer_expr=
'print(answer)', code_validations=code_validations, **kwargs)
|
@classmethod
def from_colored_object_prompt(cls, llm: BaseLanguageModel, **kwargs: Any
    ) ->PALChain:
    """Load PAL from colored object prompt.
    Args:
        llm (BaseLanguageModel): The language model to use for generating code.
    Returns:
        PALChain: An instance of PALChain.
    """
    validations = PALValidation(
        solution_expression_name='answer',
        solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_VARIABLE,
    )
    chain = LLMChain(llm=llm, prompt=COLORED_OBJECT_PROMPT)
    return cls(
        llm_chain=chain,
        stop='\n\n\n',
        get_answer_expr='print(answer)',
        code_validations=validations,
        **kwargs,
    )
|
Load PAL from colored object prompt.
Args:
llm (BaseLanguageModel): The language model to use for generating code.
Returns:
PALChain: An instance of PALChain.
|
__iter__
|
return self
|
def __iter__(self) ->'LineIterator':
    """Return self, making the LineIterator its own iterator."""
    return self
| null |
test_solve_sudoku
|
"""Test simple question that should not need python."""
tot_chain = ToTChain(llm=fake_llm_sudoku, checker=SudokuChecker(), k=len(
solutions), c=4, tot_strategy_class=SampleCoTStrategy)
output = tot_chain.run({'problem_description': ''})
assert output == sudoku_solution
|
@pytest.mark.requires('jinja2')
def test_solve_sudoku(fake_llm_sudoku: FakeLLM) ->None:
    """Test simple question that should not need python."""
    chain = ToTChain(
        llm=fake_llm_sudoku,
        checker=SudokuChecker(),
        k=len(solutions),
        c=4,
        tot_strategy_class=SampleCoTStrategy,
    )
    assert chain.run({'problem_description': ''}) == sudoku_solution
|
Test simple question that should not need python.
|
_get_entity_action
|
prompt = PromptTemplate.from_template(
'What is the {entity} doing in the following observation? {observation}' +
'\nThe {entity} is')
return self.chain(prompt).run(entity=entity_name, observation=observation
).strip()
|
def _get_entity_action(self, observation: str, entity_name: str) ->str:
    """Ask the underlying chain what ``entity_name`` is doing in ``observation``."""
    template = (
        'What is the {entity} doing in the following observation? {observation}'
        '\nThe {entity} is'
    )
    prompt = PromptTemplate.from_template(template)
    response = self.chain(prompt).run(entity=entity_name, observation=observation)
    return response.strip()
| null |
test_ai21_call
|
"""Test valid call to ai21."""
llm = AI21(maxTokens=10)
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_ai21_call() ->None:
    """Test valid call to ai21."""
    output = AI21(maxTokens=10)('Say foo:')
    assert isinstance(output, str)
|
Test valid call to ai21.
|
test_visit_comparison_nin
|
comp = Comparison(comparator=Comparator.NIN, attribute='name', value='foo')
expected = {'name': {'$nin': ['foo']}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
|
def test_visit_comparison_nin() ->None:
    """A NIN comparison translates to a Mongo-style ``$nin`` filter."""
    comparison = Comparison(comparator=Comparator.NIN, attribute='name',
        value='foo')
    assert DEFAULT_TRANSLATOR.visit_comparison(comparison) == {'name': {
        '$nin': ['foo']}}
| null |
split_text
|
"""Split HTML text string
Args:
text: HTML text
"""
return self.split_text_from_file(StringIO(text))
|
def split_text(self, text: str) ->List[Document]:
    """Split an HTML string by wrapping it in an in-memory text file.

    Args:
        text: HTML text
    """
    buffer = StringIO(text)
    return self.split_text_from_file(buffer)
|
Split HTML text string
Args:
text: HTML text
|
wrong_output_keys
|
assert 'foo' in inputs
assert 'baz' in inputs
return {'not foo': 'foo', 'not baz': 'baz'}
|
def wrong_output_keys(inputs: dict) ->dict:
    """Transform whose output keys deliberately do not match its declared ones."""
    for required in ('foo', 'baz'):
        assert required in inputs
    return {'not foo': 'foo', 'not baz': 'baz'}
| null |
lc_secrets
|
return {'dashscope_api_key': 'DASHSCOPE_API_KEY'}
|
@property
def lc_secrets(self) ->Dict[str, str]:
    """Map the secret constructor field to its environment variable name."""
    return dict(dashscope_api_key='DASHSCOPE_API_KEY')
| null |
_import_ifttt
|
from langchain_community.tools.ifttt import IFTTTWebhook
return IFTTTWebhook
|
def _import_ifttt() ->Any:
    """Lazily import and return the ``IFTTTWebhook`` tool class.

    The import is deferred to call time so the cost is not paid at module load.
    """
    from langchain_community.tools.ifttt import IFTTTWebhook
    return IFTTTWebhook
| null |
_identifying_params
|
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {**{'endpoint_url': self.endpoint_url, 'task': self.task}, **{
'model_kwargs': _model_kwargs}}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {**{'endpoint_url': self.endpoint_url, 'task': self.task}, **{
'model_kwargs': _model_kwargs}}
|
Get the identifying parameters.
|
_llm_type
|
return 'vertexai_model_garden'
|
@property
def _llm_type(self) ->str:
    """Return the constant type identifier 'vertexai_model_garden'."""
    return 'vertexai_model_garden'
| null |
on_chain_end
|
self.on_chain_end_common()
|
def on_chain_end(self, *args: Any, **kwargs: Any) ->Any:
    """Forward any chain-end notification to the shared common handler.

    Positional and keyword arguments are accepted but ignored.
    """
    handler = self.on_chain_end_common
    handler()
| null |
add_task
|
self.task_list.append(task)
|
def add_task(self, task: Dict) ->None:
    """Append ``task`` to the end of the pending task list."""
    self.task_list.append(task)
| null |
ngram_overlap_score
|
"""Compute ngram overlap score of source and example as sentence_bleu score.
Use sentence_bleu with method1 smoothing function and auto reweighting.
Return float value between 0.0 and 1.0 inclusive.
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu
hypotheses = source[0].split()
references = [s.split() for s in example]
return float(sentence_bleu(references, hypotheses, smoothing_function=
SmoothingFunction().method1, auto_reweigh=True))
|
def ngram_overlap_score(source: List[str], example: List[str]) ->float:
    """Compute ngram overlap score of source and example as sentence_bleu score.
    Use sentence_bleu with method1 smoothing function and auto reweighting.
    Return float value between 0.0 and 1.0 inclusive.
    https://www.nltk.org/_modules/nltk/translate/bleu_score.html
    https://aclanthology.org/P02-1040.pdf
    """
    from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu
    hypothesis_tokens = source[0].split()
    reference_tokens = [sentence.split() for sentence in example]
    score = sentence_bleu(reference_tokens, hypothesis_tokens,
        smoothing_function=SmoothingFunction().method1, auto_reweigh=True)
    return float(score)
|
Compute ngram overlap score of source and example as sentence_bleu score.
Use sentence_bleu with method1 smoothing function and auto reweighting.
Return float value between 0.0 and 1.0 inclusive.
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
|
deprecated_method
|
"""original doc"""
return 'This is a deprecated method.'
|
# NOTE(review): fixture exercising the @deprecated decorator; the minimal
# docstring below is presumably asserted against elsewhere — do not reword it.
@deprecated(since='2.0.0', removal='3.0.0')
def deprecated_method(self) ->str:
    """original doc"""
    return 'This is a deprecated method.'
|
original doc
|
test_simple_question
|
"""Test simple question that should not need python."""
question = 'What is 1 plus 1?'
output = fake_llm_math_chain.run(question)
assert output == 'Answer: 2'
|
@pytest.mark.requires('numexpr')
def test_simple_question(fake_llm_math_chain: LLMMathChain) ->None:
    """Test simple question that should not need python."""
    result = fake_llm_math_chain.run('What is 1 plus 1?')
    assert result == 'Answer: 2'
|
Test simple question that should not need python.
|
lazy_parse
|
"""Parse a Microsoft Word document into the Document iterator.
Args:
blob: The blob to parse.
Returns: An iterator of Documents.
"""
try:
from unstructured.partition.doc import partition_doc
from unstructured.partition.docx import partition_docx
except ImportError as e:
raise ImportError(
'Could not import unstructured, please install with `pip install unstructured`.'
) from e
mime_type_parser = {'application/msword': partition_doc,
'application/vnd.openxmlformats-officedocument.wordprocessingml.document':
partition_docx}
if blob.mimetype not in ('application/msword',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document'):
raise ValueError('This blob type is not supported for this parser.')
with blob.as_bytes_io() as word_document:
elements = mime_type_parser[blob.mimetype](file=word_document)
text = '\n\n'.join([str(el) for el in elements])
metadata = {'source': blob.source}
yield Document(page_content=text, metadata=metadata)
|
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
    """Parse a Microsoft Word document into the Document iterator.
    Args:
        blob: The blob to parse.
    Returns: An iterator of Documents.
    """
    try:
        from unstructured.partition.doc import partition_doc
        from unstructured.partition.docx import partition_docx
    except ImportError as e:
        raise ImportError(
            'Could not import unstructured, please install with `pip install unstructured`.'
            ) from e
    # One partitioner per supported Word mimetype (legacy .doc and .docx).
    mime_type_parser = {'application/msword': partition_doc,
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
        : partition_docx}
    if blob.mimetype not in mime_type_parser:
        raise ValueError('This blob type is not supported for this parser.')
    partition = mime_type_parser[blob.mimetype]
    with blob.as_bytes_io() as word_document:
        elements = partition(file=word_document)
    text = '\n\n'.join(str(el) for el in elements)
    metadata = {'source': blob.source}
    yield Document(page_content=text, metadata=metadata)
|
Parse a Microsoft Word document into the Document iterator.
Args:
blob: The blob to parse.
Returns: An iterator of Documents.
|
test_load_arxiv_from_universal_entry
|
arxiv_tool = _load_arxiv_from_universal_entry()
output = arxiv_tool('Caprice Stanley')
assert 'On Mixing Behavior of a Family of Random Walks' in output, 'failed to fetch a valid result'
|
def test_load_arxiv_from_universal_entry() ->None:
    """The universally-loaded arxiv tool returns a known paper for this query."""
    arxiv_tool = _load_arxiv_from_universal_entry()
    result = arxiv_tool('Caprice Stanley')
    assert 'On Mixing Behavior of a Family of Random Walks' in result, 'failed to fetch a valid result'
| null |
_construct_agent_scratchpad
|
if len(intermediate_steps) == 0:
return ''
thoughts = ''
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f'\nObservation: {observation}\nThought: '
return f"""This was your previous work (but I haven't seen any of it! I only see what you return as final answer):
{thoughts}"""
|
def _construct_agent_scratchpad(self, intermediate_steps: List[Tuple[
    AgentAction, str]]) ->str:
    """Render prior (action, observation) steps into a scratchpad string.

    Args:
        intermediate_steps: Pairs of the agent's action (whose ``log`` is
            replayed verbatim) and the observation that followed it.

    Returns:
        '' when there are no prior steps; otherwise a preamble followed by
        each action's log, its observation, and a trailing 'Thought: ' cue.
    """
    if not intermediate_steps:
        return ''
    # str.join avoids the quadratic cost of repeated `+=` on long histories.
    thoughts = ''.join(
        f'{action.log}\nObservation: {observation}\nThought: '
        for action, observation in intermediate_steps)
    return ("This was your previous work (but I haven't seen any of it! I "
        'only see what you return as final answer):\n' + thoughts)
| null |
clear
|
"""Clear cache that can take additional keyword arguments."""
|
@abstractmethod
def clear(self, **kwargs: Any) ->None:
    """Clear the cache.

    Implementations may accept backend-specific keyword arguments via
    ``**kwargs``.
    """
|
Clear cache that can take additional keyword arguments.
|
test_prompt_jinja2_extra_input_variables
|
"""Test error is raised when there are too many input variables."""
prefix = 'Starting with {{ foo }}'
suffix = 'Ending with {{ bar }}'
with pytest.warns(UserWarning):
FewShotPromptTemplate(input_variables=['bar', 'foo', 'extra', 'thing'],
suffix=suffix, prefix=prefix, examples=example_jinja2_prompt[1],
example_prompt=example_jinja2_prompt[0], template_format='jinja2',
validate_template=True)
assert FewShotPromptTemplate(input_variables=['bar', 'foo', 'extra',
'thing'], suffix=suffix, prefix=prefix, examples=example_jinja2_prompt[
1], example_prompt=example_jinja2_prompt[0], template_format='jinja2'
).input_variables == ['bar', 'foo']
|
@pytest.mark.requires('jinja2')
def test_prompt_jinja2_extra_input_variables(example_jinja2_prompt: Tuple[
    PromptTemplate, List[Dict[str, str]]]) ->None:
    """Test a warning is raised when there are too many input variables."""
    prefix = 'Starting with {{ foo }}'
    suffix = 'Ending with {{ bar }}'
    # With validate_template=True the extra variables trigger a UserWarning.
    with pytest.warns(UserWarning):
        FewShotPromptTemplate(input_variables=['bar', 'foo', 'extra',
            'thing'], suffix=suffix, prefix=prefix, examples=
            example_jinja2_prompt[1], example_prompt=example_jinja2_prompt[
            0], template_format='jinja2', validate_template=True)
    # Without validation, input_variables is pruned down to the variables
    # actually referenced by the jinja2 template.
    assert FewShotPromptTemplate(input_variables=['bar', 'foo', 'extra',
        'thing'], suffix=suffix, prefix=prefix, examples=
        example_jinja2_prompt[1], example_prompt=example_jinja2_prompt[0],
        template_format='jinja2').input_variables == ['bar', 'foo']
|
Test error is raised when there are too many input variables.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.