language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | chroma-core__chroma | chromadb/api/models/Collection.py | {
"start": 783,
"end": 20444
} | class ____(CollectionCommon["ServerAPI"]):
def count(self) -> int:
"""The total number of embeddings added to the database
Returns:
int: The total number of embeddings added to the database
"""
return self._client._count(
collection_id=self.id,
tenant=self.tenant,
database=self.database,
)
def add(
self,
ids: OneOrMany[ID],
embeddings: Optional[
Union[
OneOrMany[Embedding],
OneOrMany[PyEmbedding],
]
] = None,
metadatas: Optional[OneOrMany[Metadata]] = None,
documents: Optional[OneOrMany[Document]] = None,
images: Optional[OneOrMany[Image]] = None,
uris: Optional[OneOrMany[URI]] = None,
) -> None:
"""Add embeddings to the data store.
Args:
ids: The ids of the embeddings you wish to add
embeddings: The embeddings to add. If None, embeddings will be computed based on the documents or images using the embedding_function set for the Collection. Optional.
metadatas: The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional.
documents: The documents to associate with the embeddings. Optional.
images: The images to associate with the embeddings. Optional.
uris: The uris of the images to associate with the embeddings. Optional.
Returns:
None
Raises:
ValueError: If you don't provide either embeddings or documents
ValueError: If the length of ids, embeddings, metadatas, or documents don't match
ValueError: If you don't provide an embedding function and don't provide embeddings
ValueError: If you provide both embeddings and documents
ValueError: If you provide an id that already exists
"""
add_request = self._validate_and_prepare_add_request(
ids=ids,
embeddings=embeddings,
metadatas=metadatas,
documents=documents,
images=images,
uris=uris,
)
self._client._add(
collection_id=self.id,
ids=add_request["ids"],
embeddings=add_request["embeddings"],
metadatas=add_request["metadatas"],
documents=add_request["documents"],
uris=add_request["uris"],
tenant=self.tenant,
database=self.database,
)
def get(
self,
ids: Optional[OneOrMany[ID]] = None,
where: Optional[Where] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: Optional[WhereDocument] = None,
include: Include = ["metadatas", "documents"],
) -> GetResult:
"""Get embeddings and their associate data from the data store. If no ids or where filter is provided returns
all embeddings up to limit starting at offset.
Args:
ids: The ids of the embeddings to get. Optional.
where: A Where type dict used to filter results by. E.g. `{"$and": [{"color" : "red"}, {"price": {"$gte": 4.20}}]}`. Optional.
limit: The number of documents to return. Optional.
offset: The offset to start returning results from. Useful for paging results with limit. Optional.
where_document: A WhereDocument type dict used to filter by the documents. E.g. `{"$contains": "hello"}`. Optional.
include: A list of what to include in the results. Can contain `"embeddings"`, `"metadatas"`, `"documents"`. Ids are always included. Defaults to `["metadatas", "documents"]`. Optional.
Returns:
GetResult: A GetResult object containing the results.
"""
get_request = self._validate_and_prepare_get_request(
ids=ids,
where=where,
where_document=where_document,
include=include,
)
get_results = self._client._get(
collection_id=self.id,
ids=get_request["ids"],
where=get_request["where"],
where_document=get_request["where_document"],
include=get_request["include"],
limit=limit,
offset=offset,
tenant=self.tenant,
database=self.database,
)
return self._transform_get_response(
response=get_results, include=get_request["include"]
)
def peek(self, limit: int = 10) -> GetResult:
"""Get the first few results in the database up to limit
Args:
limit: The number of results to return.
Returns:
GetResult: A GetResult object containing the results.
"""
return self._transform_peek_response(
self._client._peek(
collection_id=self.id,
n=limit,
tenant=self.tenant,
database=self.database,
)
)
def query(
self,
query_embeddings: Optional[
Union[
OneOrMany[Embedding],
OneOrMany[PyEmbedding],
]
] = None,
query_texts: Optional[OneOrMany[Document]] = None,
query_images: Optional[OneOrMany[Image]] = None,
query_uris: Optional[OneOrMany[URI]] = None,
ids: Optional[OneOrMany[ID]] = None,
n_results: int = 10,
where: Optional[Where] = None,
where_document: Optional[WhereDocument] = None,
include: Include = [
"metadatas",
"documents",
"distances",
],
) -> QueryResult:
"""Get the n_results nearest neighbor embeddings for provided query_embeddings or query_texts.
Args:
query_embeddings: The embeddings to get the closes neighbors of. Optional.
query_texts: The document texts to get the closes neighbors of. Optional.
query_images: The images to get the closes neighbors of. Optional.
query_uris: The URIs to be used with data loader. Optional.
ids: A subset of ids to search within. Optional.
n_results: The number of neighbors to return for each query_embedding or query_texts. Optional.
where: A Where type dict used to filter results by. E.g. `{"$and": [{"color" : "red"}, {"price": {"$gte": 4.20}}]}`. Optional.
where_document: A WhereDocument type dict used to filter by the documents. E.g. `{"$contains": "hello"}`. Optional.
include: A list of what to include in the results. Can contain `"embeddings"`, `"metadatas"`, `"documents"`, `"distances"`. Ids are always included. Defaults to `["metadatas", "documents", "distances"]`. Optional.
Returns:
QueryResult: A QueryResult object containing the results.
Raises:
ValueError: If you don't provide either query_embeddings, query_texts, or query_images
ValueError: If you provide both query_embeddings and query_texts
ValueError: If you provide both query_embeddings and query_images
ValueError: If you provide both query_texts and query_images
"""
query_request = self._validate_and_prepare_query_request(
query_embeddings=query_embeddings,
query_texts=query_texts,
query_images=query_images,
query_uris=query_uris,
ids=ids,
n_results=n_results,
where=where,
where_document=where_document,
include=include,
)
query_results = self._client._query(
collection_id=self.id,
ids=query_request["ids"],
query_embeddings=query_request["embeddings"],
n_results=query_request["n_results"],
where=query_request["where"],
where_document=query_request["where_document"],
include=query_request["include"],
tenant=self.tenant,
database=self.database,
)
return self._transform_query_response(
response=query_results, include=query_request["include"]
)
def modify(
self,
name: Optional[str] = None,
metadata: Optional[CollectionMetadata] = None,
configuration: Optional[UpdateCollectionConfiguration] = None,
) -> None:
"""Modify the collection name or metadata
Args:
name: The updated name for the collection. Optional.
metadata: The updated metadata for the collection. Optional.
Returns:
None
"""
self._validate_modify_request(metadata)
# Note there is a race condition here where the metadata can be updated
# but another thread sees the cached local metadata.
# TODO: fixme
self._client._modify(
id=self.id,
new_name=name,
new_metadata=metadata,
new_configuration=configuration,
tenant=self.tenant,
database=self.database,
)
self._update_model_after_modify_success(name, metadata, configuration)
def fork(
self,
new_name: str,
) -> "Collection":
"""Fork the current collection under a new name. The returning collection should contain identical data to the current collection.
This is an experimental API that only works for Hosted Chroma for now.
Args:
new_name: The name of the new collection.
Returns:
Collection: A new collection with the specified name and containing identical data to the current collection.
"""
model = self._client._fork(
collection_id=self.id,
new_name=new_name,
tenant=self.tenant,
database=self.database,
)
return Collection(
client=self._client,
model=model,
embedding_function=self._embedding_function,
data_loader=self._data_loader,
)
def search(
self,
searches: OneOrMany[Search],
) -> SearchResult:
"""Perform hybrid search on the collection.
This is an experimental API that only works for Hosted Chroma for now.
Args:
searches: A single Search object or a list of Search objects, each containing:
- where: Where expression for filtering
- rank: Ranking expression for hybrid search (defaults to Val(0.0))
- limit: Limit configuration for pagination (defaults to no limit)
- select: Select configuration for keys to return (defaults to empty)
Returns:
SearchResult: Column-major format response with:
- ids: List of result IDs for each search payload
- documents: Optional documents for each payload
- embeddings: Optional embeddings for each payload
- metadatas: Optional metadata for each payload
- scores: Optional scores for each payload
- select: List of selected keys for each payload
Raises:
NotImplementedError: For local/segment API implementations
Examples:
# Using builder pattern with Key constants
from chromadb.execution.expression import (
Search, Key, K, Knn, Val
)
# Note: K is an alias for Key, so K.DOCUMENT == Key.DOCUMENT
search = (Search()
.where((K("category") == "science") & (K("score") > 0.5))
.rank(Knn(query=[0.1, 0.2, 0.3]) * 0.8 + Val(0.5) * 0.2)
.limit(10, offset=0)
.select(K.DOCUMENT, K.SCORE, "title"))
# Direct construction
from chromadb.execution.expression import (
Search, Eq, And, Gt, Knn, Limit, Select, Key
)
search = Search(
where=And([Eq("category", "science"), Gt("score", 0.5)]),
rank=Knn(query=[0.1, 0.2, 0.3]),
limit=Limit(offset=0, limit=10),
select=Select(keys={Key.DOCUMENT, Key.SCORE, "title"})
)
# Single search
result = collection.search(search)
# Multiple searches at once
searches = [
Search().where(K("type") == "article").rank(Knn(query=[0.1, 0.2])),
Search().where(K("type") == "paper").rank(Knn(query=[0.3, 0.4]))
]
results = collection.search(searches)
"""
# Convert single search to list for consistent handling
searches_list = maybe_cast_one_to_many(searches)
if searches_list is None:
searches_list = []
# Embed any string queries in Knn objects
embedded_searches = [
self._embed_search_string_queries(search) for search in searches_list
]
return self._client._search(
collection_id=self.id,
searches=cast(List[Search], embedded_searches),
tenant=self.tenant,
database=self.database,
)
def update(
self,
ids: OneOrMany[ID],
embeddings: Optional[
Union[
OneOrMany[Embedding],
OneOrMany[PyEmbedding],
]
] = None,
metadatas: Optional[OneOrMany[Metadata]] = None,
documents: Optional[OneOrMany[Document]] = None,
images: Optional[OneOrMany[Image]] = None,
uris: Optional[OneOrMany[URI]] = None,
) -> None:
"""Update the embeddings, metadatas or documents for provided ids.
Args:
ids: The ids of the embeddings to update
embeddings: The embeddings to update. If None, embeddings will be computed based on the documents or images using the embedding_function set for the Collection. Optional.
metadatas: The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional.
documents: The documents to associate with the embeddings. Optional.
images: The images to associate with the embeddings. Optional.
Returns:
None
"""
update_request = self._validate_and_prepare_update_request(
ids=ids,
embeddings=embeddings,
metadatas=metadatas,
documents=documents,
images=images,
uris=uris,
)
self._client._update(
collection_id=self.id,
ids=update_request["ids"],
embeddings=update_request["embeddings"],
metadatas=update_request["metadatas"],
documents=update_request["documents"],
uris=update_request["uris"],
tenant=self.tenant,
database=self.database,
)
def upsert(
self,
ids: OneOrMany[ID],
embeddings: Optional[
Union[
OneOrMany[Embedding],
OneOrMany[PyEmbedding],
]
] = None,
metadatas: Optional[OneOrMany[Metadata]] = None,
documents: Optional[OneOrMany[Document]] = None,
images: Optional[OneOrMany[Image]] = None,
uris: Optional[OneOrMany[URI]] = None,
) -> None:
"""Update the embeddings, metadatas or documents for provided ids, or create them if they don't exist.
Args:
ids: The ids of the embeddings to update
embeddings: The embeddings to add. If None, embeddings will be computed based on the documents using the embedding_function set for the Collection. Optional.
metadatas: The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional.
documents: The documents to associate with the embeddings. Optional.
Returns:
None
"""
upsert_request = self._validate_and_prepare_upsert_request(
ids=ids,
embeddings=embeddings,
metadatas=metadatas,
documents=documents,
images=images,
uris=uris,
)
self._client._upsert(
collection_id=self.id,
ids=upsert_request["ids"],
embeddings=upsert_request["embeddings"],
metadatas=upsert_request["metadatas"],
documents=upsert_request["documents"],
uris=upsert_request["uris"],
tenant=self.tenant,
database=self.database,
)
def delete(
self,
ids: Optional[IDs] = None,
where: Optional[Where] = None,
where_document: Optional[WhereDocument] = None,
) -> None:
"""Delete the embeddings based on ids and/or a where filter
Args:
ids: The ids of the embeddings to delete
where: A Where type dict used to filter the delection by. E.g. `{"$and": [{"color" : "red"}, {"price": {"$gte": 4.20}]}}`. Optional.
where_document: A WhereDocument type dict used to filter the deletion by the document content. E.g. `{"$contains": "hello"}`. Optional.
Returns:
None
Raises:
ValueError: If you don't provide either ids, where, or where_document
"""
delete_request = self._validate_and_prepare_delete_request(
ids, where, where_document
)
self._client._delete(
collection_id=self.id,
ids=delete_request["ids"],
where=delete_request["where"],
where_document=delete_request["where_document"],
tenant=self.tenant,
database=self.database,
)
def attach_function(
self,
function_id: str,
name: str,
output_collection: str,
params: Optional[Dict[str, Any]] = None,
) -> "AttachedFunction":
"""Attach a function to this collection.
Args:
function_id: Built-in function identifier (e.g., "record_counter")
name: Unique name for this attached function
output_collection: Name of the collection where function output will be stored
params: Optional dictionary with function-specific parameters
Returns:
AttachedFunction: Object representing the attached function
Example:
>>> attached_fn = collection.attach_function(
... function_id="record_counter",
... name="mycoll_stats_fn",
... output_collection="mycoll_stats",
... params={"threshold": 100}
... )
"""
return self._client.attach_function(
function_id=function_id,
name=name,
input_collection_id=self.id,
output_collection=output_collection,
params=params,
tenant=self.tenant,
database=self.database,
)
def get_attached_function(self, name: str) -> "AttachedFunction":
"""Get an attached function by name for this collection.
Args:
name: Name of the attached function
Returns:
AttachedFunction: The attached function object
Raises:
NotFoundError: If the attached function doesn't exist
"""
return self._client.get_attached_function(
name=name,
input_collection_id=self.id,
tenant=self.tenant,
database=self.database,
)
| Collection |
python | ray-project__ray | rllib/connectors/learner/general_advantage_estimation.py | {
"start": 791,
"end": 8785
} | class ____(ConnectorV2):
"""Learner ConnectorV2 piece computing GAE advantages and value targets on episodes.
This ConnectorV2:
- Operates on a list of Episode objects (single- or multi-agent).
- Should be used only in the Learner pipeline and as one of the last pieces (due
to the fact that it requires the batch for the value functions to be already
complete).
- Requires the incoming episodes to already be elongated by one artificial timestep
at the end (last obs, actions, states, etc.. repeated, last reward=0.0, etc..),
making it possible to combine the per-timestep value computations with the
necessary "bootstrap" value computations at the episode (chunk) truncation points.
The extra timestep should be added using the `ray.rllib.connectors.learner.
add_one_ts_to_episodes_and_truncate.AddOneTsToEpisodesAndTruncate` connector piece.
The GAE computation is performed in an efficient way through using the arriving
`batch` as forward batch for the value function, extracting the bootstrap values
(at the artificially added time steos) and all other value predictions (all other
timesteps), performing GAE, and adding the results back into `batch` (under
Postprocessing.ADVANTAGES and Postprocessing.VALUE_TARGETS.
"""
def __init__(
self,
input_observation_space=None,
input_action_space=None,
*,
gamma,
lambda_,
):
"""Initializes a GeneralAdvantageEstimation instance.
Args:
gamma: The discount factor gamma.
lambda_: The lambda parameter for General Advantage Estimation (GAE).
Defines the exponential weight used between actually measured rewards
vs value function estimates over multiple time steps. Specifically,
`lambda_` balances short-term, low-variance estimates with longer-term,
high-variance returns. A `lambda_` or 0.0 makes the GAE rely only on
immediate rewards (and vf predictions from there on, reducing variance,
but increasing bias), while a `lambda_` of 1.0 only incorporates vf
predictions at the truncation points of the given episodes or episode
chunks (reducing bias but increasing variance).
"""
super().__init__(input_observation_space, input_action_space)
self.gamma = gamma
self.lambda_ = lambda_
# Internal numpy-to-tensor connector to translate GAE results (advantages and
# vf targets) into tensors.
self._numpy_to_tensor_connector = None
@override(ConnectorV2)
def __call__(
self,
*,
rl_module: MultiRLModule,
episodes: List[EpisodeType],
batch: Dict[str, Any],
**kwargs,
):
# Device to place all GAE result tensors (advantages and value targets) on.
device = None
# Extract all single-agent episodes.
sa_episodes_list = list(
self.single_agent_episode_iterator(episodes, agents_that_stepped_only=False)
)
# Perform the value nets' forward passes.
# TODO (sven): We need to check here in the pipeline already, whether a module
# should even be updated or not (which we usually do after(!) the Learner
# pipeline). This is an open TODO to move this filter into a connector as well.
# For now, we'll just check, whether `mid` is in batch and skip if it isn't.
vf_preds = rl_module.foreach_module(
func=lambda mid, module: (
module.compute_values(batch[mid])
if mid in batch and isinstance(module, ValueFunctionAPI)
else None
),
return_dict=True,
)
# Loop through all modules and perform each one's GAE computation.
for module_id, module_vf_preds in vf_preds.items():
# Skip those outputs of RLModules that are not implementers of
# `ValueFunctionAPI`.
if module_vf_preds is None:
continue
module = rl_module[module_id]
device = module_vf_preds.device
# Convert to numpy for the upcoming GAE computations.
module_vf_preds = convert_to_numpy(module_vf_preds)
# Collect (single-agent) episode lengths for this particular module.
episode_lens = [
len(e) for e in sa_episodes_list if e.module_id in [None, module_id]
]
# Remove all zero-padding again, if applicable, for the upcoming
# GAE computations.
module_vf_preds = unpad_data_if_necessary(episode_lens, module_vf_preds)
# Compute value targets.
module_value_targets = compute_value_targets(
values=module_vf_preds,
rewards=unpad_data_if_necessary(
episode_lens,
convert_to_numpy(batch[module_id][Columns.REWARDS]),
),
terminateds=unpad_data_if_necessary(
episode_lens,
convert_to_numpy(batch[module_id][Columns.TERMINATEDS]),
),
truncateds=unpad_data_if_necessary(
episode_lens,
convert_to_numpy(batch[module_id][Columns.TRUNCATEDS]),
),
gamma=self.gamma,
lambda_=self.lambda_,
)
assert module_value_targets.shape[0] == sum(episode_lens)
module_advantages = module_value_targets - module_vf_preds
# Drop vf-preds, not needed in loss. Note that in the DefaultPPORLModule,
# vf-preds are recomputed with each `forward_train` call anyway to compute
# the vf loss.
# Standardize advantages (used for more stable and better weighted
# policy gradient computations).
module_advantages = (module_advantages - module_advantages.mean()) / max(
1e-4, module_advantages.std()
)
# Zero-pad the new computations, if necessary.
if module.is_stateful():
module_advantages = np.stack(
split_and_zero_pad_n_episodes(
module_advantages,
episode_lens=episode_lens,
max_seq_len=module.model_config["max_seq_len"],
),
axis=0,
)
module_value_targets = np.stack(
split_and_zero_pad_n_episodes(
module_value_targets,
episode_lens=episode_lens,
max_seq_len=module.model_config["max_seq_len"],
),
axis=0,
)
batch[module_id][Postprocessing.ADVANTAGES] = module_advantages
batch[module_id][Postprocessing.VALUE_TARGETS] = module_value_targets
# Convert all GAE results to tensors.
if self._numpy_to_tensor_connector is None:
self._numpy_to_tensor_connector = NumpyToTensor(
as_learner_connector=True, device=device
)
tensor_results = self._numpy_to_tensor_connector(
rl_module=rl_module,
batch={
mid: {
Postprocessing.ADVANTAGES: module_batch[Postprocessing.ADVANTAGES],
Postprocessing.VALUE_TARGETS: (
module_batch[Postprocessing.VALUE_TARGETS]
),
}
for mid, module_batch in batch.items()
if vf_preds[mid] is not None
},
episodes=episodes,
)
# Move converted tensors back to `batch`.
for mid, module_batch in tensor_results.items():
batch[mid].update(module_batch)
return batch
| GeneralAdvantageEstimation |
python | scipy__scipy | scipy/stats/tests/test_multivariate.py | {
"start": 94159,
"end": 102064
} | class ____:
def test_logpmf(self):
vals1 = multinomial.logpmf((3,4), 7, (0.3, 0.7))
assert_allclose(vals1, -1.483270127243324, rtol=1e-8)
vals2 = multinomial.logpmf([3, 4], 0, [.3, .7])
assert vals2 == -np.inf
vals3 = multinomial.logpmf([0, 0], 0, [.3, .7])
assert vals3 == 0
vals4 = multinomial.logpmf([3, 4], 0, [-2, 3])
assert_allclose(vals4, np.nan, rtol=1e-8)
def test_reduces_binomial(self):
# test that the multinomial pmf reduces to the binomial pmf in the 2d
# case
val1 = multinomial.logpmf((3, 4), 7, (0.3, 0.7))
val2 = binom.logpmf(3, 7, 0.3)
assert_allclose(val1, val2, rtol=1e-8)
val1 = multinomial.pmf((6, 8), 14, (0.1, 0.9))
val2 = binom.pmf(6, 14, 0.1)
assert_allclose(val1, val2, rtol=1e-8)
def test_R(self):
# test against the values produced by this R code
# (https://stat.ethz.ch/R-manual/R-devel/library/stats/html/Multinom.html)
# X <- t(as.matrix(expand.grid(0:3, 0:3))); X <- X[, colSums(X) <= 3]
# X <- rbind(X, 3:3 - colSums(X)); dimnames(X) <- list(letters[1:3], NULL)
# X
# apply(X, 2, function(x) dmultinom(x, prob = c(1,2,5)))
n, p = 3, [1./8, 2./8, 5./8]
r_vals = {(0, 0, 3): 0.244140625, (1, 0, 2): 0.146484375,
(2, 0, 1): 0.029296875, (3, 0, 0): 0.001953125,
(0, 1, 2): 0.292968750, (1, 1, 1): 0.117187500,
(2, 1, 0): 0.011718750, (0, 2, 1): 0.117187500,
(1, 2, 0): 0.023437500, (0, 3, 0): 0.015625000}
for x in r_vals:
assert_allclose(multinomial.pmf(x, n, p), r_vals[x], atol=1e-14)
@pytest.mark.parametrize("n", [0, 3])
def test_rvs_np(self, n):
# test that .rvs agrees w/numpy
message = "Some rows of `p` do not sum to 1.0 within..."
with pytest.warns(FutureWarning, match=message):
rndm = np.random.RandomState(123)
sc_rvs = multinomial.rvs(n, [1/4.]*3, size=7, random_state=123)
np_rvs = rndm.multinomial(n, [1/4.]*3, size=7)
assert_equal(sc_rvs, np_rvs)
with pytest.warns(FutureWarning, match=message):
rndm = np.random.RandomState(123)
sc_rvs = multinomial.rvs(n, [1/4.]*5, size=7, random_state=123)
np_rvs = rndm.multinomial(n, [1/4.]*5, size=7)
assert_equal(sc_rvs, np_rvs)
def test_pmf(self):
vals0 = multinomial.pmf((5,), 5, (1,))
assert_allclose(vals0, 1, rtol=1e-8)
vals1 = multinomial.pmf((3,4), 7, (.3, .7))
assert_allclose(vals1, .22689449999999994, rtol=1e-8)
vals2 = multinomial.pmf([[[3,5],[0,8]], [[-1, 9], [1, 1]]], 8,
(.1, .9))
assert_allclose(vals2, [[.03306744, .43046721], [0, 0]], rtol=1e-8)
x = np.empty((0,2), dtype=np.float64)
vals3 = multinomial.pmf(x, 4, (.3, .7))
assert_equal(vals3, np.empty([], dtype=np.float64))
vals4 = multinomial.pmf([1,2], 4, (.3, .7))
assert_allclose(vals4, 0, rtol=1e-8)
vals5 = multinomial.pmf([3, 3, 0], 6, [2/3.0, 1/3.0, 0])
assert_allclose(vals5, 0.219478737997, rtol=1e-8)
vals5 = multinomial.pmf([0, 0, 0], 0, [2/3.0, 1/3.0, 0])
assert vals5 == 1
vals6 = multinomial.pmf([2, 1, 0], 0, [2/3.0, 1/3.0, 0])
assert vals6 == 0
def test_pmf_broadcasting(self):
vals0 = multinomial.pmf([1, 2], 3, [[.1, .9], [.2, .8]])
assert_allclose(vals0, [.243, .384], rtol=1e-8)
vals1 = multinomial.pmf([1, 2], [3, 4], [.1, .9])
assert_allclose(vals1, [.243, 0], rtol=1e-8)
vals2 = multinomial.pmf([[[1, 2], [1, 1]]], 3, [.1, .9])
assert_allclose(vals2, [[.243, 0]], rtol=1e-8)
vals3 = multinomial.pmf([1, 2], [[[3], [4]]], [.1, .9])
assert_allclose(vals3, [[[.243], [0]]], rtol=1e-8)
vals4 = multinomial.pmf([[1, 2], [1,1]], [[[[3]]]], [.1, .9])
assert_allclose(vals4, [[[[.243, 0]]]], rtol=1e-8)
@pytest.mark.parametrize("n", [0, 5])
def test_cov(self, n):
cov1 = multinomial.cov(n, (.2, .3, .5))
cov2 = [[n*.2*.8, -n*.2*.3, -n*.2*.5],
[-n*.3*.2, n*.3*.7, -n*.3*.5],
[-n*.5*.2, -n*.5*.3, n*.5*.5]]
assert_allclose(cov1, cov2, rtol=1e-8)
def test_cov_broadcasting(self):
cov1 = multinomial.cov(5, [[.1, .9], [.2, .8]])
cov2 = [[[.45, -.45],[-.45, .45]], [[.8, -.8], [-.8, .8]]]
assert_allclose(cov1, cov2, rtol=1e-8)
cov3 = multinomial.cov([4, 5], [.1, .9])
cov4 = [[[.36, -.36], [-.36, .36]], [[.45, -.45], [-.45, .45]]]
assert_allclose(cov3, cov4, rtol=1e-8)
cov5 = multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
cov6 = [[[4*.3*.7, -4*.3*.7], [-4*.3*.7, 4*.3*.7]],
[[5*.4*.6, -5*.4*.6], [-5*.4*.6, 5*.4*.6]]]
assert_allclose(cov5, cov6, rtol=1e-8)
@pytest.mark.parametrize("n", [0, 2])
def test_entropy(self, n):
# this is equivalent to a binomial distribution with n=2, so the
# entropy .77899774929 is easily computed "by hand"
ent0 = multinomial.entropy(n, [.2, .8])
assert_allclose(ent0, binom.entropy(n, .2), rtol=1e-8)
def test_entropy_broadcasting(self):
ent0 = multinomial.entropy([2, 3], [.2, .8])
assert_allclose(ent0, [binom.entropy(2, .2), binom.entropy(3, .2)],
rtol=1e-8)
ent1 = multinomial.entropy([7, 8], [[.3, .7], [.4, .6]])
assert_allclose(ent1, [binom.entropy(7, .3), binom.entropy(8, .4)],
rtol=1e-8)
ent2 = multinomial.entropy([[7], [8]], [[.3, .7], [.4, .6]])
assert_allclose(ent2,
[[binom.entropy(7, .3), binom.entropy(7, .4)],
[binom.entropy(8, .3), binom.entropy(8, .4)]],
rtol=1e-8)
@pytest.mark.parametrize("n", [0, 5])
def test_mean(self, n):
mean1 = multinomial.mean(n, [.2, .8])
assert_allclose(mean1, [n*.2, n*.8], rtol=1e-8)
def test_mean_broadcasting(self):
mean1 = multinomial.mean([5, 6], [.2, .8])
assert_allclose(mean1, [[5*.2, 5*.8], [6*.2, 6*.8]], rtol=1e-8)
def test_frozen(self):
# The frozen distribution should agree with the regular one
n = 12
pvals = (.1, .2, .3, .4)
x = [[0,0,0,12],[0,0,1,11],[0,1,1,10],[1,1,1,9],[1,1,2,8]]
x = np.asarray(x, dtype=np.float64)
mn_frozen = multinomial(n, pvals)
assert_allclose(mn_frozen.pmf(x), multinomial.pmf(x, n, pvals))
assert_allclose(mn_frozen.logpmf(x), multinomial.logpmf(x, n, pvals))
assert_allclose(mn_frozen.entropy(), multinomial.entropy(n, pvals))
def test_gh_11860(self):
# gh-11860 reported cases in which the adjustments made by multinomial
# to the last element of `p` can cause `nan`s even when the input is
# essentially valid. Check that a pathological case returns a finite,
# nonzero result. (This would fail in main before the PR.)
n = 88
rng = np.random.default_rng(8879715917488330089)
p = rng.random(n)
p[-1] = 1e-30
p /= np.sum(p)
x = np.ones(n)
logpmf = multinomial.logpmf(x, n, p)
assert np.isfinite(logpmf)
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
def test_gh_22565(self, dtype):
# Same issue as gh-11860 above, essentially, but the original
# fix didn't completely solve the problem.
n = 19
p = np.asarray([0.2, 0.2, 0.2, 0.2, 0.2], dtype=dtype)
res1 = multinomial.pmf(x=[1, 2, 5, 7, 4], n=n, p=p)
res2 = multinomial.pmf(x=[1, 2, 4, 5, 7], n=n, p=p)
np.testing.assert_allclose(res1, res2, rtol=1e-15)
| TestMultinomial |
python | PyCQA__pylint | tests/functional/i/inherit_non_class.py | {
"start": 729,
"end": 819
} | class ____(return_class): # [inherit-non-class]
""" Can't inherit from function. """
| Bad3 |
python | aimacode__aima-python | making_simple_decision4e.py | {
"start": 3795,
"end": 5869
} | class ____:
"""Map which provides probability distributions and sensor readings.
Consists of discrete cells which are either an obstacle or empty"""
def __init__(self, m):
self.m = m
self.nrows = len(m)
self.ncols = len(m[0])
# list of empty spaces in the map
self.empty = [(i, j) for i in range(self.nrows) for j in range(self.ncols) if not m[i][j]]
def sample(self):
"""Returns a random kinematic state possible in the map"""
pos = random.choice(self.empty)
# 0N 1E 2S 3W
orient = random.choice(range(4))
kin_state = pos + (orient,)
return kin_state
def ray_cast(self, sensor_num, kin_state):
"""Returns distace to nearest obstacle or map boundary in the direction of sensor"""
pos = kin_state[:2]
orient = kin_state[2]
# sensor layout when orientation is 0 (towards North)
# 0
# 3R1
# 2
delta = ((sensor_num % 2 == 0) * (sensor_num - 1), (sensor_num % 2 == 1) * (2 - sensor_num))
# sensor direction changes based on orientation
for _ in range(orient):
delta = (delta[1], -delta[0])
range_count = 0
while (0 <= pos[0] < self.nrows) and (0 <= pos[1] < self.nrows) and (not self.m[pos[0]][pos[1]]):
pos = vector_add(pos, delta)
range_count += 1
return range_count
def monte_carlo_localization(a, z, N, P_motion_sample, P_sensor, m, S=None):
"""Monte Carlo localization algorithm from Fig 25.9"""
def ray_cast(sensor_num, kin_state, m):
return m.ray_cast(sensor_num, kin_state)
M = len(z)
W = [0] * N
S_ = [0] * N
W_ = [0] * N
v = a['v']
w = a['w']
if S is None:
S = [m.sample() for _ in range(N)]
for i in range(N):
S_[i] = P_motion_sample(S[i], v, w)
W_[i] = 1
for j in range(M):
z_ = ray_cast(j, S_[i], m)
W_[i] = W_[i] * P_sensor(z[j], z_)
S = weighted_sample_with_replacement(N, S_, W_)
return S
| MCLmap |
python | tensorflow__tensorflow | tensorflow/python/ops/gradients_test.py | {
"start": 56373,
"end": 63708
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
def _TestFnVariablesGradient(self, inputs, test_fn, vars_to_grad):
"""Returns gradients of `test_model` with respect to `vars_to_grad`."""
test_fn_re = custom_gradient.recompute_grad(test_fn)
with backprop.GradientTape(persistent=True) as tape:
tape.watch(vars_to_grad)
out_re = test_fn_re(inputs, vars_to_grad)
out = test_fn(inputs, vars_to_grad)
grads_re = tape.gradient(out_re, vars_to_grad)
grads = tape.gradient(out, vars_to_grad)
return grads_re, grads
def _grad(self, f, argnums=0):
"""Return a function which computes the gradient of `f`."""
def F(*params):
with backprop.GradientTape() as tape:
tape.watch(params)
outputs = f(*params)
return tape.gradient(
outputs,
params[argnums],
unconnected_gradients=unconnected_gradients.UnconnectedGradients.ZERO)
return F
def _test_gradients(self, f, inputs, order, delta=1e-3, rtol=1e-2, atol=1e-6):
"""Tests backward jacobians of `f`'s [0, `order`)-order gradients."""
if order < 1:
raise ValueError(
"`order` should be a positive integer, got '{}'.".format(order))
if order > 1:
self._test_gradients(
f=self._grad(f),
inputs=inputs,
order=order - 1,
delta=delta,
rtol=rtol,
atol=atol)
sym_jac_back, num_jac = gradient_checker_v2.compute_gradient(
f, inputs, delta=delta)
self.assertAllClose(num_jac, sym_jac_back, rtol=rtol, atol=atol)
def testRecomputeGradWrapped(self):
def f(x): # pylint: disable=invalid-name
return 2 * x
g = custom_gradient.recompute_grad(f)
self.assertIs(g.__wrapped__, f)
def testRecomputeGradZeroSizeInput(self):
def F(x):
return 2 * x
x = array_ops.constant(())
grads_re = self._grad(custom_gradient.recompute_grad(F))(x)
grads = self._grad(F)(x)
self.assertAllClose(grads_re, grads)
f_graph = def_function.function(
F, input_signature=[tensor.TensorSpec(None)])
grads_re = self._grad(custom_gradient.recompute_grad(f_graph))(x)
grads = self._grad(f_graph)(x)
self.assertAllClose(grads_re, grads)
def testRecomputeGradDifferentDtypesInputs(self):
def F(x1, x2):
return 2 * x1, 2 * x2
x1 = array_ops.constant(1, dtype=dtypes.int32)
x2 = array_ops.constant(1., dtype=dtypes.float32)
grads_re = self._grad(custom_gradient.recompute_grad(F))(x1, x2)
grads = self._grad(F)(x1, x2)
self.assertAllClose(grads_re, grads)
f_graph = def_function.function(
F,
input_signature=[
tensor.TensorSpec(None, dtype=dtypes.int32),
tensor.TensorSpec(None, dtype=dtypes.float32),
])
grads_re = self._grad(custom_gradient.recompute_grad(f_graph))(x1, x2)
grads = self._grad(f_graph)(x1, x2)
self.assertAllClose(grads_re, grads)
@test_util.run_v2_only
def testCustomGradientRecomputeGradHigherOrder(self):
@custom_gradient.recompute_grad
def F(x):
return math_ops.reduce_prod(math_ops.tanh(x)**2)
self._test_gradients(F, [constant_op.constant([1.])], order=3)
@test_util.run_in_graph_and_eager_modes
def testFnRecompute(self):
"""Checks that recompute_grad works grads of function args."""
def TestFn(inputs, input_vars):
return inputs * input_vars
def TestFnSeq(inputs, input_vars):
return (inputs * input_vars, inputs * input_vars * 2.0)
with variable_scope.variable_scope("test", use_resource=True):
test_var = variable_scope.get_variable(
name="test_var",
shape=10,
trainable=True,
)
self.evaluate(test_var.assign(np.ones([10])))
test_input = constant(np.ones((10, 10), dtype=np.float32))
grads_re, grads = self._TestFnVariablesGradient(test_input, TestFn,
test_input)
grads_re = self.evaluate(grads_re)
grads = self.evaluate(grads)
for g, g_re in zip(grads, grads_re):
self.assertAllClose(g, g_re)
grads_re, grads = self._TestFnVariablesGradient(test_input, TestFn,
test_var)
grads_re = self.evaluate(grads_re)
grads = self.evaluate(grads)
for g, g_re in zip(grads, grads_re):
self.assertAllClose(g, g_re)
# Regression test for wrapping sequence outputting functions.
grads_re, grads = self._TestFnVariablesGradient(test_input, TestFnSeq,
test_input)
grads_re = self.evaluate(grads_re)
grads = self.evaluate(grads)
for g, g_re in zip(grads, grads_re):
self.assertAllClose(g, g_re)
grads_re, grads = self._TestFnVariablesGradient(test_input, TestFnSeq,
test_var)
grads_re = self.evaluate(grads_re)
grads = self.evaluate(grads)
for g, g_re in zip(grads, grads_re):
self.assertAllClose(g, g_re)
@parameterized.parameters(set((True, context.executing_eagerly())))
def testFnRecomputeWithScopeGradient(self, use_tape):
"""Checks that recompute_grad works with var scope and GradientTape."""
def TestFn(input_t):
with variable_scope.variable_scope("inner_scope"):
test_var = variable_scope.get_variable(
name="test_var",
shape=10,
trainable=True,
)
return input_t * test_var
test_input_t = constant(np.zeros((10, 10), dtype=np.float32))
with variable_scope.variable_scope(
"output_scope", reuse=variable_scope.AUTO_REUSE, use_resource=True):
test_fn_re = custom_gradient.recompute_grad(TestFn)
with test_util.AbstractGradientTape(
use_tape=use_tape, persistent=True) as tape:
out_re = test_fn_re(test_input_t)
out = TestFn(test_input_t)
self.evaluate(variables.global_variables_initializer())
grads_re = tape.gradient(out_re, variables.trainable_variables())
grads = tape.gradient(out, variables.trainable_variables())
grads_re = self.evaluate(grads_re)
grads = self.evaluate(grads)
for g, g_re in zip(grads, grads_re):
self.assertAllClose(g, g_re)
@test_util.run_in_graph_and_eager_modes
def testFnRecomputeSameTensor(self):
"""Check recompute_grad when wrapped f called as f(x, x) - b/147369366."""
def TestFnMul(x, y):
return x * y
def TestFnSingleVar(x, y):
# pylint: disable=unused-argument
return x
with variable_scope.variable_scope("test", use_resource=True):
x = array_ops.ones((10))
grads_re, grads = self._TestFnVariablesGradient(x, TestFnMul,
x)
grads_re = self.evaluate(grads_re)
grads = self.evaluate(grads)
for g, g_re in zip(grads, grads_re):
self.assertAllClose(g, g_re)
grads_re, grads = self._TestFnVariablesGradient(x, TestFnSingleVar,
x)
grads_re = self.evaluate(grads_re)
grads = self.evaluate(grads)
for g, g_re in zip(grads, grads_re):
self.assertAllClose(g, g_re)
| VariablesGradientTest |
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 62484,
"end": 68507
} | class ____(fixtures.TestBase):
def test_get_cls_kwargs(self):
class A:
def __init__(self, a):
pass
class A1(A):
def __init__(self, a1):
pass
class A11(A1):
def __init__(self, a11, **kw):
pass
class B:
def __init__(self, b, **kw):
pass
class B1(B):
def __init__(self, b1, **kw):
pass
class B2(B):
def __init__(self, b2):
pass
class AB(A, B):
def __init__(self, ab):
pass
class BA(B, A):
def __init__(self, ba, **kwargs):
pass
class BA1(BA):
pass
class CAB(A, B):
pass
class CBA(B, A):
pass
class CB1A1(B1, A1):
pass
class CAB1(A, B1):
pass
class CB1A(B1, A):
pass
class CB2A(B2, A):
pass
class D:
pass
class BA2(B, A):
pass
class A11B1(A11, B1):
pass
def test(cls, *expected):
eq_(set(util.get_cls_kwargs(cls)), set(expected))
test(A, "a")
test(A1, "a1")
test(A11, "a11", "a1")
test(B, "b")
test(B1, "b1", "b")
test(AB, "ab")
test(BA, "ba", "b", "a")
test(BA1, "ba", "b", "a")
test(CAB, "a")
test(CBA, "b", "a")
test(CAB1, "a")
test(CB1A, "b1", "b", "a")
test(CB2A, "b2")
test(CB1A1, "a1", "b1", "b")
test(D)
test(BA2, "a", "b")
test(A11B1, "a1", "a11", "b", "b1")
def test_get_func_kwargs(self):
def f1():
pass
def f2(foo):
pass
def f3(*foo):
pass
def f4(**foo):
pass
def test(fn, *expected):
eq_(set(util.get_func_kwargs(fn)), set(expected))
test(f1)
test(f2, "foo")
test(f3)
test(f4)
def test_callable_argspec_fn(self):
def foo(x, y, **kw):
pass
eq_(
get_callable_argspec(foo),
compat.FullArgSpec(["x", "y"], None, "kw", None, [], None, {}),
)
def test_callable_argspec_fn_no_self(self):
def foo(x, y, **kw):
pass
eq_(
get_callable_argspec(foo, no_self=True),
compat.FullArgSpec(["x", "y"], None, "kw", None, [], None, {}),
)
def test_callable_argspec_fn_no_self_but_self(self):
def foo(self, x, y, **kw):
pass
eq_(
get_callable_argspec(foo, no_self=True),
compat.FullArgSpec(
["self", "x", "y"], None, "kw", None, [], None, {}
),
)
@testing.requires.cpython
def test_callable_argspec_py_builtin(self):
import datetime
assert_raises(TypeError, get_callable_argspec, datetime.datetime.now)
@testing.requires.cpython
def test_callable_argspec_obj_init(self):
assert_raises(TypeError, get_callable_argspec, object)
def test_callable_argspec_method(self):
class Foo:
def foo(self, x, y, **kw):
pass
eq_(
get_callable_argspec(Foo.foo),
compat.FullArgSpec(
["self", "x", "y"], None, "kw", None, [], None, {}
),
)
def test_callable_argspec_instance_method_no_self(self):
class Foo:
def foo(self, x, y, **kw):
pass
eq_(
get_callable_argspec(Foo().foo, no_self=True),
compat.FullArgSpec(["x", "y"], None, "kw", None, [], None, {}),
)
def test_callable_argspec_unbound_method_no_self(self):
class Foo:
def foo(self, x, y, **kw):
pass
eq_(
get_callable_argspec(Foo.foo, no_self=True),
compat.FullArgSpec(
["self", "x", "y"], None, "kw", None, [], None, {}
),
)
def test_callable_argspec_init(self):
class Foo:
def __init__(self, x, y):
pass
eq_(
get_callable_argspec(Foo),
compat.FullArgSpec(
["self", "x", "y"], None, None, None, [], None, {}
),
)
def test_callable_argspec_init_no_self(self):
class Foo:
def __init__(self, x, y):
pass
eq_(
get_callable_argspec(Foo, no_self=True),
compat.FullArgSpec(["x", "y"], None, None, None, [], None, {}),
)
def test_callable_argspec_call(self):
class Foo:
def __call__(self, x, y):
pass
eq_(
get_callable_argspec(Foo()),
compat.FullArgSpec(
["self", "x", "y"], None, None, None, [], None, {}
),
)
def test_callable_argspec_call_no_self(self):
class Foo:
def __call__(self, x, y):
pass
eq_(
get_callable_argspec(Foo(), no_self=True),
compat.FullArgSpec(["x", "y"], None, None, None, [], None, {}),
)
@testing.requires.cpython
def test_callable_argspec_partial(self):
from functools import partial
def foo(x, y, z, **kw):
pass
bar = partial(foo, 5)
assert_raises(TypeError, get_callable_argspec, bar)
def test_getargspec_6_tuple(self):
def foo(x, y, z, **kw):
pass
spec = compat.inspect_getfullargspec(foo)
eq_(
spec,
compat.FullArgSpec(
args=["x", "y", "z"],
varargs=None,
varkw="kw",
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
| ArgInspectionTest |
python | pytorch__pytorch | torch/utils/data/_utils/fetch.py | {
"start": 266,
"end": 615
} | class ____:
def __init__(self, dataset, auto_collation, collate_fn, drop_last) -> None:
self.dataset = dataset
self.auto_collation = auto_collation
self.collate_fn = collate_fn
self.drop_last = drop_last
def fetch(self, possibly_batched_index) -> NoReturn:
raise NotImplementedError
| _BaseDatasetFetcher |
python | wandb__wandb | tests/unit_tests/test_internal_api.py | {
"start": 22052,
"end": 31929
} | class ____:
"""Test the retry logic of upload_file_retry.
Testing the file-upload logic itself is done in TestUploadFile, above;
this class just tests the retry logic (though it does make a couple
assumptions about status codes, like "400 isn't retriable, 500 is.")
"""
@pytest.mark.parametrize(
["schedule", "num_requests"],
[
([200, 0], 1),
([500, 500, 200, 0], 3),
],
)
def test_stops_after_success(
self,
example_file: Path,
mock_responses: RequestsMock,
schedule: Sequence[int],
num_requests: int,
):
handler = Mock(side_effect=[(status, {}, "") for status in schedule])
mock_responses.add_callback("PUT", "http://example.com/upload-dst", handler)
internal.InternalApi().upload_file_retry(
"http://example.com/upload-dst",
example_file.open("rb"),
)
assert handler.call_count == num_requests
def test_stops_after_bad_status(
self,
example_file: Path,
mock_responses: RequestsMock,
):
handler = Mock(side_effect=[(400, {}, "")])
mock_responses.add_callback("PUT", "http://example.com/upload-dst", handler)
with pytest.raises(wandb.errors.CommError):
internal.InternalApi().upload_file_retry(
"http://example.com/upload-dst",
example_file.open("rb"),
)
assert handler.call_count == 1
def test_stops_after_retry_limit_exceeded(
self,
example_file: Path,
mock_responses: RequestsMock,
):
num_retries = 8
handler = Mock(return_value=(500, {}, ""))
mock_responses.add_callback("PUT", "http://example.com/upload-dst", handler)
with pytest.raises(wandb.errors.CommError):
internal.InternalApi().upload_file_retry(
"http://example.com/upload-dst",
example_file.open("rb"),
num_retries=num_retries,
)
assert handler.call_count == num_retries + 1
ENABLED_FEATURE_RESPONSE = {
"serverInfo": {
"features": [
{"name": "LARGE_FILENAMES", "isEnabled": True},
{"name": "ARTIFACT_TAGS", "isEnabled": False},
]
}
}
@pytest.fixture
def mock_client(mocker: MockerFixture):
mock = mocker.patch("wandb.sdk.internal.internal_api.Client")
mock.return_value = mocker.Mock()
yield mock.return_value
@pytest.fixture
def mock_client_with_enabled_features(mock_client):
mock_client.execute.return_value = ENABLED_FEATURE_RESPONSE
yield mock_client
NO_FEATURES_RESPONSE = {"serverInfo": {"features": []}}
@pytest.fixture
def mock_client_with_no_features(mock_client):
mock_client.execute.return_value = NO_FEATURES_RESPONSE
yield mock_client
@pytest.fixture
def mock_client_with_error_no_field(mock_client):
error_msg = 'Cannot query field "features" on type "ServerInfo".'
mock_client.execute.side_effect = Exception(error_msg)
yield mock_client
@pytest.fixture
def mock_client_with_random_error(mock_client):
error_msg = "Some random error"
mock_client.execute.side_effect = Exception(error_msg)
yield mock_client
@pytest.mark.parametrize(
"fixture_name, feature, expected_result, expected_error",
[
(
# Test enabled features
mock_client_with_enabled_features.__name__,
ServerFeature.LARGE_FILENAMES,
True,
False,
),
(
# Test disabled features
mock_client_with_enabled_features.__name__,
ServerFeature.ARTIFACT_TAGS,
False,
False,
),
(
# Test features not in response
mock_client_with_enabled_features.__name__,
ServerFeature.ARTIFACT_REGISTRY_SEARCH,
False,
False,
),
(
# Test empty features list
mock_client_with_no_features.__name__,
ServerFeature.LARGE_FILENAMES,
False,
False,
),
(
# Test server not supporting features
mock_client_with_error_no_field.__name__,
ServerFeature.LARGE_FILENAMES,
False,
False,
),
(
# Test other server errors
mock_client_with_random_error.__name__,
ServerFeature.LARGE_FILENAMES,
False,
True,
),
],
)
@pytest.mark.usefixtures("patch_apikey", "patch_prompt")
def test_server_feature_checks(
request,
fixture_name,
feature: ServerFeature,
expected_result,
expected_error,
):
"""Test check_server_feature with various scenarios."""
request.getfixturevalue(fixture_name)
api = internal.InternalApi()
if expected_error:
with pytest.raises(Exception, match="Some random error"):
api._server_supports(feature)
else:
result = api._server_supports(feature)
assert result == expected_result
def test_construct_use_artifact_query_with_every_field(mocker: MockerFixture):
# Create mock internal API instance
api = internal.InternalApi()
mocker.patch.object(api, "settings", side_effect=lambda x: "default-" + x)
# Mock the server introspection methods
mocker.patch.object(
api,
"server_use_artifact_input_introspection",
return_value={"usedAs": "String"},
)
# Simulate server support for ALL known features
mock_server_features = dict.fromkeys(
chain(ServerFeature.keys(), ServerFeature.values()),
True,
)
mocker.patch.object(api, "_server_features", return_value=mock_server_features)
test_cases = [
{
"entity_name": "test-entity",
"project_name": "test-project",
"run_name": "test-run",
"artifact_id": "test-artifact-id",
"use_as": "test-use-as",
"artifact_entity_name": "test-artifact-entity",
"artifact_project_name": "test-artifact-project",
},
{
"entity_name": None,
"project_name": None,
"run_name": None,
"artifact_id": "test-artifact-id",
"use_as": None,
"artifact_entity_name": "test-artifact-entity",
"artifact_project_name": "test-artifact-project",
},
]
for case in test_cases:
query, variables = api._construct_use_artifact_query(
entity_name=case["entity_name"],
project_name=case["project_name"],
run_name=case["run_name"],
artifact_id=case["artifact_id"],
use_as=case["use_as"],
artifact_entity_name=case["artifact_entity_name"],
artifact_project_name=case["artifact_project_name"],
)
# Verify variables are correctly set
expected_variables = {
"entityName": case["entity_name"] or "default-entity",
"projectName": case["project_name"] or "default-project",
"runName": case["run_name"],
"artifactID": case["artifact_id"],
"usedAs": case["use_as"],
"artifactEntityName": case["artifact_entity_name"],
"artifactProjectName": case["artifact_project_name"],
}
assert variables == expected_variables
query_str = str(query)
assert "artifactEntityName" in query_str
assert "artifactProjectName" in query_str
if case["use_as"]:
assert "usedAs" in query_str
else:
assert "usedAs" not in query_str
def test_construct_use_artifact_query_without_entity_project():
# Test when server doesn't support entity/project information
api = internal.InternalApi()
api.settings = Mock(side_effect=lambda x: "default-" + x)
# Mock methods to return False for entity/project support
api.server_use_artifact_input_introspection = Mock(
return_value={"usedAs": "String"}
)
api._server_features = Mock(return_value={})
query, variables = api._construct_use_artifact_query(
entity_name="test-entity",
project_name="test-project",
run_name="test-run",
artifact_id="test-artifact-id",
use_as="test-use-as",
artifact_entity_name="test-artifact-entity",
artifact_project_name="test-artifact-project",
)
query_str = str(query)
# Verify entity/project information is not in variables
assert "artifactEntityName" not in variables
assert "artifactProjectName" not in variables
assert "artifactEntityName" not in query_str
assert "artifactProjectName" not in query_str
def test_construct_use_artifact_query_without_used_as():
# Test when server doesn't support usedAs field
api = internal.InternalApi()
api.settings = Mock(side_effect=lambda x: "default-" + x)
# Mock methods to return empty dict for introspection
api.server_use_artifact_input_introspection = Mock(return_value={})
# Simulate server support for ALL known features
mock_server_features = dict.fromkeys(
chain(ServerFeature.keys(), ServerFeature.values()),
True,
)
api._server_features = Mock(return_value=mock_server_features)
query, variables = api._construct_use_artifact_query(
entity_name="test-entity",
project_name="test-project",
run_name="test-run",
artifact_id="test-artifact-id",
use_as="test-use-as",
artifact_entity_name="test-artifact-entity",
artifact_project_name="test-artifact-project",
)
query_str = str(query)
# Verify usedAs is still in variables but not in query
assert "usedAs" in variables
assert "usedAs:" not in query_str
| TestUploadFileRetry |
python | spack__spack | lib/spack/spack/vendor/attr/exceptions.py | {
"start": 834,
"end": 992
} | class ____(ValueError):
"""
A non-``attrs`` class has been passed into an ``attrs`` function.
.. versionadded:: 16.2.0
"""
| NotAnAttrsClassError |
python | mlflow__mlflow | mlflow/store/tracking/dbmodels/models.py | {
"start": 11556,
"end": 13556
} | class ____(Base):
__tablename__ = "metrics"
__table_args__ = (
PrimaryKeyConstraint(
"key", "timestamp", "step", "run_uuid", "value", "is_nan", name="metric_pk"
),
Index(f"index_{__tablename__}_run_uuid", "run_uuid"),
)
key = Column(String(250))
"""
Metric key: `String` (limit 250 characters). Part of *Primary Key* for ``metrics`` table.
"""
value = Column(sa.types.Float(precision=53), nullable=False)
"""
Metric value: `Float`. Defined as *Non-null* in schema.
"""
timestamp = Column(BigInteger, default=get_current_time_millis)
"""
Timestamp recorded for this metric entry: `BigInteger`. Part of *Primary Key* for
``metrics`` table.
"""
step = Column(BigInteger, default=0, nullable=False)
"""
Step recorded for this metric entry: `BigInteger`.
"""
is_nan = Column(Boolean(create_constraint=True), nullable=False, default=False)
"""
True if the value is in fact NaN.
"""
run_uuid = Column(String(32), ForeignKey("runs.run_uuid"))
"""
Run UUID to which this metric belongs to: Part of *Primary Key* for ``metrics`` table.
*Foreign Key* into ``runs`` table.
"""
run = relationship("SqlRun", backref=backref("metrics", cascade="all"))
"""
SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`.
"""
def __repr__(self):
return f"<SqlMetric({self.key}, {self.value}, {self.timestamp}, {self.step})>"
def to_mlflow_entity(self):
"""
Convert DB model to corresponding MLflow entity.
Returns:
mlflow.entities.Metric: Description of the return value.
"""
return Metric(
key=self.key,
value=self.value if not self.is_nan else float("nan"),
timestamp=self.timestamp,
step=self.step,
)
| SqlMetric |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/workflows.py | {
"start": 11998,
"end": 14905
} | class ____(GoogleCloudBaseOperator):
"""
Lists Workflows in a given project and location; the default order is not specified.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:WorkflowsListWorkflowsOperator`
:param filter_: Filter to restrict results to specific workflows.
:param order_by: Comma-separated list of fields that
specifies the order of the results. Default sorting order for a field is ascending.
To specify descending order for a field, append a "desc" suffix.
If not specified, the results will be returned in an unspecified order.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("location", "order_by", "filter_")
operator_extra_links = (WorkflowsListOfWorkflowsLink(),)
def __init__(
self,
*,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
filter_: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.filter_ = filter_
self.order_by = order_by
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Retrieving workflows")
workflows_iter = hook.list_workflows(
filter_=self.filter_,
order_by=self.order_by,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
WorkflowsListOfWorkflowsLink.persist(
context=context,
project_id=self.project_id or hook.project_id,
)
return [Workflow.to_dict(w) for w in workflows_iter]
| WorkflowsListWorkflowsOperator |
python | getsentry__sentry | tests/sentry/core/endpoints/scim/test_scim_team_details.py | {
"start": 11641,
"end": 12212
} | class ____(SCIMTestCase):
endpoint = "sentry-api-0-organization-scim-team-details"
method = "delete"
@patch("sentry.core.endpoints.scim.teams.metrics")
def test_delete_team(self, mock_metrics: MagicMock) -> None:
team = self.create_team(organization=self.organization, idp_provisioned=True)
self.get_success_response(self.organization.slug, team.id, status_code=204)
assert Team.objects.get(id=team.id).status == TeamStatus.PENDING_DELETION
mock_metrics.incr.assert_called_with("sentry.scim.team.delete")
| SCIMDetailDeleteTest |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 10670,
"end": 12123
} | class ____(EllipticCurveSignatureAlgorithm):
def __init__(
self,
algorithm: asym_utils.Prehashed | hashes.HashAlgorithm,
deterministic_signing: bool = False,
):
from cryptography.hazmat.backends.openssl.backend import backend
if (
deterministic_signing
and not backend.ecdsa_deterministic_supported()
):
raise UnsupportedAlgorithm(
"ECDSA with deterministic signature (RFC 6979) is not "
"supported by this version of OpenSSL.",
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM,
)
self._algorithm = algorithm
self._deterministic_signing = deterministic_signing
@property
def algorithm(
self,
) -> asym_utils.Prehashed | hashes.HashAlgorithm:
return self._algorithm
@property
def deterministic_signing(
self,
) -> bool:
return self._deterministic_signing
generate_private_key = rust_openssl.ec.generate_private_key
def derive_private_key(
private_value: int,
curve: EllipticCurve,
backend: typing.Any = None,
) -> EllipticCurvePrivateKey:
if not isinstance(private_value, int):
raise TypeError("private_value must be an integer type.")
if private_value <= 0:
raise ValueError("private_value must be a positive integer.")
return rust_openssl.ec.derive_private_key(private_value, curve)
| ECDSA |
python | kamyu104__LeetCode-Solutions | Python/maximum-size-of-a-set-after-removals.py | {
"start": 56,
"end": 486
} | class ____(object):
def maximumSetSize(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: int
"""
lookup1, lookup2 = set(nums1), set(nums2)
n, c = len(nums1), len(lookup1&lookup2)
d1, d2 = min(len(lookup1)-c, n//2), min(len(lookup2)-c, n//2)
return min(n, d1+d2+c)
# Time: O(n)
# Space: O(n)
# math, hash table, greedy
| Solution |
python | jschneier__django-storages | storages/backends/sftpstorage.py | {
"start": 6907,
"end": 8164
} | class ____(File):
def __init__(self, name, storage, mode):
self.name = name
self.mode = mode
self.file = io.BytesIO()
self._storage = storage
self._is_read = False
self._is_dirty = False
@property
def size(self):
if not hasattr(self, "_size"):
self._size = self._storage.size(self.name)
return self._size
def read(self, num_bytes=None):
if not self._is_read:
self.file = self._storage._read(self.name)
self._is_read = True
return self.file.read(num_bytes)
def write(self, content):
if "w" not in self.mode:
raise AttributeError("File was opened for read-only access.")
self.file = io.BytesIO(content)
self._is_dirty = True
self._is_read = True
def open(self, mode=None):
if not self.closed:
self.seek(0)
elif self.name and self._storage.exists(self.name):
self.file = self._storage._open(self.name, mode or self.mode)
else:
raise ValueError("The file cannot be reopened.")
def close(self):
if self._is_dirty:
self._storage._save(self.name, self)
self.file.close()
| SFTPStorageFile |
python | kamyu104__LeetCode-Solutions | Python/longest-common-subpath.py | {
"start": 1542,
"end": 2705
} | class ____(object):
def longestCommonSubpath(self, n, paths):
"""
:type n: int
:type paths: List[List[int]]
:rtype: int
"""
def RabinKarp(arr, x):
h = reduce(lambda h,x: (h*P+x)%MOD, (arr[i] for i in xrange(x)), 0)
power = pow(P, x, MOD)
lookup = {h}
for i in xrange(x, len(arr)):
h = (h*P - arr[i-x]*power + arr[i])%MOD
lookup.add(h)
return lookup
def check(paths, x):
intersect = RabinKarp(paths[0], x)
for i in xrange(1, len(paths)):
intersect = set.intersection(intersect, RabinKarp(paths[i], x))
if not intersect:
return False
return True
MOD, P = 10**11+19, max(x for p in paths for x in p)+1 # MOD is the min prime of 12-digit number
left, right = 1, min(len(p) for p in paths)
while left <= right:
mid = left + (right-left)//2
if not check(paths, mid):
right = mid-1
else:
left = mid+1
return right
| Solution2 |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 82447,
"end": 85841
} | class ____(GoogleCloudBaseOperator):
"""
Deletes a Zone resource. All assets within a zone must be deleted before the zone can be deleted.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param lake_id: Required. The ID of the Google Cloud lake that the task belongs to.
:param zone_id: Required. Zone identifier.
:param api_version: The version of the api that will be requested for example 'v3'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:return: None
"""
template_fields = (
"project_id",
"lake_id",
"zone_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
lake_id: str,
zone_id: str,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.lake_id = lake_id
self.zone_id = zone_id
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting Dataplex zone %s", self.zone_id)
operation = hook.delete_zone(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
zone_id=self.zone_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Dataplex zone %s deleted successfully!", self.zone_id)
| DataplexDeleteZoneOperator |
python | cython__cython | Cython/Debugger/Tests/test_libcython_in_gdb.py | {
"start": 5338,
"end": 5612
} | class ____(unittest.TestCase):
def test_parameters(self):
gdb.execute('set cy_colorize_code on')
assert libcython.parameters.colorize_code
gdb.execute('set cy_colorize_code off')
assert not libcython.parameters.colorize_code
| TestParameters |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/commands/ci/__init__.py | {
"start": 22790,
"end": 44319
} | class ____(Enum):
pex = "python-executable"
docker = "docker"
@app.command(help="Build selected or requested locations")
def build(
statedir: str = STATEDIR_OPTION,
location_name: list[str] = typer.Option([]),
build_directory: Optional[str] = typer.Option(
None,
help=(
"Directory root for building this code location. Read from dagster_cloud.yaml by"
" default."
),
),
build_strategy: BuildStrategy = typer.Option(
"docker",
help=(
"Build strategy used to build code locations. 'docker' builds a docker image."
" 'python-executable' builds a set of pex files."
),
),
docker_image_tag: Optional[str] = typer.Option(
None, help="Tag for built docker image. Auto-generated by default."
),
docker_base_image: Optional[str] = typer.Option(
None,
help="Base image used to build the docker image for --build-strategy=docker.",
),
docker_env: list[str] = typer.Option([], help="Env vars for docker builds."),
dockerfile_path: Optional[str] = typer.Option(
None,
help=(
"Path to a Dockerfile to use for the docker build. If not provided, a default templated Dockerfile is used."
),
),
python_version: str = typer.Option(
DEFAULT_PYTHON_VERSION,
help=(
"Python version used to build the python-executable; or to determine the default base"
" image for docker."
),
),
pex_build_method: deps.BuildMethod = typer.Option("local"),
pex_deps_cache_from: Optional[str] = None,
pex_deps_cache_to: Optional[str] = None,
pex_base_image_tag: Optional[str] = typer.Option(
None,
help="Base image used to run python executable for --build-strategy=python-executable.",
),
use_editable_dagster: bool = typer.Option(
False,
help="Include the editable dagster package in the Docker context for the build.",
),
) -> None:
build_impl(
statedir,
location_name,
build_directory,
build_strategy,
docker_image_tag,
docker_base_image,
docker_env,
dockerfile_path,
python_version,
pex_build_method,
pex_deps_cache_from,
pex_deps_cache_to,
pex_base_image_tag,
use_editable_dagster,
)
def build_impl(
statedir: str,
location_name: list[str],
build_directory: Optional[str],
build_strategy: BuildStrategy,
docker_image_tag: Optional[str],
docker_base_image: Optional[str],
docker_env: list[str],
dockerfile_path: Optional[str],
python_version: str,
pex_build_method: deps.BuildMethod,
pex_deps_cache_from: Optional[str],
pex_deps_cache_to: Optional[str],
pex_base_image_tag: Optional[str],
use_editable_dagster: bool,
):
if python_version:
# ensure version is parseable
pex_builder.util.parse_python_version(python_version)
if build_strategy == BuildStrategy.pex:
if docker_base_image or docker_image_tag:
raise ui.error(
"--base-image or --image-tag not supported for --build-strategy=python-executable."
)
if docker_base_image and dockerfile_path:
raise ui.error(
"--base-image and --dockerfile-path cannot both be provided. Please provide only one."
)
state_store = state.FileStore(statedir=statedir)
locations = _get_selected_locations(state_store, location_name)
ui.print("Going to build the following locations:")
for name in locations:
ui.print(f"- {name}")
for name, location_state in locations.items():
project_dir = location_state.project_dir
try:
configured_build_directory = (
location_state.build.build_config.directory
if (
location_state.build.build_config
and location_state.build.build_config.directory
)
else None
)
if build_directory and configured_build_directory:
ui.warn(
f"Overriding configured build:directory:{configured_build_directory!r} with"
f" cmdline provided --build-directory={build_directory!r}"
)
location_build_dir = build_directory
elif (not build_directory) and configured_build_directory:
location_build_dir = configured_build_directory
elif build_directory and (not configured_build_directory):
location_build_dir = build_directory
else:
location_build_dir = "."
if project_dir and not os.path.isabs(location_build_dir):
location_build_dir = str(pathlib.Path(project_dir) / location_build_dir)
url = location_state.url
api_token = get_user_token() or ""
if build_strategy == BuildStrategy.docker:
location_state.build_output = _build_docker(
url=url,
api_token=api_token,
name=name,
location_build_dir=location_build_dir,
docker_base_image=docker_base_image,
python_version=python_version,
docker_env=docker_env,
location_state=location_state,
dockerfile_path=dockerfile_path,
use_editable_dagster=use_editable_dagster,
)
state_store.save(location_state)
elif build_strategy == BuildStrategy.pex:
location_state.build_output = _build_pex(
url=url,
api_token=api_token,
name=name,
location_build_dir=location_build_dir,
python_version=python_version,
pex_build_method=pex_build_method,
pex_deps_cache_from=pex_deps_cache_from,
pex_deps_cache_to=pex_deps_cache_to,
pex_base_image_tag=pex_base_image_tag,
location_state=location_state,
)
state_store.save(location_state)
ui.print(
"Built and uploaded python executable"
f" {location_state.build_output.pex_tag} for location {name}"
)
except:
location_state.add_status_change(state.LocationStatus.failed, "build failed")
state_store.save(location_state)
raise
else:
location_state.add_status_change(state.LocationStatus.pending, "build successful")
state_store.save(location_state)
@metrics.instrument(
CliEventType.BUILD,
tags=[
CliEventTags.subcommand.dagster_cloud_ci,
CliEventTags.server_strategy.docker,
],
)
# url and api_token are used by the instrument decorator
def _build_docker(
url: str,
api_token: str,
name: str,
location_build_dir: str,
python_version: str,
docker_base_image: Optional[str],
docker_env: list[str],
location_state: state.LocationState,
use_editable_dagster: bool,
dockerfile_path: Optional[str] = None,
) -> state.DockerBuildOutput:
name = location_state.location_name
docker_utils.verify_docker()
registry_info = utils.get_registry_info(url)
docker_image_tag = docker_utils.default_image_tag(
location_state.deployment_name, name, location_state.build.commit_hash
)
if not dockerfile_path and not docker_base_image:
docker_base_image = f"python:{python_version}-slim"
ui.print(
f"Building docker image for location {name}"
+ (f" using base image {docker_base_image}" if docker_base_image else "")
)
retval = docker_utils.build_image(
location_build_dir,
docker_image_tag,
registry_info,
env_vars=docker_env,
base_image=docker_base_image,
dockerfile_path=dockerfile_path,
use_editable_dagster=use_editable_dagster,
)
if retval != 0:
raise ui.error(f"Failed to build docker image for location {name}")
retval = docker_utils.upload_image(docker_image_tag, registry_info)
if retval != 0:
raise ui.error(f"Failed to upload docker image for location {name}")
image = f"{registry_info['registry_url']}:{docker_image_tag}"
ui.print(f"Built and uploaded image {image} for location {name}")
return state.DockerBuildOutput(image=image)
@metrics.instrument(
CliEventType.BUILD,
tags=[CliEventTags.subcommand.dagster_cloud_ci, CliEventTags.server_strategy.pex],
)
# url and api_token are used by the instrument decorator
def _build_pex(
url: str,
api_token: str,
name: str,
location_build_dir: str,
python_version: str,
pex_build_method: deps.BuildMethod,
pex_deps_cache_from: Optional[str],
pex_deps_cache_to: Optional[str],
pex_base_image_tag: Optional[str],
location_state: state.LocationState,
) -> state.PexBuildOutput:
pex_location = parse_workspace.Location(
name,
directory=location_build_dir,
build_folder=location_build_dir,
location_file=location_state.location_file,
)
location_kwargs = pex_utils.build_upload_pex(
url=url,
api_token=api_token,
location=pex_location,
build_method=pex_build_method,
kwargs={
"python_version": python_version,
"base_image_tag": pex_base_image_tag,
"deps_cache_from": pex_deps_cache_from,
"deps_cache_to": pex_deps_cache_to,
},
)
return state.PexBuildOutput(
python_version=python_version,
image=location_kwargs.get("image"),
pex_tag=location_kwargs["pex_tag"],
)
@app.command(help="Update the current build session for an externally built docker image.")
def set_build_output(
statedir: str = STATEDIR_OPTION,
location_name: list[str] = typer.Option([]),
image_tag: str = typer.Option(
...,
help=(
"Tag for the built docker image. Note the registry must be specified in"
" dagster_cloud.yaml."
),
),
) -> None:
set_build_output_impl(
statedir,
location_name,
image_tag,
)
def set_build_output_impl(
statedir: str,
location_name: list[str],
image_tag: str,
) -> None:
state_store = state.FileStore(statedir=statedir)
locations = _get_selected_locations(state_store, location_name)
ui.print("Going to update the following locations:")
for name in locations:
ui.print(f"- {name}")
# validation pass - computes the full image name for all locations
images = {}
for name, location_state in locations.items():
configured_defs = load_dagster_cloud_yaml(
open(location_state.location_file, encoding="utf-8").read()
)
location_defs = [loc for loc in configured_defs.locations if loc.location_name == name]
if not location_defs:
raise ui.error(f"Location {name} not found in {location_state.location_file}")
location_def = location_defs[0]
registry = location_def.build.registry if location_def.build else None
if not registry:
raise ui.error(
f"No build:registry: defined for location {name} in {location_state.location_file}"
)
images[name] = f"{registry}:{image_tag}"
# save pass - save full image name computed in the previous pass for all locations
for name, location_state in locations.items():
# Update and save build state
location_state.build_output = state.DockerBuildOutput(image=images[name])
state_store.save(location_state)
ui.print(f"Recorded image {images[name]} for location {name}")
ui.print("Use 'ci deploy' to update dagster-cloud.")
@app.command(help="Deploy built code locations to dagster cloud.")
def deploy(
statedir: str = STATEDIR_OPTION,
location_name: list[str] = typer.Option([]),
location_load_timeout: int = LOCATION_LOAD_TIMEOUT_OPTION,
agent_heartbeat_timeout: int = get_agent_heartbeat_timeout_option(default_timeout=None),
):
deploy_impl(statedir, location_name, location_load_timeout, agent_heartbeat_timeout)
def deploy_impl(
statedir: str,
location_name: list[str],
location_load_timeout: int,
agent_heartbeat_timeout: Optional[int],
):
state_store = state.FileStore(statedir=statedir)
locations = _get_selected_locations(state_store, location_name)
ui.print("Going to deploy the following locations:")
built_locations: list[state.LocationState] = []
unbuilt_location_names: list[str] = []
for name, location_state in locations.items():
if location_state.build_output:
status = "Ready to deploy"
built_locations.append(location_state)
else:
status = "Not ready to deploy"
unbuilt_location_names.append(name)
ui.print(f"- {name} [{status}]")
if unbuilt_location_names:
raise ui.error(
f"Cannot deploy because the following location{'s have' if len(unbuilt_location_names) > 1 else ' has'} "
f"not been built: {', '.join(unbuilt_location_names)}. "
"Use 'ci build' (in Dagster+ Serverless) or `ci set-build-output` (in Dagster+ Hybrid) to build"
" locations."
)
if not built_locations:
ui.print("No locations to deploy")
return
try:
_deploy(
url=built_locations[0].url,
api_token=check.not_none(get_user_token()),
built_locations=built_locations,
location_load_timeout=location_load_timeout,
agent_heartbeat_timeout=agent_heartbeat_timeout,
)
except:
# unfortunately we do not know if only a subset of locations failed to deploy
for location_state in built_locations:
location_state.add_status_change(state.LocationStatus.failed, "deploy failed")
state_store.save(location_state)
raise
else:
for location_state in built_locations:
location_state.add_status_change(state.LocationStatus.success, "deploy successful")
state_store.save(location_state)
deployment_url = built_locations[0].url + "/" + built_locations[0].deployment_name
ui.print(f"View the status of your locations at {deployment_url}/locations.")
@metrics.instrument(CliEventType.DEPLOY, tags=[CliEventTags.subcommand.dagster_cloud_ci])
# url and api_token are used by the instrument decorator
def _deploy(
*,
url: str,
api_token: str,
built_locations: list[state.LocationState],
location_load_timeout: int,
agent_heartbeat_timeout: Optional[int],
):
locations_document = []
for location_state in built_locations:
build_output = location_state.build_output
if not build_output: # not necessary but keep type checker happy
continue
location_args = {
"image": build_output.image,
"location_file": location_state.location_file,
"git_url": location_state.build.git_url,
"commit_hash": location_state.build.commit_hash,
**(
{"defs_state_info": location_state.defs_state_info.model_dump()}
if location_state.defs_state_info
else {}
),
}
if build_output.strategy == "python-executable":
metrics.instrument_add_tags([CliEventTags.server_strategy.pex])
location_args["pex_tag"] = build_output.pex_tag
location_args["python_version"] = build_output.python_version
else:
metrics.instrument_add_tags([CliEventTags.server_strategy.docker])
locations_document.append(
get_location_document(location_state.location_name, location_args)
)
deployment_url = built_locations[0].url + "/" + built_locations[0].deployment_name
with utils.client_from_env(
built_locations[0].url, deployment=built_locations[0].deployment_name
) as client:
location_names = [location_state.location_name for location_state in built_locations]
gql.deploy_code_locations(client, {"locations": locations_document})
ui.print(
f"Updated code location{'s' if len(location_names) > 1 else ''} {', '.join(location_names)} in dagster-cloud."
)
if not agent_heartbeat_timeout:
agent_type = gql.fetch_agent_type(client)
# raise the agent heartbeat timeout for serverless deploys to ensure that the
# deploy doesn't fail if the agent is still being spun up. A deploy is serverless if
# that's the default agent type for the deployment and the locations aren't being
# deployed to some other agent queue (since serverless agents only serve the default
# agent queue)
if agent_type != DagsterPlusDeploymentAgentType.SERVERLESS or any(
location_config.get("agent_queue") for location_config in locations_document
):
agent_heartbeat_timeout = DEFAULT_HYBRID_AGENT_HEARTBEAT_TIMEOUT
else:
agent_heartbeat_timeout = DEFAULT_SERVERLESS_AGENT_HEARTBEAT_TIMEOUT
wait_for_load(
client,
location_names,
location_load_timeout=location_load_timeout,
agent_heartbeat_timeout=agent_heartbeat_timeout,
url=deployment_url,
)
dagster_dbt_app = typer.Typer(
hidden=True,
help="Dagster Cloud commands for managing the `dagster-dbt` integration.",
add_completion=False,
)
app.add_typer(dagster_dbt_app, name="dagster-dbt", no_args_is_help=True)
project_app = typer.Typer(
name="project",
no_args_is_help=True,
help="Commands for using a dbt project in Dagster.",
add_completion=False,
)
dagster_dbt_app.add_typer(project_app, name="project", no_args_is_help=True)
@project_app.command(
name="manage-state",
help="""
This CLI command will handle uploading and downloading dbt state, in the form of manifest.json,
if `state_path` is specified on `DbtProject`.
""",
)
def manage_state_command(
statedir: str = STATEDIR_OPTION,
file: Annotated[
Optional[pathlib.Path],
typer.Option(
help="The file containing DbtProject definitions to prepare.",
),
] = None,
components: Annotated[
Optional[pathlib.Path],
typer.Option(
help="The path to a dg project directory containing DbtProjectComponents.",
),
] = None,
source_deployment: Annotated[
str,
typer.Option(
help="Which deployment should upload its manifest.json.",
),
] = "prod",
key_prefix: Annotated[
str,
typer.Option(
help="A key prefix for the key the manifest.json is saved with.",
),
] = "",
):
try:
from dagster_dbt import DbtProject
except:
ui.print(
"Unable to import dagster_dbt. To use `manage-state`, dagster_dbt must be installed."
)
return
try:
from dagster._core.code_pointer import load_python_file
from dagster._core.definitions.module_loaders.utils import find_objects_in_module_of_types
except:
ui.print("Unable to import dagster. To use `manage-state`, dagster must be installed.")
return
state_store = state.FileStore(statedir=statedir)
locations = state_store.list_locations()
if not locations:
raise ui.error("Unable to determine deployment state.")
location = locations[0]
deployment_name = location.deployment_name
is_branch = location.is_branch_deployment
if file:
contents = load_python_file(file, None)
projects = find_objects_in_module_of_types(contents, DbtProject)
elif components:
from dagster_dbt.components.dbt_project.component import get_projects_from_dbt_component
projects = get_projects_from_dbt_component(components)
else:
raise click.UsageError("Must specify --file or --components")
for project in projects:
project = cast("DbtProject", project)
if project.state_path:
download_path = project.state_path.joinpath("manifest.json")
key = f"{key_prefix}{os.fspath(download_path)}"
if is_branch:
ui.print(f"Downloading {source_deployment} manifest for branch deployment.")
os.makedirs(project.state_path, exist_ok=True)
download_organization_artifact(key, download_path)
ui.print("Download complete.")
elif deployment_name == source_deployment:
ui.print(f"Uploading {source_deployment} manifest.")
upload_organization_artifact(key, project.manifest_path)
ui.print("Upload complete")
else:
ui.warn(
f"Deployment named {deployment_name} does not match source deployment {source_deployment}, taking no action. "
f"If this is the desired dbt state artifacts to upload, set the cli flags `--source-deployment {deployment_name}`."
)
| BuildStrategy |
python | gevent__gevent | _setuputils.py | {
"start": 14159,
"end": 14902
} | class ____(_Extension):
# This class has a few functions:
#
# 1. Make pylint happy in terms of attributes we use.
# 2. Add default arguments, often platform specific.
def __init__(self, *args, **kwargs):
self.libraries = []
self.define_macros = []
# Python 2 has this as an old-style class for some reason
# so super() doesn't work.
_Extension.__init__(self, *args, **kwargs) # pylint:disable=no-member,non-parent-init-called
from distutils.command.clean import clean # pylint:disable=no-name-in-module,import-error
from distutils import log # pylint:disable=no-name-in-module
from distutils.dir_util import remove_tree # pylint:disable=no-name-in-module,import-error
| Extension |
python | kamyu104__LeetCode-Solutions | Python/candy.py | {
"start": 29,
"end": 541
} | class ____(object):
# @param ratings, a list of integer
# @return an integer
def candy(self, ratings):
candies = [1 for _ in xrange(len(ratings))]
for i in xrange(1, len(ratings)):
if ratings[i] > ratings[i - 1]:
candies[i] = candies[i - 1] + 1
for i in reversed(xrange(1, len(ratings))):
if ratings[i - 1] > ratings[i] and candies[i - 1] <= candies[i]:
candies[i - 1] = candies[i] + 1
return sum(candies)
| Solution |
python | pypa__pip | src/pip/_vendor/packaging/metadata.py | {
"start": 26320,
"end": 34739
} | class ____:
"""Representation of distribution metadata.
Compared to :class:`RawMetadata`, this class provides objects representing
metadata fields instead of only using built-in types. Any invalid metadata
will cause :exc:`InvalidMetadata` to be raised (with a
:py:attr:`~BaseException.__cause__` attribute as appropriate).
"""
_raw: RawMetadata
@classmethod
def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> Metadata:
"""Create an instance from :class:`RawMetadata`.
If *validate* is true, all metadata will be validated. All exceptions
related to validation will be gathered and raised as an :class:`ExceptionGroup`.
"""
ins = cls()
ins._raw = data.copy() # Mutations occur due to caching enriched values.
if validate:
exceptions: list[Exception] = []
try:
metadata_version = ins.metadata_version
metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version)
except InvalidMetadata as metadata_version_exc:
exceptions.append(metadata_version_exc)
metadata_version = None
# Make sure to check for the fields that are present, the required
# fields (so their absence can be reported).
fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS
# Remove fields that have already been checked.
fields_to_check -= {"metadata_version"}
for key in fields_to_check:
try:
if metadata_version:
# Can't use getattr() as that triggers descriptor protocol which
# will fail due to no value for the instance argument.
try:
field_metadata_version = cls.__dict__[key].added
except KeyError:
exc = InvalidMetadata(key, f"unrecognized field: {key!r}")
exceptions.append(exc)
continue
field_age = _VALID_METADATA_VERSIONS.index(
field_metadata_version
)
if field_age > metadata_age:
field = _RAW_TO_EMAIL_MAPPING[key]
exc = InvalidMetadata(
field,
f"{field} introduced in metadata version "
f"{field_metadata_version}, not {metadata_version}",
)
exceptions.append(exc)
continue
getattr(ins, key)
except InvalidMetadata as exc:
exceptions.append(exc)
if exceptions:
raise ExceptionGroup("invalid metadata", exceptions)
return ins
@classmethod
def from_email(cls, data: bytes | str, *, validate: bool = True) -> Metadata:
"""Parse metadata from email headers.
If *validate* is true, the metadata will be validated. All exceptions
related to validation will be gathered and raised as an :class:`ExceptionGroup`.
"""
raw, unparsed = parse_email(data)
if validate:
exceptions: list[Exception] = []
for unparsed_key in unparsed:
if unparsed_key in _EMAIL_TO_RAW_MAPPING:
message = f"{unparsed_key!r} has invalid data"
else:
message = f"unrecognized field: {unparsed_key!r}"
exceptions.append(InvalidMetadata(unparsed_key, message))
if exceptions:
raise ExceptionGroup("unparsed", exceptions)
try:
return cls.from_raw(raw, validate=validate)
except ExceptionGroup as exc_group:
raise ExceptionGroup(
"invalid or unparsed metadata", exc_group.exceptions
) from None
metadata_version: _Validator[_MetadataVersion] = _Validator()
""":external:ref:`core-metadata-metadata-version`
(required; validated to be a valid metadata version)"""
# `name` is not normalized/typed to NormalizedName so as to provide access to
# the original/raw name.
name: _Validator[str] = _Validator()
""":external:ref:`core-metadata-name`
(required; validated using :func:`~packaging.utils.canonicalize_name` and its
*validate* parameter)"""
version: _Validator[version_module.Version] = _Validator()
""":external:ref:`core-metadata-version` (required)"""
dynamic: _Validator[list[str] | None] = _Validator(
added="2.2",
)
""":external:ref:`core-metadata-dynamic`
(validated against core metadata field names and lowercased)"""
platforms: _Validator[list[str] | None] = _Validator()
""":external:ref:`core-metadata-platform`"""
supported_platforms: _Validator[list[str] | None] = _Validator(added="1.1")
""":external:ref:`core-metadata-supported-platform`"""
summary: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-summary` (validated to contain no newlines)"""
description: _Validator[str | None] = _Validator() # TODO 2.1: can be in body
""":external:ref:`core-metadata-description`"""
description_content_type: _Validator[str | None] = _Validator(added="2.1")
""":external:ref:`core-metadata-description-content-type` (validated)"""
keywords: _Validator[list[str] | None] = _Validator()
""":external:ref:`core-metadata-keywords`"""
home_page: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-home-page`"""
download_url: _Validator[str | None] = _Validator(added="1.1")
""":external:ref:`core-metadata-download-url`"""
author: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-author`"""
author_email: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-author-email`"""
maintainer: _Validator[str | None] = _Validator(added="1.2")
""":external:ref:`core-metadata-maintainer`"""
maintainer_email: _Validator[str | None] = _Validator(added="1.2")
""":external:ref:`core-metadata-maintainer-email`"""
license: _Validator[str | None] = _Validator()
""":external:ref:`core-metadata-license`"""
license_expression: _Validator[NormalizedLicenseExpression | None] = _Validator(
added="2.4"
)
""":external:ref:`core-metadata-license-expression`"""
license_files: _Validator[list[str] | None] = _Validator(added="2.4")
""":external:ref:`core-metadata-license-file`"""
classifiers: _Validator[list[str] | None] = _Validator(added="1.1")
""":external:ref:`core-metadata-classifier`"""
requires_dist: _Validator[list[requirements.Requirement] | None] = _Validator(
added="1.2"
)
""":external:ref:`core-metadata-requires-dist`"""
requires_python: _Validator[specifiers.SpecifierSet | None] = _Validator(
added="1.2"
)
""":external:ref:`core-metadata-requires-python`"""
# Because `Requires-External` allows for non-PEP 440 version specifiers, we
# don't do any processing on the values.
requires_external: _Validator[list[str] | None] = _Validator(added="1.2")
""":external:ref:`core-metadata-requires-external`"""
project_urls: _Validator[dict[str, str] | None] = _Validator(added="1.2")
""":external:ref:`core-metadata-project-url`"""
# PEP 685 lets us raise an error if an extra doesn't pass `Name` validation
# regardless of metadata version.
provides_extra: _Validator[list[utils.NormalizedName] | None] = _Validator(
added="2.1",
)
""":external:ref:`core-metadata-provides-extra`"""
provides_dist: _Validator[list[str] | None] = _Validator(added="1.2")
""":external:ref:`core-metadata-provides-dist`"""
obsoletes_dist: _Validator[list[str] | None] = _Validator(added="1.2")
""":external:ref:`core-metadata-obsoletes-dist`"""
requires: _Validator[list[str] | None] = _Validator(added="1.1")
"""``Requires`` (deprecated)"""
provides: _Validator[list[str] | None] = _Validator(added="1.1")
"""``Provides`` (deprecated)"""
obsoletes: _Validator[list[str] | None] = _Validator(added="1.1")
"""``Obsoletes`` (deprecated)"""
| Metadata |
python | doocs__leetcode | lcci/04.02.Minimum Height Tree/Solution.py | {
"start": 164,
"end": 478
} | class ____:
def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
def dfs(l: int, r: int) -> TreeNode:
if l > r:
return None
mid = (l + r) >> 1
return TreeNode(nums[mid], dfs(l, mid - 1), dfs(mid + 1, r))
return dfs(0, len(nums) - 1)
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec49.py | {
"start": 200,
"end": 246
} | class ____(Generic[P]):
pass
| TaskDeclaration |
python | altair-viz__altair | altair/utils/schemapi.py | {
"start": 61107,
"end": 63971
} | class ____:
def __init__(self, prop: str, schema: dict[str, Any]) -> None:
self.prop = prop
self.schema = schema
def __get__(self, obj, cls):
from altair import vegalite
self.obj = obj
self.cls = cls
# The docs from the encoding class parameter (e.g. `bin` in X, Color,
# etc); this provides a general description of the parameter.
self.__doc__ = self.schema["description"].replace("__", "**")
property_name = f"{self.prop}"[0].upper() + f"{self.prop}"[1:]
if altair_prop := getattr(vegalite, property_name, None):
# Add the docstring from the helper class (e.g. `BinParams`) so
# that all the parameter names of the helper class are included in
# the final docstring
parameter_index = altair_prop.__doc__.find("Parameters\n")
if parameter_index > -1:
self.__doc__ = (
altair_prop.__doc__[:parameter_index].replace(" ", "")
+ self.__doc__
+ textwrap.dedent(
f"\n\n {altair_prop.__doc__[parameter_index:]}"
)
)
# For short docstrings such as Aggregate, Stack, et
else:
self.__doc__ = (
altair_prop.__doc__.replace(" ", "") + "\n" + self.__doc__
)
# Add signatures and tab completion for the method and parameter names
self.__signature__ = inspect.signature(altair_prop)
self.__wrapped__ = inspect.getfullargspec(altair_prop)
self.__name__ = altair_prop.__name__
else:
# It seems like bandPosition is the only parameter that doesn't
# have a helper class.
pass
return self
def __call__(self, *args: Any, **kwargs: Any):
obj = self.obj.copy()
# TODO: use schema to validate
obj[self.prop] = args[0] if args else kwargs
return obj
def with_property_setters(cls: type[TSchemaBase]) -> type[TSchemaBase]:
"""Decorator to add property setters to a Schema class."""
schema = cls.resolve_references()
for prop, propschema in schema.get("properties", {}).items():
setattr(cls, prop, _PropertySetter(prop, propschema))
return cls
VERSIONS: Mapping[
Literal[
"vega-datasets", "vega-embed", "vega-lite", "vegafusion", "vl-convert-python"
],
str,
] = {
"vega-datasets": "v3.2.1",
"vega-embed": "v7",
"vega-lite": "v6.1.0",
"vegafusion": "2.0.3",
"vl-convert-python": "1.8.0",
}
"""
Version pins for non-``python`` `vega projects`_.
Notes
-----
When cutting a new release, make sure to update ``[tool.altair.vega]`` in ``pyproject.toml``.
.. _vega projects:
https://github.com/vega
"""
| _PropertySetter |
python | apache__airflow | airflow-core/tests/unit/dag_processing/test_manager.py | {
"start": 3568,
"end": 57566
} | class ____:
@pytest.fixture(autouse=True)
def _disable_examples(self):
with conf_vars({("core", "load_examples"): "False"}):
yield
def setup_method(self):
clear_db_teams()
clear_db_assets()
clear_db_runs()
clear_db_serialized_dags()
clear_db_dags()
clear_db_callbacks()
clear_db_import_errors()
clear_db_dag_bundles()
def teardown_class(self):
clear_db_teams()
clear_db_assets()
clear_db_runs()
clear_db_serialized_dags()
clear_db_dags()
clear_db_callbacks()
clear_db_import_errors()
clear_db_dag_bundles()
def mock_processor(self, start_time: float | None = None) -> tuple[DagFileProcessorProcess, socket]:
proc = MagicMock()
logger_filehandle = MagicMock()
proc.create_time.return_value = time.time()
proc.wait.return_value = 0
read_end, write_end = socketpair()
ret = DagFileProcessorProcess(
process_log=MagicMock(),
id=uuid7(),
pid=1234,
process=proc,
stdin=write_end,
logger_filehandle=logger_filehandle,
client=MagicMock(),
)
if start_time:
ret.start_time = start_time
ret._open_sockets.clear()
return ret, read_end
@pytest.fixture
def clear_parse_import_errors(self):
clear_db_import_errors()
@pytest.mark.usefixtures("clear_parse_import_errors")
@conf_vars({("core", "load_examples"): "False"})
def test_remove_file_clears_import_error(self, tmp_path, configure_testing_dag_bundle):
path_to_parse = tmp_path / "temp_dag.py"
# Generate original import error
path_to_parse.write_text("an invalid airflow DAG")
with configure_testing_dag_bundle(path_to_parse):
manager = DagFileProcessorManager(
max_runs=1,
processor_timeout=365 * 86_400,
)
manager.run()
with create_session() as session:
import_errors = session.query(ParseImportError).all()
assert len(import_errors) == 1
path_to_parse.unlink()
# Rerun the parser once the dag file has been removed
manager.run()
with create_session() as session:
import_errors = session.query(ParseImportError).all()
assert len(import_errors) == 0
session.rollback()
@conf_vars({("core", "load_examples"): "False"})
def test_max_runs_when_no_files(self, tmp_path):
with conf_vars({("core", "dags_folder"): str(tmp_path)}):
manager = DagFileProcessorManager(max_runs=1)
manager.run()
# TODO: AIP-66 no asserts?
def test_start_new_processes_with_same_filepath(self, configure_testing_dag_bundle):
"""
Test that when a processor already exist with a filepath, a new processor won't be created
with that filepath. The filepath will just be removed from the list.
"""
with configure_testing_dag_bundle("/tmp"):
manager = DagFileProcessorManager(max_runs=1)
manager._dag_bundles = list(DagBundlesManager().get_all_dag_bundles())
file_1 = DagFileInfo(bundle_name="testing", rel_path=Path("file_1.py"), bundle_path=TEST_DAGS_FOLDER)
file_2 = DagFileInfo(bundle_name="testing", rel_path=Path("file_2.py"), bundle_path=TEST_DAGS_FOLDER)
file_3 = DagFileInfo(bundle_name="testing", rel_path=Path("file_3.py"), bundle_path=TEST_DAGS_FOLDER)
manager._file_queue = deque([file_1, file_2, file_3])
# Mock that only one processor exists. This processor runs with 'file_1'
manager._processors[file_1] = MagicMock()
# Start New Processes
with mock.patch.object(DagFileProcessorManager, "_create_process"):
manager._start_new_processes()
# Because of the config: '[dag_processor] parsing_processes = 2'
# verify that only one extra process is created
# and since a processor with 'file_1' already exists,
# even though it is first in '_file_path_queue'
# a new processor is created with 'file_2' and not 'file_1'.
assert file_1 in manager._processors.keys()
assert file_2 in manager._processors.keys()
assert deque([file_3]) == manager._file_queue
def test_handle_removed_files_when_processor_file_path_not_in_new_file_paths(self):
"""Ensure processors and file stats are removed when the file path is not in the new file paths"""
manager = DagFileProcessorManager(max_runs=1)
bundle_name = "testing"
file = DagFileInfo(
bundle_name=bundle_name, rel_path=Path("missing_file.txt"), bundle_path=TEST_DAGS_FOLDER
)
manager._processors[file] = MagicMock()
manager._file_stats[file] = DagFileStat()
manager.handle_removed_files({bundle_name: set()})
assert manager._processors == {}
assert file not in manager._file_stats
def test_handle_removed_files_when_processor_file_path_is_present(self):
"""handle_removed_files should not purge files that are still present."""
manager = DagFileProcessorManager(max_runs=1)
bundle_name = "testing"
file = DagFileInfo(bundle_name=bundle_name, rel_path=Path("abc.txt"), bundle_path=TEST_DAGS_FOLDER)
mock_processor = MagicMock()
manager._processors[file] = mock_processor
manager.handle_removed_files(known_files={bundle_name: {file}})
assert manager._processors == {file: mock_processor}
@conf_vars({("dag_processor", "file_parsing_sort_mode"): "alphabetical"})
def test_files_in_queue_sorted_alphabetically(self):
"""Test dag files are sorted alphabetically"""
file_names = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"]
dag_files = _get_file_infos(file_names)
ordered_dag_files = _get_file_infos(sorted(file_names))
manager = DagFileProcessorManager(max_runs=1)
known_files = {"some-bundle": set(dag_files)}
assert manager._file_queue == deque()
manager.prepare_file_queue(known_files=known_files)
assert manager._file_queue == deque(ordered_dag_files)
@conf_vars({("dag_processor", "file_parsing_sort_mode"): "random_seeded_by_host"})
def test_files_sorted_random_seeded_by_host(self):
"""Test files are randomly sorted and seeded by host name"""
f_infos = _get_file_infos(["file_3.py", "file_2.py", "file_4.py", "file_1.py"])
known_files = {"anything": f_infos}
manager = DagFileProcessorManager(max_runs=1)
assert manager._file_queue == deque()
manager.prepare_file_queue(known_files=known_files) # using list over test for reproducibility
random.Random(get_hostname()).shuffle(f_infos)
expected = deque(f_infos)
assert manager._file_queue == expected
# Verify running it again produces same order
manager._files = []
manager.prepare_file_queue(known_files=known_files)
assert manager._file_queue == expected
@conf_vars({("dag_processor", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("airflow.utils.file.os.path.getmtime", new=mock_get_mtime)
def test_files_sorted_by_modified_time(self):
"""Test files are sorted by modified time"""
paths_with_mtime = [
("file_3.py", 3.0),
("file_2.py", 2.0),
("file_4.py", 5.0),
("file_1.py", 4.0),
]
filenames = encode_mtime_in_filename(paths_with_mtime)
dag_files = _get_file_infos(filenames)
manager = DagFileProcessorManager(max_runs=1)
assert manager._file_queue == deque()
manager.prepare_file_queue(known_files={"any": set(dag_files)})
ordered_files = _get_file_infos(
[
"file_4-ss=5.0.py",
"file_1-ss=4.0.py",
"file_3-ss=3.0.py",
"file_2-ss=2.0.py",
]
)
assert manager._file_queue == deque(ordered_files)
@conf_vars({("dag_processor", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("airflow.utils.file.os.path.getmtime", new=mock_get_mtime)
def test_queued_files_exclude_missing_file(self):
"""Check that a file is not enqueued for processing if it has been deleted"""
file_and_mtime = [("file_3.py", 2.0), ("file_2.py", 3.0), ("file_4.py", FileNotFoundError)]
filenames = encode_mtime_in_filename(file_and_mtime)
file_infos = _get_file_infos(filenames)
manager = DagFileProcessorManager(max_runs=1)
manager.prepare_file_queue(known_files={"any": set(file_infos)})
ordered_files = _get_file_infos(["file_2-ss=3.0.py", "file_3-ss=2.0.py"])
assert manager._file_queue == deque(ordered_files)
    @conf_vars({("dag_processor", "file_parsing_sort_mode"): "modified_time"})
    @mock.patch("airflow.utils.file.os.path.getmtime", new=mock_get_mtime)
    def test_add_new_file_to_parsing_queue(self):
        """Check that new file is added to parsing queue"""
        dag_files = _get_file_infos(["file_1-ss=2.0.py", "file_2-ss=3.0.py", "file_3-ss=4.0.py"])
        from random import Random
        # NOTE(review): this seeded RNG draw is unused and appears to be a
        # leftover; it does not visibly affect the manager — confirm and remove.
        Random("file_2.py").random()
        manager = DagFileProcessorManager(max_runs=1)
        manager.prepare_file_queue(known_files={"any": set(dag_files)})
        assert set(manager._file_queue) == set(dag_files)
        # Re-prepare with an additional (older-mtime) file; it should be appended.
        manager.prepare_file_queue(
            known_files={"any": set((*dag_files, *_get_file_infos(["file_4-ss=1.0.py"])))}
        )
        # manager.add_files_to_queue()
        # Queue stays ordered by mtime descending; the new oldest file sorts last.
        ordered_files = _get_file_infos(
            [
                "file_3-ss=4.0.py",
                "file_2-ss=3.0.py",
                "file_1-ss=2.0.py",
                "file_4-ss=1.0.py",
            ]
        )
        assert manager._file_queue == deque(ordered_files)
    @conf_vars({("dag_processor", "file_parsing_sort_mode"): "modified_time"})
    @mock.patch("airflow.utils.file.os.path.getmtime")
    def test_recently_modified_file_is_parsed_with_mtime_mode(self, mock_getmtime):
        """
        Test recently updated files are processed even if min_file_process_interval is not reached
        """
        freezed_base_time = timezone.datetime(2020, 1, 5, 0, 0, 0)
        initial_file_1_mtime = (freezed_base_time - timedelta(minutes=5)).timestamp()
        dag_file = DagFileInfo(
            bundle_name="testing", rel_path=Path("file_1.py"), bundle_path=TEST_DAGS_FOLDER
        )
        known_files = {"does-not-matter": {dag_file}}
        # side_effect lists are consumed one call at a time: each prepare_file_queue
        # below performs exactly one getmtime call for the single known file.
        mock_getmtime.side_effect = [initial_file_1_mtime]
        manager = DagFileProcessorManager(max_runs=3)
        # let's say the DAG was just parsed 10 seconds before the Freezed time
        last_finish_time = freezed_base_time - timedelta(seconds=10)
        manager._file_stats = {
            dag_file: DagFileStat(1, 0, last_finish_time, 1.0, 1, 1),
        }
        with time_machine.travel(freezed_base_time):
            assert manager._file_queue == deque()
            # File Path Queue will be empty as the "modified time" < "last finish time"
            manager.prepare_file_queue(known_files=known_files)
            assert manager._file_queue == deque()
        # Simulate the DAG modification by using modified_time which is greater
        # than the last_parse_time but still less than now - min_file_process_interval
        file_1_new_mtime = freezed_base_time - timedelta(seconds=5)
        file_1_new_mtime_ts = file_1_new_mtime.timestamp()
        with time_machine.travel(freezed_base_time):
            assert manager._file_queue == deque()
            # File Path Queue will be empty as the "modified time" < "last finish time"
            mock_getmtime.side_effect = [file_1_new_mtime_ts]
            manager.prepare_file_queue(known_files=known_files)
            # Check that file is added to the queue even though file was just recently passed
            assert manager._file_queue == deque([dag_file])
            assert last_finish_time < file_1_new_mtime
            # Sanity check: we are indeed inside the min_file_process_interval window,
            # so only the fresh mtime explains the file being queued.
            assert (
                manager._file_process_interval
                > (freezed_base_time - manager._file_stats[dag_file].last_finish_time).total_seconds()
            )
    def test_file_paths_in_queue_sorted_by_priority(self):
        """Files with a pending DagPriorityParsingRequest are moved to the front of the queue."""
        from airflow.models.dagbag import DagPriorityParsingRequest
        parsing_request = DagPriorityParsingRequest(relative_fileloc="file_1.py", bundle_name="dags-folder")
        with create_session() as session:
            session.add(parsing_request)
            session.commit()
        file1 = DagFileInfo(
            bundle_name="dags-folder", rel_path=Path("file_1.py"), bundle_path=TEST_DAGS_FOLDER
        )
        file2 = DagFileInfo(
            bundle_name="dags-folder", rel_path=Path("file_2.py"), bundle_path=TEST_DAGS_FOLDER
        )
        manager = DagFileProcessorManager(max_runs=1)
        manager._dag_bundles = list(DagBundlesManager().get_all_dag_bundles())
        manager._file_queue = deque([file2, file1])
        manager._queue_requested_files_for_parsing()
        # file1 had a priority request, so it jumps ahead of file2 and its
        # bundle is flagged for a forced refresh.
        assert manager._file_queue == deque([file1, file2])
        assert manager._force_refresh_bundles == {"dags-folder"}
        # The request is consumed (deleted from the DB) once it has been queued.
        with create_session() as session2:
            parsing_request_after = session2.get(DagPriorityParsingRequest, parsing_request.id)
        assert parsing_request_after is None
    def test_parsing_requests_only_bundles_being_parsed(self, testing_dag_bundle):
        """Ensure the manager only handles parsing requests for bundles being parsed in this manager"""
        from airflow.models.dagbag import DagPriorityParsingRequest
        with create_session() as session:
            session.add(DagPriorityParsingRequest(relative_fileloc="file_1.py", bundle_name="dags-folder"))
            session.add(DagPriorityParsingRequest(relative_fileloc="file_x.py", bundle_name="testing"))
            session.commit()
        file1 = DagFileInfo(
            bundle_name="dags-folder", rel_path=Path("file_1.py"), bundle_path=TEST_DAGS_FOLDER
        )
        manager = DagFileProcessorManager(max_runs=1)
        manager._dag_bundles = list(DagBundlesManager().get_all_dag_bundles())
        manager._queue_requested_files_for_parsing()
        # Only the request for a bundle this manager parses ends up queued.
        assert manager._file_queue == deque([file1])
        with create_session() as session2:
            parsing_request_after = session2.query(DagPriorityParsingRequest).all()
        # The request for the other bundle is left untouched in the DB
        # (presumably for another manager to pick up — see test docstring).
        assert len(parsing_request_after) == 1
        assert parsing_request_after[0].relative_fileloc == "file_x.py"
    @pytest.mark.usefixtures("testing_dag_bundle")
    def test_scan_stale_dags(self, session):
        """
        Ensure that DAGs are marked inactive when the file is parsed but the
        DagModel.last_parsed_time is not updated.
        """
        manager = DagFileProcessorManager(
            max_runs=1,
            processor_timeout=10 * 60,
        )
        bundle = MagicMock()
        bundle.name = "testing"
        manager._dag_bundles = [bundle]
        test_dag_path = DagFileInfo(
            bundle_name="testing",
            rel_path=Path("test_example_bash_operator.py"),
            bundle_path=TEST_DAGS_FOLDER,
        )
        dagbag = DagBag(
            test_dag_path.absolute_path,
            include_examples=False,
            bundle_path=test_dag_path.bundle_path,
        )
        # Add stale DAG to the DB
        dag = dagbag.get_dag("test_example_bash_operator")
        sync_dag_to_db(dag, session=session)
        # Add DAG to the file_parsing_stats
        # last_finish_time is an hour ahead, so the file looks freshly parsed while
        # DagModel.last_parsed_time lags behind -> the DAG must be flagged stale.
        stat = DagFileStat(
            num_dags=1,
            import_errors=0,
            last_finish_time=timezone.utcnow() + timedelta(hours=1),
            last_duration=1,
            run_count=1,
            last_num_of_db_queries=1,
        )
        manager._files = [test_dag_path]
        manager._file_stats[test_dag_path] = stat
        active_dag_count = (
            session.query(func.count(DagModel.dag_id))
            .filter(
                ~DagModel.is_stale,
                DagModel.relative_fileloc == str(test_dag_path.rel_path),
                DagModel.bundle_name == test_dag_path.bundle_name,
            )
            .scalar()
        )
        assert active_dag_count == 1
        manager._scan_stale_dags()
        active_dag_count = (
            session.query(func.count(DagModel.dag_id))
            .filter(
                ~DagModel.is_stale,
                DagModel.relative_fileloc == str(test_dag_path.rel_path),
                DagModel.bundle_name == test_dag_path.bundle_name,
            )
            .scalar()
        )
        assert active_dag_count == 0
        serialized_dag_count = (
            session.query(func.count(SerializedDagModel.dag_id))
            .filter(SerializedDagModel.dag_id == dag.dag_id)
            .scalar()
        )
        # Deactivating the DagModel should not delete the SerializedDagModel
        # SerializedDagModel gives history about Dags
        assert serialized_dag_count == 1
def test_kill_timed_out_processors_kill(self):
manager = DagFileProcessorManager(max_runs=1, processor_timeout=5)
# Set start_time to ensure timeout occurs: start_time = current_time - (timeout + 1) = always (timeout + 1) seconds
start_time = time.monotonic() - manager.processor_timeout - 1
processor, _ = self.mock_processor(start_time=start_time)
manager._processors = {
DagFileInfo(
bundle_name="testing", rel_path=Path("abc.txt"), bundle_path=TEST_DAGS_FOLDER
): processor
}
with mock.patch.object(type(processor), "kill") as mock_kill:
manager._kill_timed_out_processors()
mock_kill.assert_called_once_with(signal.SIGKILL)
assert len(manager._processors) == 0
processor.logger_filehandle.close.assert_called()
def test_kill_timed_out_processors_no_kill(self):
manager = DagFileProcessorManager(
max_runs=1,
processor_timeout=5,
)
processor, _ = self.mock_processor()
processor._process.create_time.return_value = timezone.make_aware(datetime.max).timestamp()
manager._processors = {
DagFileInfo(
bundle_name="testing", rel_path=Path("abc.txt"), bundle_path=TEST_DAGS_FOLDER
): processor
}
with mock.patch.object(type(processor), "kill") as mock_kill:
manager._kill_timed_out_processors()
mock_kill.assert_not_called()
@pytest.mark.usefixtures("testing_dag_bundle")
@pytest.mark.parametrize(
("callbacks", "path", "expected_body"),
[
pytest.param(
[],
"/opt/airflow/dags/test_dag.py",
{
"file": "/opt/airflow/dags/test_dag.py",
"bundle_path": "/opt/airflow/dags",
"bundle_name": "testing",
"callback_requests": [],
"type": "DagFileParseRequest",
},
),
pytest.param(
[
DagCallbackRequest(
filepath="dag_callback_dag.py",
dag_id="dag_id",
run_id="run_id",
bundle_name="testing",
bundle_version=None,
context_from_server=None,
is_failure_callback=False,
)
],
"/opt/airflow/dags/dag_callback_dag.py",
{
"file": "/opt/airflow/dags/dag_callback_dag.py",
"bundle_path": "/opt/airflow/dags",
"bundle_name": "testing",
"callback_requests": [
{
"filepath": "dag_callback_dag.py",
"bundle_name": "testing",
"bundle_version": None,
"msg": None,
"dag_id": "dag_id",
"run_id": "run_id",
"context_from_server": None,
"is_failure_callback": False,
"type": "DagCallbackRequest",
}
],
"type": "DagFileParseRequest",
},
),
],
)
def test_serialize_callback_requests(self, callbacks, path, expected_body):
from airflow.sdk.execution_time.comms import _ResponseFrame
processor, read_socket = self.mock_processor()
processor._on_child_started(
callbacks, path, bundle_path=Path("/opt/airflow/dags"), bundle_name="testing"
)
read_socket.settimeout(0.1)
# Read response from the read end of the socket
frame_len = int.from_bytes(read_socket.recv(4), "big")
bytes = read_socket.recv(frame_len)
frame = msgspec.msgpack.Decoder(_ResponseFrame).decode(bytes)
assert frame.body == expected_body
    @conf_vars({("core", "load_examples"): "False"})
    @pytest.mark.execution_timeout(10)
    def test_dag_with_system_exit(self, configure_testing_dag_bundle):
        """
        Test to check that a DAG with a system.exit() doesn't break the scheduler.
        """
        dag_id = "exit_test_dag"
        dag_directory = TEST_DAG_FOLDER.parent / "dags_with_system_exit"
        # Delete the one valid DAG/SerializedDAG, and check that it gets re-created
        clear_db_dags()
        clear_db_serialized_dags()
        with configure_testing_dag_bundle(dag_directory):
            manager = DagFileProcessorManager(max_runs=1)
            manager.run()
            # Three files in folder should be processed
            assert sum(stat.run_count for stat in manager._file_stats.values()) == 3
        # The valid DAG was re-created even though a sibling file calls sys.exit().
        with create_session() as session:
            assert session.get(DagModel, dag_id) is not None
    @conf_vars({("core", "load_examples"): "False"})
    @mock.patch("airflow.dag_processing.manager.Stats.timing")
    @pytest.mark.skip("AIP-66: stats are not implemented yet")
    def test_send_file_processing_statsd_timing(
        self, statsd_timing_mock, tmp_path, configure_testing_dag_bundle
    ):
        """Per-file parse durations should be emitted as statsd timing metrics (currently skipped)."""
        path_to_parse = tmp_path / "temp_dag.py"
        dag_code = textwrap.dedent(
            """
        from airflow import DAG
        dag = DAG(dag_id='temp_dag')
        """
        )
        path_to_parse.write_text(dag_code)
        with configure_testing_dag_bundle(tmp_path):
            manager = DagFileProcessorManager(max_runs=1)
            manager.run()
        # NOTE(review): _file_stats is keyed by DagFileInfo elsewhere in this file;
        # indexing by os.fspath(...) here looks stale — revisit when un-skipping.
        last_runtime = manager._file_stats[os.fspath(path_to_parse)].last_duration
        statsd_timing_mock.assert_has_calls(
            [
                mock.call("dag_processing.last_duration.temp_dag", last_runtime),
                mock.call("dag_processing.last_duration", last_runtime, tags={"file_name": "temp_dag"}),
            ],
            any_order=True,
        )
    @pytest.mark.usefixtures("testing_dag_bundle")
    def test_refresh_dags_dir_doesnt_delete_zipped_dags(
        self, tmp_path, session, configure_testing_dag_bundle, test_zip_path
    ):
        """Test DagFileProcessorManager._refresh_dag_dir method"""
        # Parse the zipped DAG and persist it before running the manager.
        dagbag = DagBag(dag_folder=tmp_path, include_examples=False)
        dagbag.process_file(test_zip_path)
        dag = dagbag.get_dag("test_zip_dag")
        sync_dag_to_db(dag)
        with configure_testing_dag_bundle(test_zip_path):
            manager = DagFileProcessorManager(max_runs=1)
            manager.run()
        # Assert dag not deleted in SDM
        assert SerializedDagModel.has_dag("test_zip_dag")
        # assert code not deleted
        assert DagCode.has_dag(dag.dag_id)
        # assert dag still active
        assert session.get(DagModel, dag.dag_id).is_stale is False
    @pytest.mark.usefixtures("testing_dag_bundle")
    def test_refresh_dags_dir_deactivates_deleted_zipped_dags(
        self, session, tmp_path, configure_testing_dag_bundle, test_zip_path
    ):
        """Test DagFileProcessorManager._refresh_dag_dir method"""
        dag_id = "test_zip_dag"
        filename = "test_zip.zip"
        source_location = test_zip_path
        # Copy the zip into a private bundle dir so we can delete it later.
        bundle_path = Path(tmp_path, "test_refresh_dags_dir_deactivates_deleted_zipped_dags")
        bundle_path.mkdir(exist_ok=True)
        zip_dag_path = bundle_path / filename
        shutil.copy(source_location, zip_dag_path)
        with configure_testing_dag_bundle(bundle_path):
            session.commit()
            manager = DagFileProcessorManager(max_runs=1)
            manager.run()
            # First run: the zipped DAG is parsed, serialized and active.
            assert SerializedDagModel.has_dag(dag_id)
            assert DagCode.has_dag(dag_id)
            assert DagVersion.get_latest_version(dag_id)
            dag = session.scalar(select(DagModel).where(DagModel.dag_id == dag_id))
            assert dag.is_stale is False
            os.remove(zip_dag_path)
            manager.run()
            # Second run after deletion: history is kept, but the DAG is deactivated.
            assert SerializedDagModel.has_dag(dag_id)
            assert DagCode.has_dag(dag_id)
            assert DagVersion.get_latest_version(dag_id)
            dag = session.scalar(select(DagModel).where(DagModel.dag_id == dag_id))
            assert dag.is_stale is True
    def test_deactivate_deleted_dags(self, dag_maker, session):
        """DAGs whose files are no longer present are marked stale; the rest stay active."""
        with dag_maker("test_dag1") as dag1:
            dag1.relative_fileloc = "test_dag1.py"
        with dag_maker("test_dag2") as dag2:
            dag2.relative_fileloc = "test_dag2.py"
        dag_maker.sync_dagbag_to_db()
        active_files = [
            DagFileInfo(
                bundle_name="dag_maker",
                rel_path=Path("test_dag1.py"),
                bundle_path=TEST_DAGS_FOLDER,
            ),
            # Mimic that the test_dag2.py file is deleted
        ]
        manager = DagFileProcessorManager(max_runs=1)
        manager.deactivate_deleted_dags("dag_maker", active_files)
        # The DAG from test_dag1.py is still active
        assert session.get(DagModel, "test_dag1").is_stale is False
        # and the DAG from test_dag2.py is deactivated
        assert session.get(DagModel, "test_dag2").is_stale is True
    @pytest.mark.parametrize(
        ("rel_filelocs", "expected_return", "expected_dag1_stale", "expected_dag2_stale"),
        [
            pytest.param(
                ["test_dag1.py"],  # Only dag1 present, dag2 deleted
                True,  # Should return True
                False,  # dag1 should not be stale
                True,  # dag2 should be stale
                id="dags_deactivated",
            ),
            pytest.param(
                ["test_dag1.py", "test_dag2.py"],  # Both files present
                False,  # Should return False
                False,  # dag1 should not be stale
                False,  # dag2 should not be stale
                id="no_dags_deactivated",
            ),
        ],
    )
    def test_deactivate_deleted_dags_return_value(
        self, dag_maker, session, rel_filelocs, expected_return, expected_dag1_stale, expected_dag2_stale
    ):
        """Test that DagModel.deactivate_deleted_dags returns correct boolean value."""
        with dag_maker("test_dag1") as dag1:
            dag1.relative_fileloc = "test_dag1.py"
        with dag_maker("test_dag2") as dag2:
            dag2.relative_fileloc = "test_dag2.py"
        dag_maker.sync_dagbag_to_db()
        # Returns True iff at least one DAG was flipped to stale.
        any_deactivated = DagModel.deactivate_deleted_dags(
            bundle_name="dag_maker",
            rel_filelocs=rel_filelocs,
            session=session,
        )
        assert any_deactivated is expected_return
        assert session.get(DagModel, "test_dag1").is_stale is expected_dag1_stale
        assert session.get(DagModel, "test_dag2").is_stale is expected_dag2_stale
    @pytest.mark.parametrize(
        ("active_files", "should_call_cleanup"),
        [
            pytest.param(
                [
                    DagFileInfo(
                        bundle_name="dag_maker",
                        rel_path=Path("test_dag1.py"),
                        bundle_path=TEST_DAGS_FOLDER,
                    ),
                    # test_dag2.py is deleted
                ],
                True,  # Should call cleanup
                id="dags_deactivated",
            ),
            pytest.param(
                [
                    DagFileInfo(
                        bundle_name="dag_maker",
                        rel_path=Path("test_dag1.py"),
                        bundle_path=TEST_DAGS_FOLDER,
                    ),
                    DagFileInfo(
                        bundle_name="dag_maker",
                        rel_path=Path("test_dag2.py"),
                        bundle_path=TEST_DAGS_FOLDER,
                    ),
                ],
                False,  # Should NOT call cleanup
                id="no_dags_deactivated",
            ),
        ],
    )
    @mock.patch("airflow.dag_processing.manager.remove_references_to_deleted_dags")
    def test_manager_deactivate_deleted_dags_cleanup_behavior(
        self, mock_remove_references, dag_maker, session, active_files, should_call_cleanup
    ):
        """Test that manager conditionally calls remove_references_to_deleted_dags based on whether DAGs were deactivated."""
        with dag_maker("test_dag1") as dag1:
            dag1.relative_fileloc = "test_dag1.py"
        with dag_maker("test_dag2") as dag2:
            dag2.relative_fileloc = "test_dag2.py"
        dag_maker.sync_dagbag_to_db()
        manager = DagFileProcessorManager(max_runs=1)
        manager.deactivate_deleted_dags("dag_maker", active_files)
        # Cleanup runs only when something was actually deactivated.
        if should_call_cleanup:
            mock_remove_references.assert_called_once()
        else:
            mock_remove_references.assert_not_called()
    @conf_vars({("core", "load_examples"): "False"})
    def test_fetch_callbacks_from_database(self, configure_testing_dag_bundle):
        """Test _fetch_callbacks returns callbacks ordered by priority_weight desc."""
        dag_filepath = TEST_DAG_FOLDER / "test_on_failure_callback_dag.py"
        callback1 = DagCallbackRequest(
            dag_id="test_start_date_scheduling",
            bundle_name="testing",
            bundle_version=None,
            filepath="test_on_failure_callback_dag.py",
            is_failure_callback=True,
            run_id="123",
        )
        callback2 = DagCallbackRequest(
            dag_id="test_start_date_scheduling",
            bundle_name="testing",
            bundle_version=None,
            filepath="test_on_failure_callback_dag.py",
            is_failure_callback=True,
            run_id="456",
        )
        # Persist both with distinct priorities (11 > 10).
        with create_session() as session:
            session.add(DbCallbackRequest(callback=callback1, priority_weight=11))
            session.add(DbCallbackRequest(callback=callback2, priority_weight=10))
        with configure_testing_dag_bundle(dag_filepath):
            manager = DagFileProcessorManager(max_runs=1)
            manager._dag_bundles = list(DagBundlesManager().get_all_dag_bundles())
            with create_session() as session:
                callbacks = manager._fetch_callbacks(session=session)
                # Should return callbacks ordered by priority_weight desc (highest first)
                assert callbacks[0].run_id == "123"
                assert callbacks[1].run_id == "456"
                # Fetched callbacks are removed from the DB.
                assert session.query(DbCallbackRequest).count() == 0
    @conf_vars(
        {
            ("dag_processor", "max_callbacks_per_loop"): "2",
            ("core", "load_examples"): "False",
        }
    )
    def test_fetch_callbacks_from_database_max_per_loop(self, tmp_path, configure_testing_dag_bundle):
        """Test DagFileProcessorManager._fetch_callbacks method"""
        dag_filepath = TEST_DAG_FOLDER / "test_on_failure_callback_dag.py"
        with create_session() as session:
            for i in range(5):
                callback = DagCallbackRequest(
                    dag_id="test_start_date_scheduling",
                    bundle_name="testing",
                    bundle_version=None,
                    filepath="test_on_failure_callback_dag.py",
                    is_failure_callback=True,
                    run_id=str(i),
                )
                session.add(DbCallbackRequest(callback=callback, priority_weight=i))
        with configure_testing_dag_bundle(dag_filepath):
            manager = DagFileProcessorManager(max_runs=1)
            # With max_callbacks_per_loop=2, each run drains at most 2 of the 5 requests.
            with create_session() as session:
                manager.run()
                assert session.query(DbCallbackRequest).count() == 3
            with create_session() as session:
                manager.run()
                assert session.query(DbCallbackRequest).count() == 1
    @conf_vars({("core", "load_examples"): "False"})
    def test_fetch_callbacks_ignores_other_bundles(self, configure_testing_dag_bundle):
        """Ensure callbacks for bundles not owned by current dag processor manager are ignored and not deleted."""
        dag_filepath = TEST_DAG_FOLDER / "test_on_failure_callback_dag.py"
        # Create two callbacks: one for the active 'testing' bundle and one for a different bundle
        matching = DagCallbackRequest(
            dag_id="test_start_date_scheduling",
            bundle_name="testing",
            bundle_version=None,
            filepath="test_on_failure_callback_dag.py",
            is_failure_callback=True,
            run_id="match",
        )
        non_matching = DagCallbackRequest(
            dag_id="test_start_date_scheduling",
            bundle_name="other-bundle",
            bundle_version=None,
            filepath="test_on_failure_callback_dag.py",
            is_failure_callback=True,
            run_id="no-match",
        )
        # The foreign-bundle callback gets the higher priority to prove it is
        # skipped by bundle, not by ordering.
        with create_session() as session:
            session.add(DbCallbackRequest(callback=matching, priority_weight=100))
            session.add(DbCallbackRequest(callback=non_matching, priority_weight=200))
        with configure_testing_dag_bundle(dag_filepath):
            manager = DagFileProcessorManager(max_runs=1)
            manager._dag_bundles = list(DagBundlesManager().get_all_dag_bundles())
            with create_session() as session:
                callbacks = manager._fetch_callbacks(session=session)
                # Only the matching callback should be returned
                assert [c.run_id for c in callbacks] == ["match"]
                # The non-matching callback should remain in the DB
                remaining = session.query(DbCallbackRequest).all()
                assert len(remaining) == 1
                # Decode remaining request and verify it's for the other bundle
                remaining_req = remaining[0].get_callback_request()
                assert remaining_req.bundle_name == "other-bundle"
    @mock.patch.object(DagFileProcessorManager, "_get_logger_for_dag_file")
    def test_callback_queue(self, mock_get_logger, configure_testing_dag_bundle):
        """Callbacks queue their file for parsing, de-duplicate per file, and are handed to the processor."""
        mock_logger = MagicMock()
        mock_filehandle = MagicMock()
        mock_get_logger.return_value = [mock_logger, mock_filehandle]
        tmp_path = "/green_eggs/ham"
        with configure_testing_dag_bundle(tmp_path):
            # given
            manager = DagFileProcessorManager(
                max_runs=1,
                processor_timeout=365 * 86_400,
            )
            manager._dag_bundles = list(DagBundlesManager().get_all_dag_bundles())
            dag1_path = DagFileInfo(
                bundle_name="testing", rel_path=Path("file1.py"), bundle_path=Path(tmp_path)
            )
            dag1_req1 = DagCallbackRequest(
                filepath="file1.py",
                dag_id="dag1",
                run_id="run1",
                is_failure_callback=False,
                bundle_name="testing",
                bundle_version=None,
                msg=None,
            )
            # Identical to dag1_req1: used below to show callbacks accumulate per file.
            dag1_req2 = DagCallbackRequest(
                filepath="file1.py",
                dag_id="dag1",
                run_id="run1",
                is_failure_callback=False,
                bundle_name="testing",
                bundle_version=None,
                msg=None,
            )
            dag2_path = DagFileInfo(
                bundle_name="testing", rel_path=Path("file2.py"), bundle_path=Path(tmp_path)
            )
            dag2_req1 = DagCallbackRequest(
                filepath="file2.py",
                dag_id="dag2",
                run_id="run1",
                bundle_name=dag2_path.bundle_name,
                bundle_version=None,
                is_failure_callback=False,
                msg=None,
            )
            # when
            manager._add_callback_to_queue(dag1_req1)
            manager._add_callback_to_queue(dag2_req1)
            # then - requests should be in manager's queue, with dag2 ahead of dag1 (because it was added last)
            assert manager._file_queue == deque([dag2_path, dag1_path])
            assert set(manager._callback_to_execute.keys()) == {
                dag1_path,
                dag2_path,
            }
            assert manager._callback_to_execute[dag2_path] == [dag2_req1]
            # update the queue, although the callback is registered
            assert manager._file_queue == deque([dag2_path, dag1_path])
            # when
            manager._add_callback_to_queue(dag1_req2)
            # Since dag1_req2 is same as dag1_req1, we now have 2 items in file_path_queue
            assert manager._file_queue == deque([dag2_path, dag1_path])
            assert manager._callback_to_execute[dag1_path] == [
                dag1_req1,
                dag1_req2,
            ]
            with mock.patch.object(
                DagFileProcessorProcess, "start", side_effect=lambda *args, **kwargs: self.mock_processor()
            ) as start:
                manager._start_new_processes()
            # Callbacks passed to processor
            assert start.call_args_list == [
                mock.call(
                    id=mock.ANY,
                    path=Path(dag2_path.bundle_path, dag2_path.rel_path),
                    bundle_path=dag2_path.bundle_path,
                    bundle_name="testing",
                    callbacks=[dag2_req1],
                    selector=mock.ANY,
                    logger=mock_logger,
                    logger_filehandle=mock_filehandle,
                    client=mock.ANY,
                ),
                mock.call(
                    id=mock.ANY,
                    path=Path(dag1_path.bundle_path, dag1_path.rel_path),
                    bundle_path=dag1_path.bundle_path,
                    bundle_name="testing",
                    callbacks=[dag1_req1, dag1_req2],
                    selector=mock.ANY,
                    logger=mock_logger,
                    logger_filehandle=mock_filehandle,
                    client=mock.ANY,
                ),
            ]
            # And removed from the queue
            assert dag1_path not in manager._callback_to_execute
            assert dag2_path not in manager._callback_to_execute
    def test_dag_with_assets(self, session, configure_testing_dag_bundle):
        """'Integration' test to ensure that the assets get parsed and stored correctly for parsed dags."""
        test_dag_path = str(TEST_DAG_FOLDER / "test_assets.py")
        with configure_testing_dag_bundle(test_dag_path):
            manager = DagFileProcessorManager(
                max_runs=1,
                processor_timeout=365 * 86_400,
            )
            manager.run()
        # After parsing, the task outlet asset reference is persisted on the DagModel.
        dag_model = session.get(DagModel, ("dag_with_skip_task"))
        assert dag_model.task_outlet_asset_references == [
            TaskOutletAssetReference(asset_id=mock.ANY, dag_id="dag_with_skip_task", task_id="skip_task")
        ]
    def test_bundles_are_refreshed(self):
        """
        Ensure bundles are refreshed by the manager, when necessary.

        - always refresh all bundles when starting the manager
        - refresh if the bundle hasn't been refreshed in the refresh_interval
        - when the latest_version in the db doesn't match the version this manager knows about
        """
        config = [
            {
                "name": "bundleone",
                "classpath": "airflow.dag_processing.bundles.local.LocalDagBundle",
                "kwargs": {"path": "/dev/null", "refresh_interval": 0},
            },
            {
                "name": "bundletwo",
                "classpath": "airflow.dag_processing.bundles.local.LocalDagBundle",
                "kwargs": {"path": "/dev/null", "refresh_interval": 300},
            },
        ]
        # Mock bundles stand in for the real ones so refresh() calls can be counted.
        bundleone = MagicMock()
        bundleone.name = "bundleone"
        bundleone.path = "/dev/null"
        bundleone.refresh_interval = 0
        bundleone.get_current_version.return_value = None
        bundletwo = MagicMock()
        bundletwo.name = "bundletwo"
        bundletwo.path = "/dev/null"
        bundletwo.refresh_interval = 300
        bundletwo.get_current_version.return_value = None
        with conf_vars({("dag_processor", "dag_bundle_config_list"): json.dumps(config)}):
            DagBundlesManager().sync_bundles_to_db()
            with mock.patch("airflow.dag_processing.manager.DagBundlesManager") as mock_bundle_manager:
                mock_bundle_manager.return_value._bundle_config = {"bundleone": None, "bundletwo": None}
                mock_bundle_manager.return_value.get_all_dag_bundles.return_value = [bundleone, bundletwo]
                # We should refresh bundleone twice, but bundletwo only once - it has a long refresh_interval
                manager = DagFileProcessorManager(max_runs=2)
                manager.run()
                assert bundleone.refresh.call_count == 2
                bundletwo.refresh.assert_called_once()
                # Now, we should refresh both bundles, regardless of the refresh_interval
                # as we are starting up a fresh manager
                bundleone.reset_mock()
                bundletwo.reset_mock()
                manager = DagFileProcessorManager(max_runs=2)
                manager.run()
                assert bundleone.refresh.call_count == 2
                bundletwo.refresh.assert_called_once()
                # however, if the version doesn't match, we should still refresh
                bundletwo.reset_mock()
                def _update_bundletwo_version():
                    # We will update the bundle version in the db, so the next manager loop
                    # will believe another processor had seen a new version
                    with create_session() as session:
                        bundletwo_model = session.get(DagBundleModel, "bundletwo")
                        bundletwo_model.version = "123"
                bundletwo.refresh.side_effect = _update_bundletwo_version
                manager = DagFileProcessorManager(max_runs=2)
                manager.run()
                assert bundletwo.refresh.call_count == 2
    def test_bundle_refresh_check_interval(self):
        """Ensure dag processor doesn't refresh bundles every loop."""
        config = [
            {
                "name": "bundleone",
                "classpath": "airflow.dag_processing.bundles.local.LocalDagBundle",
                "kwargs": {"path": "/dev/null", "refresh_interval": 0},
            },
        ]
        bundleone = MagicMock()
        bundleone.name = "bundleone"
        bundleone.path = "/dev/null"
        bundleone.refresh_interval = 0
        bundleone.get_current_version.return_value = None
        with conf_vars(
            {
                ("dag_processor", "dag_bundle_config_list"): json.dumps(config),
                # 10s check interval means back-to-back calls must not re-refresh.
                ("dag_processor", "bundle_refresh_check_interval"): "10",
            }
        ):
            DagBundlesManager().sync_bundles_to_db()
            manager = DagFileProcessorManager(max_runs=2)
            manager._dag_bundles = [bundleone]
            manager._refresh_dag_bundles({})
            assert bundleone.refresh.call_count == 1
            manager._refresh_dag_bundles({})
            assert bundleone.refresh.call_count == 1  # didn't refresh the second time
    def test_bundle_force_refresh(self):
        """Ensure the dag processor honors force refreshing a bundle."""
        config = [
            {
                "name": "bundleone",
                "classpath": "airflow.dag_processing.bundles.local.LocalDagBundle",
                "kwargs": {"path": "/dev/null", "refresh_interval": 0},
            },
        ]
        bundleone = MagicMock()
        bundleone.name = "bundleone"
        bundleone.path = "/dev/null"
        bundleone.refresh_interval = 0
        bundleone.get_current_version.return_value = None
        with conf_vars(
            {
                ("dag_processor", "dag_bundle_config_list"): json.dumps(config),
                ("dag_processor", "bundle_refresh_check_interval"): "10",
            }
        ):
            DagBundlesManager().sync_bundles_to_db()
            manager = DagFileProcessorManager(max_runs=2)
            manager._dag_bundles = [bundleone]
            manager._refresh_dag_bundles({})
            assert bundleone.refresh.call_count == 1
            # Within the check interval a second call would normally be skipped,
            # but a force-refresh entry overrides that.
            manager._force_refresh_bundles = {"bundleone"}
            manager._refresh_dag_bundles({})
            assert bundleone.refresh.call_count == 2  # forced refresh
    def test_bundles_versions_are_stored(self, session):
        """A versioned bundle's current version is persisted to DagBundleModel after a run."""
        config = [
            {
                "name": "bundleone",
                "classpath": "airflow.dag_processing.bundles.local.LocalDagBundle",
                "kwargs": {"path": "/dev/null", "refresh_interval": 0},
            },
        ]
        mybundle = MagicMock()
        mybundle.name = "bundleone"
        mybundle.path = "/dev/null"
        mybundle.refresh_interval = 0
        mybundle.supports_versioning = True
        mybundle.get_current_version.return_value = "123"
        with conf_vars({("dag_processor", "dag_bundle_config_list"): json.dumps(config)}):
            DagBundlesManager().sync_bundles_to_db()
            with mock.patch("airflow.dag_processing.manager.DagBundlesManager") as mock_bundle_manager:
                mock_bundle_manager.return_value._bundle_config = {"bundleone": None}
                mock_bundle_manager.return_value.get_all_dag_bundles.return_value = [mybundle]
                manager = DagFileProcessorManager(max_runs=1)
                manager.run()
                with create_session() as session:
                    model = session.get(DagBundleModel, "bundleone")
                    assert model.version == "123"
    def test_non_versioned_bundle_get_version_not_called(self):
        """When a bundle does not support versioning, get_current_version is never consulted."""
        config = [
            {
                "name": "bundleone",
                "classpath": "airflow.dag_processing.bundles.local.LocalDagBundle",
                "kwargs": {"path": "/dev/null", "refresh_interval": 0},
            },
        ]
        bundleone = MagicMock()
        bundleone.name = "bundleone"
        bundleone.refresh_interval = 0
        bundleone.supports_versioning = False
        bundleone.path = Path("/dev/null")
        with conf_vars({("dag_processor", "dag_bundle_config_list"): json.dumps(config)}):
            DagBundlesManager().sync_bundles_to_db()
            with mock.patch("airflow.dag_processing.manager.DagBundlesManager") as mock_bundle_manager:
                mock_bundle_manager.return_value._bundle_config = {"bundleone": None}
                mock_bundle_manager.return_value.get_all_dag_bundles.return_value = [bundleone]
                manager = DagFileProcessorManager(max_runs=1)
                manager.run()
                bundleone.refresh.assert_called_once()
                bundleone.get_current_version.assert_not_called()
def test_versioned_bundle_get_version_called_once(self):
"""Make sure in a normal "warm" loop, get_current_version is called just once after refresha"""
config = [
{
"name": "bundleone",
"classpath": "airflow.dag_processing.bundles.local.LocalDagBundle",
"kwargs": {"path": "/dev/null", "refresh_interval": 0},
},
]
bundleone = MagicMock()
bundleone.name = "bundleone"
bundleone.refresh_interval = 0
bundleone.supports_versioning = True
bundleone.get_current_version.return_value = "123"
bundleone.path = Path("/dev/null")
with conf_vars({("dag_processor", "dag_bundle_config_list"): json.dumps(config)}):
DagBundlesManager().sync_bundles_to_db()
with mock.patch("airflow.dag_processing.manager.DagBundlesManager") as mock_bundle_manager:
mock_bundle_manager.return_value._bundle_config = {"bundleone": None}
mock_bundle_manager.return_value.get_all_dag_bundles.return_value = [bundleone]
manager = DagFileProcessorManager(max_runs=1)
manager.run() # run it once to warm up
# now run it again so we can check we only call get_current_version once
bundleone.refresh.reset_mock()
bundleone.get_current_version.reset_mock()
manager.run()
bundleone.refresh.assert_called_once()
bundleone.get_current_version.assert_called_once()
@pytest.mark.parametrize(
("bundle_names", "expected"),
[
(None, {"bundle1", "bundle2", "bundle3"}),
(["bundle1"], {"bundle1"}),
(["bundle1", "bundle2"], {"bundle1", "bundle2"}),
],
)
def test_bundle_names_to_parse(self, bundle_names, expected, configure_dag_bundles):
config = {f"bundle{i}": os.devnull for i in range(1, 4)}
with configure_dag_bundles(config):
manager = DagFileProcessorManager(max_runs=1, bundle_names_to_parse=bundle_names)
manager._run_parsing_loop = MagicMock()
manager.run()
bundle_names_being_parsed = {b.name for b in manager._dag_bundles}
assert bundle_names_being_parsed == expected
@conf_vars({("core", "multi_team"): "true"})
def test_bundles_with_team(self, session):
team1_name = "test_team1"
team2_name = "test_team2"
# Create two teams
session.add(Team(name=team1_name))
session.add(Team(name=team2_name))
session.commit()
# Associate a dag bundle to a team
config = [
{
"name": "bundle_team",
"classpath": "airflow.dag_processing.bundles.local.LocalDagBundle",
"kwargs": {},
"team_name": team1_name,
},
]
with conf_vars({("dag_processor", "dag_bundle_config_list"): json.dumps(config)}):
DagBundlesManager().sync_bundles_to_db()
team = session.scalars(select(Team).where(Team.name == team1_name)).one()
assert len(team.dag_bundles) == 1
assert team.dag_bundles[0].name == "bundle_team"
# Change the team ownership
config = [
{
"name": "bundle_team",
"classpath": "airflow.dag_processing.bundles.local.LocalDagBundle",
"kwargs": {},
"team_name": team2_name,
},
]
with conf_vars({("dag_processor", "dag_bundle_config_list"): json.dumps(config)}):
DagBundlesManager().sync_bundles_to_db()
team1 = session.scalars(select(Team).where(Team.name == team1_name)).one()
assert len(team1.dag_bundles) == 0
team2 = session.scalars(select(Team).where(Team.name == team2_name)).one()
assert len(team2.dag_bundles) == 1
assert team2.dag_bundles[0].name == "bundle_team"
# Delete the team ownership
config = [
{
"name": "bundle_team",
"classpath": "airflow.dag_processing.bundles.local.LocalDagBundle",
"kwargs": {},
},
]
with conf_vars({("dag_processor", "dag_bundle_config_list"): json.dumps(config)}):
DagBundlesManager().sync_bundles_to_db()
team1 = session.scalars(select(Team).where(Team.name == team1_name)).one()
assert len(team1.dag_bundles) == 0
team2 = session.scalars(select(Team).where(Team.name == team2_name)).one()
assert len(team2.dag_bundles) == 0
@mock.patch.object(DagFileProcessorProcess, "start")
def test_create_process_passes_bundle_name_to_process_start(
self, mock_process_start, configure_testing_dag_bundle
):
"""Test that DagFileProcessorManager._create_process() passes bundle_name to DagFileProcessorProcess.start()"""
with configure_testing_dag_bundle("/tmp"):
manager = DagFileProcessorManager(max_runs=1)
manager._dag_bundles = list(DagBundlesManager().get_all_dag_bundles())
# Setup test data
file_info = DagFileInfo(
bundle_name="testing", rel_path=Path("test_dag.py"), bundle_path=TEST_DAGS_FOLDER
)
# Mock the process creation
mock_process_start.return_value = self.mock_processor()[0]
# Call _create_process (only takes one parameter: dag_file)
manager._create_process(file_info)
# Verify DagFileProcessorProcess.start was called with correct bundle_name
mock_process_start.assert_called_once()
call_kwargs = mock_process_start.call_args.kwargs
assert call_kwargs["bundle_name"] == "testing"
| TestDagFileProcessorManager |
python | jazzband__django-oauth-toolkit | tests/test_introspection_auth.py | {
"start": 1305,
"end": 2927
} | class ____:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
def mocked_requests_post(url, data, *args, **kwargs):
"""
Mock the response from the authentication server
"""
if "token" in data and data["token"] and data["token"] != "12345678900":
return MockResponse(
{
"active": True,
"scope": "read write dolphin",
"client_id": "client_id_{}".format(data["token"]),
"username": "{}_user".format(data["token"]),
"exp": int(calendar.timegm(default_exp.timetuple())),
},
200,
)
return MockResponse(
{
"active": False,
},
200,
)
def mocked_introspect_request_short_living_token(url, data, *args, **kwargs):
exp = datetime.datetime.now() + datetime.timedelta(minutes=30)
return MockResponse(
{
"active": True,
"scope": "read write dolphin",
"client_id": "client_id_{}".format(data["token"]),
"username": "{}_user".format(data["token"]),
"exp": int(calendar.timegm(exp.timetuple())),
},
200,
)
urlpatterns = [
path("oauth2/", include("oauth2_provider.urls")),
path("oauth2-test-resource/", login_not_required(ScopeResourceView.as_view())),
]
@override_settings(ROOT_URLCONF=__name__)
@pytest.mark.usefixtures("oauth2_settings")
@pytest.mark.oauth2_settings(presets.INTROSPECTION_SETTINGS)
| MockResponse |
python | google__python-fire | fire/test_components.py | {
"start": 1373,
"end": 1446
} | class ____: # pylint: disable=old-style-class,no-init
pass
| OldStyleEmpty |
python | huggingface__transformers | src/transformers/trainer_callback.py | {
"start": 24273,
"end": 26124
} | class ____(TrainerCallback):
"""
A [`TrainerCallback`] that handles the default flow of the training loop for logs, evaluation and checkpoints.
"""
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
# Log
if state.global_step == 1 and args.logging_first_step:
control.should_log = True
if args.logging_strategy == IntervalStrategy.STEPS and state.global_step % state.logging_steps == 0:
control.should_log = True
# Evaluate
if (
args.eval_strategy == IntervalStrategy.STEPS
and state.global_step % state.eval_steps == 0
and args.eval_delay <= state.global_step
):
control.should_evaluate = True
# Save
if (
args.save_strategy == SaveStrategy.STEPS
and state.save_steps > 0
and state.global_step % state.save_steps == 0
):
control.should_save = True
# End training
if state.global_step >= state.max_steps:
control.should_training_stop = True
# Save the model at the end if we have a save strategy
if args.save_strategy == SaveStrategy.STEPS:
control.should_save = True
return control
def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
# Log
if args.logging_strategy == IntervalStrategy.EPOCH:
control.should_log = True
# Evaluate
if args.eval_strategy == IntervalStrategy.EPOCH and args.eval_delay <= state.epoch:
control.should_evaluate = True
# Save
if args.save_strategy == SaveStrategy.EPOCH:
control.should_save = True
return control
| DefaultFlowCallback |
python | apache__airflow | airflow-core/src/airflow/executors/executor_loader.py | {
"start": 2138,
"end": 18150
} | class ____:
"""Keeps constants for all the currently available executors."""
executors = {
LOCAL_EXECUTOR: "airflow.executors.local_executor.LocalExecutor",
CELERY_EXECUTOR: "airflow.providers.celery.executors.celery_executor.CeleryExecutor",
KUBERNETES_EXECUTOR: "airflow.providers.cncf.kubernetes."
"executors.kubernetes_executor.KubernetesExecutor",
}
@classmethod
def _get_executor_names(cls, validate_teams: bool = True) -> list[ExecutorName]:
"""
Return the executor names from Airflow configuration.
:param validate_teams: Whether to validate that team names exist in database
:return: List of executor names from Airflow configuration
"""
if _executor_names:
return _executor_names
all_executor_names: list[tuple[str | None, list[str]]] = cls._get_team_executor_configs(
validate_teams=validate_teams
)
executor_names: list[ExecutorName] = []
for team_name, executor_names_config in all_executor_names:
executor_names_per_team = []
for executor_name_str in executor_names_config:
if len(split_name := executor_name_str.split(":")) == 1:
name = split_name[0]
# Check if this is an alias for a core airflow executor, module
# paths won't be provided by the user in that case.
if core_executor_module := cls.executors.get(name):
executor_names_per_team.append(
ExecutorName(module_path=core_executor_module, alias=name, team_name=team_name)
)
# A module path was provided
else:
executor_names_per_team.append(
ExecutorName(alias=None, module_path=name, team_name=team_name)
)
# An alias was provided with the module path
elif len(split_name) == 2:
# Ensure the user is not trying to override the existing aliases of any of the core
# executors by providing an alias along with the existing core airflow executor alias
# (e.g. my_local_exec_alias:LocalExecutor). Allowing this makes things unnecessarily
# complicated. Multiple Executors of the same type will be supported by a future
# multitenancy AIP.
# The module component should always be a module path.
module_path = split_name[1]
if not module_path or module_path in CORE_EXECUTOR_NAMES or "." not in module_path:
raise AirflowConfigException(
"Incorrectly formatted executor configuration. Second portion of an executor "
f"configuration must be a module path but received: {module_path}"
)
executor_names_per_team.append(
ExecutorName(alias=split_name[0], module_path=split_name[1], team_name=team_name)
)
else:
raise AirflowConfigException(
f"Incorrectly formatted executor configuration: {executor_name_str}"
)
# As of now, we do not allow duplicate executors (within teams).
# Add all module paths to a set, since the actual code is what is unique
unique_modules = set([exec_name.module_path for exec_name in executor_names_per_team])
if len(unique_modules) < len(executor_names_per_team):
msg = (
"At least one executor was configured twice. Duplicate executors are not yet supported.\n"
"Please check your configuration again to correct the issue."
)
raise AirflowConfigException(msg)
executor_names.extend(executor_names_per_team)
# Populate some mappings for fast future lookups
for executor_name in executor_names:
# Executors will not always have aliases
if executor_name.alias:
_alias_to_executors_per_team[executor_name.team_name][executor_name.alias] = executor_name
# All executors will have a team name. It _may_ be None, for now that means it is a system level executor
_team_name_to_executors[executor_name.team_name].append(executor_name)
# All executors will have a module path
_module_to_executors_per_team[executor_name.team_name][executor_name.module_path] = executor_name
_classname_to_executors_per_team[executor_name.team_name][
executor_name.module_path.split(".")[-1]
] = executor_name
# Cache the executor names, so the logic of this method only runs once
_executor_names.append(executor_name)
return executor_names
@classmethod
def block_use_of_multi_team(cls):
"""
Raise an exception if the user tries to use multiple team based executors.
Before the feature is complete we do not want users to accidentally configure this.
This can be overridden by setting the AIRFLOW__DEV__MULTI_TEAM_MODE environment
variable to "enabled"
This check is built into a method so that it can be easily mocked in unit tests.
"""
team_dev_mode: str | None = os.environ.get("AIRFLOW__DEV__MULTI_TEAM_MODE")
if not team_dev_mode or team_dev_mode != "enabled":
raise AirflowConfigException("Configuring multiple team based executors is not yet supported!")
@classmethod
def _validate_teams_exist_in_database(cls, team_names: set[str]) -> None:
"""
Validate that all specified team names exist in the database.
:param team_names: Set of team names to validate
:raises AirflowConfigException: If any team names don't exist in the database
"""
if not team_names:
return
existing_teams = Team.get_all_team_names()
missing_teams = team_names - existing_teams
if missing_teams:
missing_teams_list = sorted(missing_teams)
missing_teams_str = ", ".join(missing_teams_list)
raise AirflowConfigException(
f"One or more teams specified in executor configuration do not exist in database: {missing_teams_str}. "
"Please create these teams first or remove them from executor configuration."
)
@classmethod
def _get_team_executor_configs(cls, validate_teams: bool = True) -> list[tuple[str | None, list[str]]]:
"""
Return a list of executor configs to be loaded.
Each tuple contains the team id as the first element and the second element is the executor config
for that team (a list of executor names/modules/aliases).
:param validate_teams: Whether to validate that team names exist in database
"""
from airflow.configuration import conf
executor_config = conf.get_mandatory_value("core", "executor")
if not executor_config:
raise AirflowConfigException(
"The 'executor' key in the 'core' section of the configuration is mandatory and cannot be empty"
)
configs: list[tuple[str | None, list[str]]] = []
seen_teams: set[str | None] = set()
# The executor_config can look like a few things. One is just a single executor name, such as
# "CeleryExecutor". Or a list of executors, such as "CeleryExecutor,KubernetesExecutor,module.path.to.executor".
# In these cases these are all executors that are available to all teams, with the first one being the
# default executor, as usual. The config can also look like a list of executors, per team, with the team name
# prefixing each list of executors separated by a equal sign and then each team list separated by a
# semi-colon.
# "LocalExecutor;team1=CeleryExecutor;team2=KubernetesExecutor,module.path.to.executor".
for team_executor_config in executor_config.split(";"):
# The first item in the list may not have a team id (either empty string before the equal
# sign or no equal sign at all), which means it is a global executor config.
if "=" not in team_executor_config or team_executor_config.startswith("="):
team_name = None
executor_names = team_executor_config.strip("=")
else:
cls.block_use_of_multi_team()
if conf.getboolean("core", "multi_team", fallback=False):
team_name, executor_names = team_executor_config.split("=")
else:
log.warning(
"The 'multi_team' config is not enabled, but team executors were configured. "
"The following team executor config will be ignored: %s",
team_executor_config,
)
continue
# Check for duplicate team names
if team_name in seen_teams:
raise AirflowConfigException(
f"Team '{team_name}' appears more than once in executor configuration. "
f"Each team can only be specified once in the executor config."
)
seen_teams.add(team_name)
# Split by comma to get the individual executor names and strip spaces off of them
configs.append((team_name, [name.strip() for name in executor_names.split(",")]))
# Validate that at least one global executor exists
has_global_executor = any(team_name is None for team_name, _ in configs)
if not has_global_executor:
raise AirflowConfigException(
"At least one global executor must be configured. Current configuration only contains "
"team-based executors. Please add a global executor configuration (e.g., "
"'CeleryExecutor;team1=LocalExecutor' instead of 'team1=CeleryExecutor;team2=LocalExecutor')."
)
# Validate that all team names exist in the database (excluding None for global configs)
team_names_to_validate = {team_name for team_name in seen_teams if team_name is not None}
if team_names_to_validate and validate_teams:
cls._validate_teams_exist_in_database(team_names_to_validate)
return configs
@classmethod
def get_executor_names(cls, validate_teams: bool = True) -> list[ExecutorName]:
"""
Return the executor names from Airflow configuration.
:param validate_teams: Whether to validate that team names exist in database
:return: List of executor names from Airflow configuration
"""
return cls._get_executor_names(validate_teams=validate_teams)
@classmethod
def get_default_executor_name(cls, team_name: str | None = None) -> ExecutorName:
"""
Return the default executor name from Airflow configuration.
:return: executor name from Airflow configuration
"""
cls._get_executor_names()
# The default executor is the first configured executor in the list
return _team_name_to_executors[team_name][0]
@classmethod
def get_default_executor(cls) -> BaseExecutor:
"""Create a new instance of the configured executor if none exists and returns it."""
default_executor = cls.load_executor(cls.get_default_executor_name())
return default_executor
@classmethod
def init_executors(cls) -> list[BaseExecutor]:
"""Create a new instance of all configured executors if not cached already."""
executor_names = cls._get_executor_names()
loaded_executors = []
for executor_name in executor_names:
loaded_executor = cls.load_executor(executor_name)
if executor_name.alias:
cls.executors[executor_name.alias] = executor_name.module_path
else:
cls.executors[loaded_executor.__class__.__name__] = executor_name.module_path
loaded_executors.append(loaded_executor)
return loaded_executors
@classmethod
def lookup_executor_name_by_str(
cls, executor_name_str: str, team_name: str | None = None
) -> ExecutorName:
# lookup the executor by alias first, if not check if we're given a module path
if (
not _classname_to_executors_per_team
or not _module_to_executors_per_team
or not _alias_to_executors_per_team
):
# if we haven't loaded the executors yet, such as directly calling load_executor
cls._get_executor_names()
if executor_name := _alias_to_executors_per_team.get(team_name, {}).get(executor_name_str):
return executor_name
if executor_name := _module_to_executors_per_team.get(team_name, {}).get(executor_name_str):
return executor_name
if executor_name := _classname_to_executors_per_team.get(team_name, {}).get(executor_name_str):
return executor_name
raise UnknownExecutorException(f"Unknown executor being loaded: {executor_name_str}")
@classmethod
def load_executor(cls, executor_name: ExecutorName | str | None) -> BaseExecutor:
"""
Load the executor.
This supports the following formats:
* by executor name for core executor
* by import path
* by class name of the Executor
* by ExecutorName object specification
:return: an instance of executor class via executor_name
"""
if not executor_name:
_executor_name = cls.get_default_executor_name()
elif isinstance(executor_name, str):
_executor_name = cls.lookup_executor_name_by_str(executor_name)
else:
_executor_name = executor_name
try:
executor_cls, import_source = cls.import_executor_cls(_executor_name)
log.debug("Loading executor %s from %s", _executor_name, import_source.value)
if _executor_name.team_name:
executor = executor_cls(team_name=_executor_name.team_name)
else:
executor = executor_cls()
except ImportError as e:
log.error(e)
raise AirflowConfigException(
f'The module/attribute could not be loaded. Please check "executor" key in "core" section. '
f'Current value: "{_executor_name}".'
)
log.info("Loaded executor: %s", _executor_name)
# Store the executor name we've built for this executor in the
# instance. This makes it easier for the Scheduler, Backfill, etc to
# know how we refer to this executor.
executor.name = _executor_name
return executor
@classmethod
def import_executor_cls(cls, executor_name: ExecutorName) -> tuple[type[BaseExecutor], ConnectorSource]:
"""
Import the executor class.
Supports the same formats as ExecutorLoader.load_executor.
:param executor_name: Name of core executor or module path to executor.
:return: executor class via executor_name and executor import source
"""
return import_string(executor_name.module_path), executor_name.connector_source
@classmethod
def import_default_executor_cls(cls) -> tuple[type[BaseExecutor], ConnectorSource]:
"""
Import the default executor class.
:return: executor class and executor import source
"""
executor_name = cls.get_default_executor_name()
executor, source = cls.import_executor_cls(executor_name)
return executor, source
| ExecutorLoader |
python | huggingface__transformers | tests/models/encoder_decoder/test_modeling_encoder_decoder.py | {
"start": 1849,
"end": 30640
} | class ____:
supports_sdpa = False
def get_encoder_decoder_model(self, config, decoder_config):
raise NotImplementedError
def prepare_config_and_inputs(self):
raise NotImplementedError
def get_pretrained_model(self):
raise NotImplementedError
def check_encoder_decoder_model_from_pretrained_configs(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
**kwargs,
):
encoder_decoder_config = EncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config)
self.assertTrue(encoder_decoder_config.decoder.is_decoder)
enc_dec_model = EncoderDecoderModel(encoder_decoder_config)
enc_dec_model.to(torch_device)
enc_dec_model.eval()
self.assertTrue(enc_dec_model.config.is_encoder_decoder)
outputs_encoder_decoder = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
self.assertEqual(
outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,))
)
def check_encoder_decoder_model(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
**kwargs,
):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
self.assertTrue(enc_dec_model.config.decoder.is_decoder)
self.assertTrue(enc_dec_model.config.decoder.add_cross_attention)
self.assertTrue(enc_dec_model.config.is_encoder_decoder)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
self.assertEqual(
outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,))
)
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_hidden_states)
outputs_encoder_decoder = enc_dec_model(
encoder_outputs=encoder_outputs,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
self.assertEqual(
outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,))
)
# Test passing encoder_outputs as tuple.
encoder_outputs = (encoder_hidden_states,)
outputs_encoder_decoder = enc_dec_model(
encoder_outputs=encoder_outputs,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
self.assertEqual(
outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,))
)
def check_encoder_decoder_model_from_pretrained_using_model_paths(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
**kwargs,
):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
with (
tempfile.TemporaryDirectory() as encoder_tmp_dirname,
tempfile.TemporaryDirectory() as decoder_tmp_dirname,
):
encoder_model.save_pretrained(encoder_tmp_dirname)
decoder_model.save_pretrained(decoder_tmp_dirname)
model_kwargs = {"encoder_hidden_dropout_prob": 0.0}
# BartConfig has no hidden_dropout_prob.
if not hasattr(decoder_config, "hidden_dropout_prob"):
model_kwargs["decoder_activation_function"] = "gelu"
else:
model_kwargs["decoder_hidden_dropout_prob"] = 0.0
enc_dec_model = EncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_tmp_dirname, decoder_tmp_dirname, **model_kwargs
)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
return_dict=True,
)
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
self.assertEqual(
outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,))
)
def check_encoder_decoder_model_from_pretrained(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
return_dict,
**kwargs,
):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict}
enc_dec_model = EncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
return_dict=True,
)
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
self.assertEqual(
outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,))
)
def check_save_and_load(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
**kwargs,
):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
enc_dec_model.eval()
with torch.no_grad():
outputs = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2)] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
enc_dec_model.save_pretrained(tmpdirname)
enc_dec_model = EncoderDecoderModel.from_pretrained(tmpdirname)
enc_dec_model.to(torch_device)
after_outputs = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def check_save_and_load_encoder_decoder_model(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
**kwargs,
):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
enc_dec_model.eval()
with torch.no_grad():
outputs = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2)] = 0
with (
tempfile.TemporaryDirectory() as encoder_tmp_dirname,
tempfile.TemporaryDirectory() as decoder_tmp_dirname,
):
enc_dec_model.encoder.save_pretrained(encoder_tmp_dirname)
enc_dec_model.decoder.save_pretrained(decoder_tmp_dirname)
enc_dec_model = EncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=encoder_tmp_dirname,
decoder_pretrained_model_name_or_path=decoder_tmp_dirname,
)
enc_dec_model.to(torch_device)
after_outputs = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def check_encoder_decoder_model_labels(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
labels,
**kwargs,
):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
labels=labels,
)
loss = outputs_encoder_decoder["loss"]
# check that backprop works
loss.backward()
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
self.assertEqual(
outputs_encoder_decoder["encoder_last_hidden_state"].shape, (input_ids.shape + (config.hidden_size,))
)
def _check_output_with_attentions(
self, outputs_encoder_decoder, config, input_ids, decoder_config, decoder_input_ids
):
encoder_attentions = outputs_encoder_decoder["encoder_attentions"]
self.assertEqual(len(encoder_attentions), config.num_hidden_layers)
self.assertEqual(
encoder_attentions[0].shape[-3:], (config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1])
)
decoder_attentions = outputs_encoder_decoder["decoder_attentions"]
num_decoder_layers = (
decoder_config.num_decoder_layers
if hasattr(decoder_config, "num_decoder_layers")
else decoder_config.num_hidden_layers
)
self.assertEqual(len(decoder_attentions), num_decoder_layers)
self.assertEqual(
decoder_attentions[0].shape[-3:],
(decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]),
)
cross_attentions = outputs_encoder_decoder["cross_attentions"]
self.assertEqual(len(cross_attentions), num_decoder_layers)
cross_attention_input_seq_len = decoder_input_ids.shape[-1] * (
1 + (decoder_config.ngram if hasattr(decoder_config, "ngram") else 0)
)
self.assertEqual(
cross_attentions[0].shape[-3:],
(decoder_config.num_attention_heads, cross_attention_input_seq_len, input_ids.shape[-1]),
)
def check_encoder_decoder_model_output_attentions(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
labels,
**kwargs,
):
# force eager attention to support output attentions
config._attn_implementation = "eager"
decoder_config._attn_implementation = "eager"
# make the decoder inputs a different shape from the encoder inputs to harden the test
decoder_input_ids = decoder_input_ids[:, :-1]
decoder_attention_mask = decoder_attention_mask[:, :-1]
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
output_attentions=True,
)
self._check_output_with_attentions(
outputs_encoder_decoder, config, input_ids, decoder_config, decoder_input_ids
)
def check_encoder_decoder_model_output_attentions_from_config(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
labels,
**kwargs,
):
# Similar to `check_encoder_decoder_model_output_attentions`, but with `output_attentions` triggered from the
# config file. Contrarily to most models, changing the model's config won't work -- the defaults are loaded
# from the inner models' configurations.
# force eager attention to support output attentions
config._attn_implementation = "eager"
decoder_config._attn_implementation = "eager"
decoder_input_ids = decoder_input_ids[:, :-1]
decoder_attention_mask = decoder_attention_mask[:, :-1]
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.config._attn_implementation = "eager" # model config -> won't work
enc_dec_model.config.output_attentions = True # model config -> won't work
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
self.assertTrue(
all(
key not in outputs_encoder_decoder
for key in ["encoder_attentions", "decoder_attentions", "cross_attentions"]
)
)
config.output_attentions = True # inner model config -> will work
decoder_config.output_attentions = True
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
self._check_output_with_attentions(
outputs_encoder_decoder, config, input_ids, decoder_config, decoder_input_ids
)
def check_encoder_decoder_model_generate(self, input_ids, config, decoder_config, **kwargs):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
# Generate until max length
if hasattr(enc_dec_model.config, "eos_token_id"):
enc_dec_model.config.eos_token_id = None
if hasattr(enc_dec_model.config, "decoder") and hasattr(enc_dec_model.config.decoder, "eos_token_id"):
enc_dec_model.config.decoder.eos_token_id = None
if hasattr(enc_dec_model.generation_config, "eos_token_id"):
enc_dec_model.generation_config.eos_token_id = None
enc_dec_model.to(torch_device)
# Bert does not have a bos token id, so use pad_token_id instead
generated_output = enc_dec_model.generate(
input_ids,
decoder_start_token_id=enc_dec_model.config.decoder.pad_token_id,
max_length=enc_dec_model.generation_config.max_length,
)
self.assertEqual(generated_output.shape, (input_ids.shape[0],) + (enc_dec_model.generation_config.max_length,))
def create_and_check_encoder_decoder_shared_weights(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
labels,
**kwargs,
):
torch.manual_seed(0)
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
model.to(torch_device)
model.eval()
# load state dict copies weights but does not tie them
decoder_state_dict = model.decoder._modules[model.decoder.base_model_prefix].state_dict()
model.encoder.load_state_dict(decoder_state_dict, strict=False)
torch.manual_seed(0)
tied_encoder_model, tied_decoder_model = self.get_encoder_decoder_model(config, decoder_config)
config = EncoderDecoderConfig.from_encoder_decoder_configs(
tied_encoder_model.config, tied_decoder_model.config, tie_encoder_decoder=True
)
tied_model = EncoderDecoderModel(encoder=tied_encoder_model, decoder=tied_decoder_model, config=config)
tied_model.to(torch_device)
tied_model.eval()
model_result = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
tied_model_result = tied_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item()
# check that outputs are equal
self.assertTrue(
torch.allclose(
model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4
)
)
# check that outputs after saving and loading are equal
with tempfile.TemporaryDirectory() as tmpdirname:
tied_model.save_pretrained(tmpdirname)
tied_model = EncoderDecoderModel.from_pretrained(tmpdirname)
tied_model.to(torch_device)
tied_model.eval()
random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item()
tied_model_result = tied_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
# check that outputs are equal
self.assertTrue(
torch.allclose(
model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4
)
)
def test_encoder_decoder_model(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model(**input_ids_dict)
def test_encoder_decoder_model_from_pretrained_configs(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model_from_pretrained_configs(**input_ids_dict)
def test_encoder_decoder_model_from_pretrained(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=False)
def test_encoder_decoder_model_from_pretrained_return_dict(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=True)
def test_encoder_decoder_model_from_pretrained_using_model_paths(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model_from_pretrained_using_model_paths(**input_ids_dict, return_dict=False)
def test_save_and_load_from_pretrained(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_save_and_load(**input_ids_dict)
def test_save_and_load_from_encoder_decoder_pretrained(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_save_and_load_encoder_decoder_model(**input_ids_dict)
def test_encoder_decoder_model_labels(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model_labels(**input_ids_dict)
def test_encoder_decoder_model_output_attentions(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model_output_attentions(**input_ids_dict)
def test_encoder_decoder_model_output_attentions_from_config(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model_output_attentions_from_config(**input_ids_dict)
def test_encoder_decoder_model_generate(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model_generate(**input_ids_dict)
@unittest.skip("This is no longer FORCED, it was just not working before.")
def test_encoder_decoder_model_shared_weights(self):
input_ids_dict = self.prepare_config_and_inputs()
self.create_and_check_encoder_decoder_shared_weights(**input_ids_dict)
def test_training_gradient_checkpointing(self):
inputs_dict = self.prepare_config_and_inputs()
encoder_model, decoder_model = self.get_encoder_decoder_model(
inputs_dict["config"], inputs_dict["decoder_config"]
)
model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
model.to(torch_device)
model.gradient_checkpointing_enable()
model.train()
model.config.decoder_start_token_id = 0
model.config.pad_token_id = 0
model_inputs = {
"input_ids": inputs_dict["input_ids"],
"attention_mask": inputs_dict["attention_mask"],
"labels": inputs_dict["labels"],
"decoder_input_ids": inputs_dict["decoder_input_ids"],
}
model_inputs = {k: v.to(torch_device) for k, v in model_inputs.items()}
loss = model(**model_inputs).loss
loss.backward()
@slow
@require_deterministic_for_xpu
def test_real_model_save_load_from_pretrained(self):
model_2 = self.get_pretrained_model()
model_2.to(torch_device)
input_ids = ids_tensor([13, 5], model_2.config.encoder.vocab_size)
decoder_input_ids = ids_tensor([13, 1], model_2.config.encoder.vocab_size)
attention_mask = ids_tensor([13, 5], vocab_size=2)
with torch.no_grad():
outputs = model_2(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
)
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2)] = 0
with tempfile.TemporaryDirectory() as tmp_dirname:
model_2.save_pretrained(tmp_dirname)
model_1 = EncoderDecoderModel.from_pretrained(tmp_dirname)
model_1.to(torch_device)
after_outputs = model_1(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
)
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_sdpa_can_dispatch_composite_models(self):
if not self.supports_sdpa:
self.skipTest("SDPA is not supported")
inputs_dict = self.prepare_config_and_inputs()
encoder_config, decoder_config = inputs_dict["config"], inputs_dict["decoder_config"]
config = EncoderDecoderConfig.from_encoder_decoder_configs(
encoder_config=encoder_config, decoder_config=decoder_config
)
model = EncoderDecoderModel(config=config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_sdpa = EncoderDecoderModel.from_pretrained(tmpdirname)
model_sdpa = model_sdpa.eval().to(torch_device)
# see https://github.com/huggingface/transformers/pull/32238
# Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present)
encoder_attn = "sdpa" if model.encoder._supports_sdpa else "eager"
decoder_attn = "sdpa" if model.decoder._supports_sdpa else "eager"
self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
self.assertTrue(model_sdpa.encoder.config._attn_implementation == encoder_attn)
self.assertTrue(model_sdpa.decoder.config._attn_implementation == decoder_attn)
# Also test that nothing break if we request SDPA explicitly, when both sub-parts support it.
# If the model supports sdpa (i.e. all of sub-models supports it) we'll dispatch safely
# Otherwise we should raise error that SDPA is not supported, as some of the sub-models doesn't support
if encoder_attn == "sdpa" and decoder_attn == "sdpa":
model_sdpa_explicit = EncoderDecoderModel.from_pretrained(tmpdirname, attn_implementation="sdpa")
model_sdpa_explicit = model_sdpa_explicit.eval().to(torch_device)
self.assertTrue(model_sdpa_explicit.config._attn_implementation == "sdpa")
else:
with self.assertRaises(ValueError):
model_sdpa_explicit = EncoderDecoderModel.from_pretrained(tmpdirname, attn_implementation="sdpa")
model_eager = EncoderDecoderModel.from_pretrained(
tmpdirname,
attn_implementation="eager",
)
model_eager = model_eager.eval().to(torch_device)
self.assertTrue(model_eager.config._attn_implementation == "eager")
self.assertTrue(model_eager.encoder.config._attn_implementation == "eager")
self.assertTrue(model_eager.decoder.config._attn_implementation == "eager")
for name, submodule in model_eager.named_modules():
class_name = submodule.__class__.__name__
if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name:
raise ValueError("The eager model should not have SDPA attention layers")
@require_torch
| EncoderDecoderMixin |
python | ray-project__ray | python/ray/util/collective/util.py | {
"start": 1303,
"end": 2118
} | class ____:
"""Store the group information created via `create_collective_group`.
Note: Should be used as a NamedActor.
"""
def __init__(self):
self.ids = None
self.world_size = -1
self.rank = -1
self.backend = None
self.gloo_timeout = 30000
def set_info(self, ids, world_size, rank, backend, gloo_timeout):
"""Store collective information."""
self.ids = ids
self.world_size = world_size
self.rank = rank
self.backend = backend
self.gloo_timeout = gloo_timeout
def get_info(self):
"""Get previously stored collective information."""
return (
self.ids,
self.world_size,
self.rank,
self.backend,
self.gloo_timeout,
)
| Info |
python | ansible__ansible | lib/ansible/plugins/lookup/config.py | {
"start": 3837,
"end": 5687
} | class ____(LookupBase):
def run(self, terms, variables=None, **kwargs):
self.set_options(var_options=variables, direct=kwargs)
missing = self.get_option('on_missing')
ptype = self.get_option('plugin_type')
pname = self.get_option('plugin_name')
show_origin = self.get_option('show_origin')
if (ptype or pname) and not (ptype and pname):
raise AnsibleError('Both plugin_type and plugin_name are required, cannot use one without the other.')
ret = []
for term in terms:
if not isinstance(term, str):
raise AnsibleError(f'Invalid setting identifier, {term!r} is not a {str}, its a {type(term)}.')
result = Sentinel
origin = None
# plugin creates settings on load, we ensure that happens here
if pname:
# this is cached so not too expensive
loader = getattr(plugin_loader, f'{ptype}_loader')
p = loader.get(pname, class_only=True)
if p is None:
raise AnsibleError(f"Unable to load {ptype} plugin {pname!r}.")
try:
result, origin = C.config.get_config_value_and_origin(term, plugin_type=ptype, plugin_name=pname, variables=variables)
except AnsibleUndefinedConfigEntry as e:
match missing:
case 'error':
raise
case 'skip':
pass
case 'warn':
self._display.error_as_warning(msg=f"Skipping {term}.", exception=e)
if result is not Sentinel:
if show_origin:
ret.append([result, origin])
else:
ret.append(result)
return ret
| LookupModule |
python | pytorch__pytorch | torch/_dynamo/convert_frame.py | {
"start": 5839,
"end": 18262
} | class ____:
def __init__(self) -> None:
self.seen: list[ReferenceType[CodeType]] = []
self.seen_ids: set[int] = set()
def add(self, strong_obj: CodeType) -> None:
idx = id(strong_obj)
if idx not in self.seen_ids:
obj = weakref.ref(strong_obj, lambda _: self.seen_ids.remove(idx))
self.seen.append(obj)
self.seen_ids.add(idx)
def __contains__(self, item: CodeType) -> bool:
return id(item) in self.seen_ids
def clear(self) -> None:
self.seen.clear()
self.seen_ids.clear()
input_codes = Tracker()
output_codes = Tracker()
initial_global_state: Optional[GlobalStateGuard] = None
@functools.wraps(original_forward_from_src)
def fx_forward_from_src_skip_result(
src: str, globals: dict[str, Any], co_fields: Optional[dict[str, str]] = None
) -> FunctionType:
# we monkey patch FX to prevent infinite loop of trying to convert
# our generated code
result = original_forward_from_src(src, globals, co_fields)
skip_code(result.__code__)
return result
def log_dynamo_start(code: CodeType, skip: int = 0) -> list[str]:
convert_frame_intern = structured.intern_string(__file__)
captured_tb = CapturedTraceback.extract(skip=4 + skip).summary()
frames_interned = structured.from_traceback(captured_tb)
# Extract and filter the stack
stack = list(
itertools.takewhile(
lambda f: f["filename"] != convert_frame_intern,
frames_interned,
)
) + [
{
"line": code.co_firstlineno,
"name": code.co_name,
"filename": structured.intern_string(code.co_filename),
}
]
# Initialize the ChromiumEventLogger on start
torch._logging.trace_structured(
"dynamo_start",
lambda: {"stack": stack},
)
# Capture stack separately without using from_traceback to get the actual filenames
stack_strings = [
f"Line: {frame.lineno}, Name: {frame.name}, Filename: {frame.filename}"
for frame in captured_tb
if frame.filename != convert_frame_intern
] + [
f"Line: {code.co_firstlineno}, Name: {code.co_name}, Filename: {code.co_filename}"
]
return stack_strings
def preserve_global_state(fn: Callable[_P, _T]) -> Callable[_P, _T]:
"""
Context manager to:
1) Save/restore torch.is_grad_enabled() state
2) Save/restore python random state
3) Save/restore torch random state
4) Monkey patch torch.fx.graph_module._forward_from_src
"""
@functools.wraps(fn)
def _fn(*args: _P.args, **kwargs: _P.kwargs) -> _T:
guards = GlobalStateGuard()
prior_grad_mode = torch.is_grad_enabled()
# Just in case we get left in a bad dispatch state we want to restore
# it. This can happen because the dispatch bits aren't a true
# stack/counter - so we can't just increment/decrement them as we enter
# and leave.
with (
torch._C._PreserveDispatchKeyGuard(),
maybe_disable_inference_mode(),
maybe_disable_inference_mode_for_fake_prop(),
):
prior_inference_mode = torch.is_inference_mode_enabled()
prior_deterministic = torch.are_deterministic_algorithms_enabled()
prior_warn_only = torch.is_deterministic_algorithms_warn_only_enabled()
prior_mobile_allocator_state = (
torch._C._is_default_mobile_cpu_allocator_set()
)
py_rng_state = random.getstate()
prior_dtype = torch.get_default_dtype()
torch_rng_state = torch.random.get_rng_state()
cuda_rng_state = None
if torch.cuda.is_available():
with torch._C.DisableTorchFunction():
cuda_rng_state = torch.cuda.get_rng_state()
cuda_matmul_fp32_prec = torch._C._get_fp32_precision_getter(
"cuda", "matmul"
)
prior_fwd_from_src = torch.fx.graph_module._forward_from_src
torch.fx.graph_module._forward_from_src = fx_forward_from_src_skip_result
cleanup = setup_compile_debug()
exit_stack = contextlib.ExitStack()
exit_stack.enter_context(
torch.fx._symbolic_trace._maybe_revert_all_patches()
)
exit_stack.enter_context(torch_function_mode_stack_state_mgr)
reset_user_object_tracking()
try:
return fn(*args, **kwargs)
finally:
cleanup.close()
assert torch._C._len_torch_function_stack() == 0, (
"Torch function mode stack state changed while dynamo tracing, please report a bug"
)
exit_stack.close()
torch._C._set_grad_enabled(prior_grad_mode)
torch.autograd.grad_mode._enter_inference_mode(prior_inference_mode)
torch.use_deterministic_algorithms(
prior_deterministic, warn_only=prior_warn_only
)
random.setstate(py_rng_state)
torch.random.set_rng_state(torch_rng_state)
torch.set_default_dtype(prior_dtype)
curr_mobile_allocator_state = (
torch._C._is_default_mobile_cpu_allocator_set()
)
if prior_mobile_allocator_state != curr_mobile_allocator_state:
torch._C._unset_default_mobile_cpu_allocator()
if cuda_rng_state is not None:
with torch._C.DisableTorchFunction():
torch.cuda.set_rng_state(cuda_rng_state)
torch._C._set_fp32_precision_setter(
"cuda", "matmul", cuda_matmul_fp32_prec
)
torch.fx.graph_module._forward_from_src = prior_fwd_from_src
assert guards.check(), (
f"Global {guards.reason()}state changed while dynamo tracing, please report a bug"
)
_fn._torchdynamo_orig_backend = fn # type: ignore[attr-defined]
return _fn
@TorchPatcher.suppress_torch_distributed_warnings
def has_tensor_in_frame(frame: DynamoFrameType) -> bool:
"""Check if the frame has torch.* related bits"""
# Check if the function was decorated using torch._dynamo.optimize
if frame.f_code in always_optimize_code_objects:
return True
# Check if there is global import of torch.*
for co_name in frame.f_code.co_names:
if co_name in frame.f_globals:
obj = frame.f_globals[co_name]
if isinstance(obj, ModuleType) and (
obj.__name__.startswith("torch.") or obj is torch
):
return True
# ... or a global import of numpy.*
if np and config.trace_numpy and (obj is np or is_numpy(obj)):
return True
seen_ids: dict[int, bool] = {}
def has_tensor(obj: object) -> bool:
"""Recursively check if the obj has a tensor"""
obj_id = id(obj)
if obj_id in seen_ids:
return seen_ids[obj_id]
seen_ids[obj_id] = False
if isinstance(obj, (torch.Tensor, torch.nn.Module)) or (
istype(obj, type) and issubclass(obj, torch.nn.Module)
):
seen_ids[obj_id] = True
return seen_ids[obj_id]
elif (
config.trace_numpy
and np
and (istype(obj, np.ndarray) or isinstance(obj, np.generic))
):
seen_ids[obj_id] = True
return seen_ids[obj_id]
elif istype(obj, (list, tuple)):
seen_ids[obj_id] = any(has_tensor(v) for v in obj)
return seen_ids[obj_id]
elif istype(obj, dict):
# Some packages like pytest can be updated during runtime. So, make a
# copy of values to avoid issues like "RuntimeError: dictionary
# changed size during iteration"
values = list(obj.values())
seen_ids[obj_id] = any(has_tensor(v) for v in values)
return seen_ids[obj_id]
elif istype(obj, (str, int, float, type(None), bool)):
seen_ids[obj_id] = False
return seen_ids[obj_id]
elif is_namedtuple(obj) and hasattr(obj, "_fields"):
seen_ids[obj_id] = any(has_tensor(getattr(obj, v)) for v in obj._fields)
return seen_ids[obj_id]
else:
# if config.debug:
# print(
# f"Assuming that object of type {type(obj)} does not have a tensor"
# )
return False
# Check if the passed arguments are of type Tensor
for value in frame.f_locals.values():
if has_tensor(value):
return True
log.debug(
"skipping because no torch.* %s \
%s %s",
frame.f_code.co_name,
frame.f_code.co_filename,
frame.f_code.co_firstlineno,
)
return False
def exception_handler(
e: Exception,
code: CodeType,
frame: Optional[DynamoFrameType] = None,
export: bool = False,
) -> None:
record_filename = None
if hasattr(e, "exec_record"):
record_filename = gen_record_file_name(e, code)
write_record_to_file(record_filename, e.exec_record)
e.record_filename = record_filename # type: ignore[attr-defined]
augment_exc_message(e, export=export)
FRAME_COUNTER = 0
FRAME_COMPILE_COUNTER: typing.Counter[Union[int, FrameStateSizeEntry]] = (
collections.Counter()
)
def maybe_cprofile(func: Callable[_P, _T]) -> Callable[_P, _T]:
if config.cprofile:
return cprofile_wrapper(func)
return func
def cprofile_wrapper(func: Callable[_P, _T]) -> Callable[_P, _T]:
@functools.wraps(func)
def profile_wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _T:
trace_id = CompileContext.current_trace_id()
assert trace_id, "Trace id is None"
profile_path = Path(
f"/tmp/{func.__name__}_{str(trace_id).replace('/', '_')}.profile"
)
prof = cProfile.Profile()
try:
prof.enable()
start_ts = time.time()
# pyrefly: ignore [bad-argument-type]
retval = prof.runcall(func, *args, **kwargs)
profile_latency = time.time() - start_ts
prof.disable()
except ValueError:
log.exception("failed to enable cProfile")
profile_latency = 0
retval = func(*args, **kwargs)
log.warning(
"### Cprofile for %s trace id [%s] took %.3f seconds ###",
func.__name__,
trace_id,
profile_latency,
)
ps = pstats.Stats(prof)
try:
prof.dump_stats(profile_path)
except OSError:
log.exception("Cannot write to %s", profile_path)
log.warning("Raw profile at %s", profile_path)
svg_path = profile_path.with_suffix(".svg")
try:
gprof2dot_process = subprocess.Popen(
[
"gprof2dot",
"-f",
"pstats",
"--node-label=total-time-percentage",
"--node-label=self-time-percentage",
"--node-label=total-time",
str(profile_path),
],
stdout=subprocess.PIPE,
)
subprocess.check_call(
["dot", "-Tsvg", "-o", str(svg_path)],
stdin=gprof2dot_process.stdout,
)
log.warning("Generated SVG from profile at %s", svg_path)
except FileNotFoundError:
log.warning(
"Failed to generate SVG from profile -- dumping stats instead."
"Try installing gprof2dot and dot for a better visualization"
)
ps.sort_stats(pstats.SortKey.TIME).print_stats(20)
ps.sort_stats(pstats.SortKey.CUMULATIVE).print_stats(20)
if manifold_link := maybe_upload_prof_stats_to_manifold(
str(profile_path)
): # fb-only
torch._logging.trace_structured(
"link",
lambda: {"name": "cprofile_manifold_url", "url": manifold_link},
)
return retval
return profile_wrapper
@dataclass
| Tracker |
python | apache__airflow | providers/ssh/tests/unit/ssh/operators/test_ssh.py | {
"start": 1921,
"end": 12196
} | class ____:
def setup_method(self):
hook = SSHHook(ssh_conn_id="ssh_default")
hook.no_host_key_check = True
ssh_client = mock.create_autospec(SSHClient)
# `with ssh_client` should return itself.
ssh_client.__enter__.return_value = ssh_client
hook.get_conn = mock.MagicMock(return_value=ssh_client)
self.hook = hook
# Make sure nothing in this test actually connects to SSH -- that's for hook tests.
@pytest.fixture(autouse=True)
def patch_exec_ssh_client(self):
with mock.patch.object(self.hook, "exec_ssh_client_command") as exec_ssh_client_command:
self.exec_ssh_client_command = exec_ssh_client_command
exec_ssh_client_command.return_value = (0, b"airflow", "")
yield exec_ssh_client_command
@pytest.mark.parametrize(
("cmd_timeout", "cmd_timeout_expected"),
[(45, 45), ("Not Set", 10), (None, None)],
)
def test_hook_created_correctly(self, cmd_timeout, cmd_timeout_expected):
conn_timeout = 20
if cmd_timeout == "Not Set":
task = SSHOperator(
task_id="test",
command=COMMAND,
conn_timeout=conn_timeout,
ssh_conn_id="ssh_default",
)
else:
task = SSHOperator(
task_id="test",
command=COMMAND,
conn_timeout=conn_timeout,
cmd_timeout=cmd_timeout,
ssh_conn_id="ssh_default",
)
assert conn_timeout == task.hook.conn_timeout
assert cmd_timeout_expected == task.hook.cmd_timeout
assert task.hook.ssh_conn_id == "ssh_default"
@pytest.mark.parametrize(
("enable_xcom_pickling", "output", "expected"),
[(False, b"airflow", "YWlyZmxvdw=="), (True, b"airflow", b"airflow"), (True, b"", b"")],
)
def test_return_value(self, enable_xcom_pickling, output, expected):
task = SSHOperator(
task_id="test",
ssh_hook=self.hook,
command=COMMAND,
environment={"TEST": "value"},
)
with conf_vars({("core", "enable_xcom_pickling"): str(enable_xcom_pickling)}):
self.exec_ssh_client_command.return_value = (0, output, b"")
result = task.execute(None)
assert result == expected
self.exec_ssh_client_command.assert_called_with(
mock.ANY, COMMAND, timeout=NOTSET, environment={"TEST": "value"}, get_pty=False
)
@mock.patch("os.environ", {"AIRFLOW_CONN_" + TEST_CONN_ID.upper(): "ssh://test_id@localhost"})
@mock.patch.object(SSHOperator, "run_ssh_client_command")
@mock.patch.object(SSHHook, "get_conn")
def test_arg_checking(self, get_conn, run_ssh_client_command):
run_ssh_client_command.return_value = b""
# Exception should be raised if neither ssh_hook nor ssh_conn_id is provided.
task_0 = SSHOperator(task_id="test", command=COMMAND)
with pytest.raises(AirflowException, match="Cannot operate without ssh_hook or ssh_conn_id."):
task_0.execute(None)
# If ssh_hook is invalid/not provided, use ssh_conn_id to create SSHHook.
task_1 = SSHOperator(
task_id="test_1",
ssh_hook="string_rather_than_SSHHook", # Invalid ssh_hook.
ssh_conn_id=TEST_CONN_ID,
command=COMMAND,
)
task_1.execute(None)
assert task_1.ssh_hook.ssh_conn_id == TEST_CONN_ID
task_2 = SSHOperator(
task_id="test_2",
ssh_conn_id=TEST_CONN_ID, # No ssh_hook provided.
command=COMMAND,
)
task_2.execute(None)
assert task_2.ssh_hook.ssh_conn_id == TEST_CONN_ID
# If both valid ssh_hook and ssh_conn_id are provided, ignore ssh_conn_id.
task_3 = SSHOperator(
task_id="test_3",
ssh_hook=self.hook,
ssh_conn_id=TEST_CONN_ID,
command=COMMAND,
)
task_3.execute(None)
assert task_3.ssh_hook.ssh_conn_id == self.hook.ssh_conn_id
# If remote_host was specified, ensure it is used
task_4 = SSHOperator(
task_id="test_4",
ssh_hook=self.hook,
ssh_conn_id=TEST_CONN_ID,
command=COMMAND,
remote_host="operator_remote_host",
)
task_4.execute(None)
assert task_4.ssh_hook.ssh_conn_id == self.hook.ssh_conn_id
assert task_4.ssh_hook.remote_host == "operator_remote_host"
with pytest.raises(
AirflowException, match="SSH operator error: SSH command not specified. Aborting."
):
SSHOperator(
task_id="test_5",
ssh_hook=self.hook,
command=None,
).execute(None)
@pytest.mark.parametrize(
("command", "get_pty_in", "get_pty_out"),
[
(COMMAND, False, False),
(COMMAND, True, True),
(COMMAND_WITH_SUDO, False, True),
(COMMAND_WITH_SUDO, True, True),
],
)
def test_get_pyt_set_correctly(self, command, get_pty_in, get_pty_out):
task = SSHOperator(
task_id="test",
ssh_hook=self.hook,
command=command,
get_pty=get_pty_in,
)
task.execute(None)
assert task.get_pty == get_pty_out
def test_ssh_client_managed_correctly(self):
# Ensure connection gets closed once (via context_manager) using on_kill
task = SSHOperator(
task_id="test",
ssh_hook=self.hook,
command="ls",
)
task.execute()
self.hook.get_conn.assert_called_once()
self.hook.get_conn.return_value.__exit__.assert_called_once()
@pytest.mark.parametrize(
("extra_kwargs", "actual_exit_code", "expected_exc"),
[
({}, 0, None),
({}, 100, AirflowException),
({}, 101, AirflowException),
({"skip_on_exit_code": None}, 0, None),
({"skip_on_exit_code": None}, 100, AirflowException),
({"skip_on_exit_code": None}, 101, AirflowException),
({"skip_on_exit_code": 100}, 0, None),
({"skip_on_exit_code": 100}, 100, AirflowSkipException),
({"skip_on_exit_code": 100}, 101, AirflowException),
({"skip_on_exit_code": 0}, 0, AirflowSkipException),
({"skip_on_exit_code": [100]}, 0, None),
({"skip_on_exit_code": [100]}, 100, AirflowSkipException),
({"skip_on_exit_code": [100]}, 101, AirflowException),
({"skip_on_exit_code": [100, 102]}, 101, AirflowException),
({"skip_on_exit_code": (100,)}, 0, None),
({"skip_on_exit_code": (100,)}, 100, AirflowSkipException),
({"skip_on_exit_code": (100,)}, 101, AirflowException),
],
)
def test_skip(self, extra_kwargs, actual_exit_code, expected_exc):
command = "not_a_real_command"
self.exec_ssh_client_command.return_value = (actual_exit_code, b"", b"")
operator = SSHOperator(
task_id="test",
ssh_hook=self.hook,
command=command,
**extra_kwargs,
)
if expected_exc is None:
operator.execute({})
else:
with pytest.raises(expected_exc):
operator.execute({})
def test_command_errored(self):
# Test that run_ssh_client_command works on invalid commands
command = "not_a_real_command"
task = SSHOperator(
task_id="test",
ssh_hook=self.hook,
command=command,
)
self.exec_ssh_client_command.return_value = (1, b"", b"Error here")
with pytest.raises(AirflowException, match="SSH operator error: exit status = 1"):
task.execute(None)
def test_push_ssh_exit_to_xcom(self, request, dag_maker):
# Test pulls the value previously pushed to xcom and checks if it's the same
command = "not_a_real_command"
ssh_exit_code = random.randrange(1, 100)
self.exec_ssh_client_command.return_value = (ssh_exit_code, b"", b"ssh output")
with dag_maker(dag_id=f"dag_{request.node.name}") as dag:
task = SSHOperator(task_id="push_xcom", ssh_hook=self.hook, command=command)
dr = dag_maker.create_dagrun(run_id="push_xcom")
if AIRFLOW_V_3_0_PLUS:
sync_dag_to_db(dag)
dag_version = DagVersion.get_latest_version(dag.dag_id)
ti = TaskInstance(task=task, run_id=dr.run_id, dag_version_id=dag_version.id)
else:
ti = TaskInstance(task=task, run_id=dr.run_id)
with pytest.raises(AirflowException, match=f"SSH operator error: exit status = {ssh_exit_code}"):
dag_maker.run_ti("push_xcom", dr)
assert ti.xcom_pull(task_ids=task.task_id, key="ssh_exit") == ssh_exit_code
def test_timeout_triggers_on_kill(self, request, dag_maker):
def command_sleep_forever(*args, **kwargs):
time.sleep(100) # This will be interrupted by the timeout
self.exec_ssh_client_command.side_effect = command_sleep_forever
with dag_maker(dag_id=f"dag_{request.node.name}"):
_ = SSHOperator(
task_id="test_timeout",
ssh_hook=self.hook,
command="sleep 100",
execution_timeout=timedelta(seconds=1),
)
dr = dag_maker.create_dagrun(run_id="test_timeout")
with mock.patch.object(SSHOperator, "on_kill") as mock_on_kill:
with pytest.raises(AirflowTaskTimeout):
dag_maker.run_ti("test_timeout", dr)
# Wait a bit to ensure on_kill has time to be called
time.sleep(1)
mock_on_kill.assert_called_once()
def test_remote_host_passed_at_hook_init(self):
remote_host = "test_host.internal"
task = SSHOperator(
task_id="test_remote_host_passed",
ssh_conn_id="ssh_default",
remote_host=remote_host,
command=COMMAND,
)
assert task.hook.remote_host == remote_host
| TestSSHOperator |
python | pandas-dev__pandas | pandas/core/arraylike.py | {
"start": 625,
"end": 17765
} | class ____:
# -------------------------------------------------------------
# Comparisons
def _cmp_method(self, other, op):
return NotImplemented
@unpack_zerodim_and_defer("__eq__")
def __eq__(self, other):
return self._cmp_method(other, operator.eq)
@unpack_zerodim_and_defer("__ne__")
def __ne__(self, other):
return self._cmp_method(other, operator.ne)
@unpack_zerodim_and_defer("__lt__")
def __lt__(self, other):
return self._cmp_method(other, operator.lt)
@unpack_zerodim_and_defer("__le__")
def __le__(self, other):
return self._cmp_method(other, operator.le)
@unpack_zerodim_and_defer("__gt__")
def __gt__(self, other):
return self._cmp_method(other, operator.gt)
@unpack_zerodim_and_defer("__ge__")
def __ge__(self, other):
return self._cmp_method(other, operator.ge)
# -------------------------------------------------------------
# Logical Methods
def _logical_method(self, other, op):
return NotImplemented
@unpack_zerodim_and_defer("__and__")
def __and__(self, other):
return self._logical_method(other, operator.and_)
@unpack_zerodim_and_defer("__rand__")
def __rand__(self, other):
return self._logical_method(other, roperator.rand_)
@unpack_zerodim_and_defer("__or__")
def __or__(self, other):
return self._logical_method(other, operator.or_)
@unpack_zerodim_and_defer("__ror__")
def __ror__(self, other):
return self._logical_method(other, roperator.ror_)
@unpack_zerodim_and_defer("__xor__")
def __xor__(self, other):
return self._logical_method(other, operator.xor)
@unpack_zerodim_and_defer("__rxor__")
def __rxor__(self, other):
return self._logical_method(other, roperator.rxor)
# -------------------------------------------------------------
# Arithmetic Methods
def _arith_method(self, other, op):
return NotImplemented
@unpack_zerodim_and_defer("__add__")
def __add__(self, other):
"""
Get Addition of DataFrame and other, column-wise.
Equivalent to ``DataFrame.add(other)``.
Parameters
----------
other : scalar, sequence, Series, dict or DataFrame
Object to be added to the DataFrame.
Returns
-------
DataFrame
The result of adding ``other`` to DataFrame.
See Also
--------
DataFrame.add : Add a DataFrame and another object, with option for index-
or column-oriented addition.
Examples
--------
>>> df = pd.DataFrame(
... {"height": [1.5, 2.6], "weight": [500, 800]}, index=["elk", "moose"]
... )
>>> df
height weight
elk 1.5 500
moose 2.6 800
Adding a scalar affects all rows and columns.
>>> df[["height", "weight"]] + 1.5
height weight
elk 3.0 501.5
moose 4.1 801.5
Each element of a list is added to a column of the DataFrame, in order.
>>> df[["height", "weight"]] + [0.5, 1.5]
height weight
elk 2.0 501.5
moose 3.1 801.5
Keys of a dictionary are aligned to the DataFrame, based on column names;
each value in the dictionary is added to the corresponding column.
>>> df[["height", "weight"]] + {"height": 0.5, "weight": 1.5}
height weight
elk 2.0 501.5
moose 3.1 801.5
When `other` is a :class:`Series`, the index of `other` is aligned with the
columns of the DataFrame.
>>> s1 = pd.Series([0.5, 1.5], index=["weight", "height"])
>>> df[["height", "weight"]] + s1
height weight
elk 3.0 500.5
moose 4.1 800.5
Even when the index of `other` is the same as the index of the DataFrame,
the :class:`Series` will not be reoriented. If index-wise alignment is desired,
:meth:`DataFrame.add` should be used with `axis='index'`.
>>> s2 = pd.Series([0.5, 1.5], index=["elk", "moose"])
>>> df[["height", "weight"]] + s2
elk height moose weight
elk NaN NaN NaN NaN
moose NaN NaN NaN NaN
>>> df[["height", "weight"]].add(s2, axis="index")
height weight
elk 2.0 500.5
moose 4.1 801.5
When `other` is a :class:`DataFrame`, both columns names and the
index are aligned.
>>> other = pd.DataFrame(
... {"height": [0.2, 0.4, 0.6]}, index=["elk", "moose", "deer"]
... )
>>> df[["height", "weight"]] + other
height weight
deer NaN NaN
elk 1.7 NaN
moose 3.0 NaN
"""
return self._arith_method(other, operator.add)
@unpack_zerodim_and_defer("__radd__")
def __radd__(self, other):
return self._arith_method(other, roperator.radd)
@unpack_zerodim_and_defer("__sub__")
def __sub__(self, other):
return self._arith_method(other, operator.sub)
@unpack_zerodim_and_defer("__rsub__")
def __rsub__(self, other):
return self._arith_method(other, roperator.rsub)
@unpack_zerodim_and_defer("__mul__")
def __mul__(self, other):
return self._arith_method(other, operator.mul)
@unpack_zerodim_and_defer("__rmul__")
def __rmul__(self, other):
return self._arith_method(other, roperator.rmul)
@unpack_zerodim_and_defer("__truediv__")
def __truediv__(self, other):
return self._arith_method(other, operator.truediv)
@unpack_zerodim_and_defer("__rtruediv__")
def __rtruediv__(self, other):
return self._arith_method(other, roperator.rtruediv)
@unpack_zerodim_and_defer("__floordiv__")
def __floordiv__(self, other):
return self._arith_method(other, operator.floordiv)
@unpack_zerodim_and_defer("__rfloordiv")
def __rfloordiv__(self, other):
return self._arith_method(other, roperator.rfloordiv)
@unpack_zerodim_and_defer("__mod__")
def __mod__(self, other):
return self._arith_method(other, operator.mod)
@unpack_zerodim_and_defer("__rmod__")
def __rmod__(self, other):
return self._arith_method(other, roperator.rmod)
@unpack_zerodim_and_defer("__divmod__")
def __divmod__(self, other):
return self._arith_method(other, divmod)
@unpack_zerodim_and_defer("__rdivmod__")
def __rdivmod__(self, other):
return self._arith_method(other, roperator.rdivmod)
@unpack_zerodim_and_defer("__pow__")
def __pow__(self, other):
return self._arith_method(other, operator.pow)
@unpack_zerodim_and_defer("__rpow__")
def __rpow__(self, other):
return self._arith_method(other, roperator.rpow)
# -----------------------------------------------------------------------------
# Helpers to implement __array_ufunc__
def array_ufunc(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any):
"""
Compatibility with numpy ufuncs.
See also
--------
numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__
"""
from pandas.core.frame import (
DataFrame,
Series,
)
from pandas.core.generic import NDFrame
from pandas.core.internals import BlockManager
cls = type(self)
kwargs = _standardize_out_kwarg(**kwargs)
# for binary ops, use our custom dunder methods
result = maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs)
if result is not NotImplemented:
return result
# Determine if we should defer.
no_defer = (
np.ndarray.__array_ufunc__,
cls.__array_ufunc__,
)
for item in inputs:
higher_priority = (
hasattr(item, "__array_priority__")
and item.__array_priority__ > self.__array_priority__
)
has_array_ufunc = (
hasattr(item, "__array_ufunc__")
and type(item).__array_ufunc__ not in no_defer
and not isinstance(item, self._HANDLED_TYPES)
)
if higher_priority or has_array_ufunc:
return NotImplemented
# align all the inputs.
types = tuple(type(x) for x in inputs)
alignable = [
x for x, t in zip(inputs, types, strict=True) if issubclass(t, NDFrame)
]
if len(alignable) > 1:
# This triggers alignment.
# At the moment, there aren't any ufuncs with more than two inputs
# so this ends up just being x1.index | x2.index, but we write
# it to handle *args.
set_types = set(types)
if len(set_types) > 1 and {DataFrame, Series}.issubset(set_types):
# We currently don't handle ufunc(DataFrame, Series)
# well. Previously this raised an internal ValueError. We might
# support it someday, so raise a NotImplementedError.
raise NotImplementedError(
f"Cannot apply ufunc {ufunc} to mixed DataFrame and Series inputs."
)
axes = self.axes
for obj in alignable[1:]:
# this relies on the fact that we aren't handling mixed
# series / frame ufuncs.
for i, (ax1, ax2) in enumerate(zip(axes, obj.axes, strict=True)):
axes[i] = ax1.union(ax2)
reconstruct_axes = dict(zip(self._AXIS_ORDERS, axes, strict=True))
inputs = tuple(
x.reindex(**reconstruct_axes) if issubclass(t, NDFrame) else x
for x, t in zip(inputs, types, strict=True)
)
else:
reconstruct_axes = dict(zip(self._AXIS_ORDERS, self.axes, strict=True))
if self.ndim == 1:
names = {x.name for x in inputs if hasattr(x, "name")}
name = names.pop() if len(names) == 1 else None
reconstruct_kwargs = {"name": name}
else:
reconstruct_kwargs = {}
def reconstruct(result):
if ufunc.nout > 1:
# np.modf, np.frexp, np.divmod
return tuple(_reconstruct(x) for x in result)
return _reconstruct(result)
def _reconstruct(result):
if lib.is_scalar(result):
return result
if result.ndim != self.ndim:
if method == "outer":
raise NotImplementedError
return result
if isinstance(result, BlockManager):
# we went through BlockManager.apply e.g. np.sqrt
result = self._constructor_from_mgr(result, axes=result.axes)
else:
# we converted an array, lost our axes
result = self._constructor(
result, **reconstruct_axes, **reconstruct_kwargs, copy=False
)
# TODO: When we support multiple values in __finalize__, this
# should pass alignable to `__finalize__` instead of self.
# Then `np.add(a, b)` would consider attrs from both a and b
# when a and b are NDFrames.
if len(alignable) == 1:
result = result.__finalize__(self)
return result
if "out" in kwargs:
# e.g. test_multiindex_get_loc
result = dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs)
return reconstruct(result)
if method == "reduce":
# e.g. test.series.test_ufunc.test_reduce
result = dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs)
if result is not NotImplemented:
return result
# We still get here with kwargs `axis` for e.g. np.maximum.accumulate
# and `dtype` and `keepdims` for np.ptp
if self.ndim > 1 and (len(inputs) > 1 or ufunc.nout > 1):
# Just give up on preserving types in the complex case.
# In theory we could preserve them for them.
# * nout>1 is doable if BlockManager.apply took nout and
# returned a Tuple[BlockManager].
# * len(inputs) > 1 is doable when we know that we have
# aligned blocks / dtypes.
# e.g. my_ufunc, modf, logaddexp, heaviside, subtract, add
inputs = tuple(np.asarray(x) for x in inputs)
# Note: we can't use default_array_ufunc here bc reindexing means
# that `self` may not be among `inputs`
result = getattr(ufunc, method)(*inputs, **kwargs)
elif self.ndim == 1:
# ufunc(series, ...)
inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs)
result = getattr(ufunc, method)(*inputs, **kwargs)
else:
# ufunc(dataframe)
if method == "__call__" and not kwargs:
# for np.<ufunc>(..) calls
# kwargs cannot necessarily be handled block-by-block, so only
# take this path if there are no kwargs
mgr = inputs[0]._mgr # pyright: ignore[reportGeneralTypeIssues]
result = mgr.apply(getattr(ufunc, method))
else:
# otherwise specific ufunc methods (eg np.<ufunc>.accumulate(..))
# Those can have an axis keyword and thus can't be called block-by-block
result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs) # pyright: ignore[reportGeneralTypeIssues]
# e.g. np.negative (only one reached), with "where" and "out" in kwargs
result = reconstruct(result)
return result
def _standardize_out_kwarg(**kwargs) -> dict:
"""
If kwargs contain "out1" and "out2", replace that with a tuple "out"
np.divmod, np.modf, np.frexp can have either `out=(out1, out2)` or
`out1=out1, out2=out2)`
"""
if "out" not in kwargs and "out1" in kwargs and "out2" in kwargs:
out1 = kwargs.pop("out1")
out2 = kwargs.pop("out2")
out = (out1, out2)
kwargs["out"] = out
return kwargs
def dispatch_ufunc_with_out(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
"""
If we have an `out` keyword, then call the ufunc without `out` and then
set the result into the given `out`.
"""
# Note: we assume _standardize_out_kwarg has already been called.
out = kwargs.pop("out")
where = kwargs.pop("where", None)
result = getattr(ufunc, method)(*inputs, **kwargs)
if result is NotImplemented:
return NotImplemented
if isinstance(result, tuple):
# i.e. np.divmod, np.modf, np.frexp
if not isinstance(out, tuple) or len(out) != len(result):
raise NotImplementedError
for arr, res in zip(out, result, strict=True):
_assign_where(arr, res, where)
return out
if isinstance(out, tuple):
if len(out) == 1:
out = out[0]
else:
raise NotImplementedError
_assign_where(out, result, where)
return out
def _assign_where(out, result, where) -> None:
"""
Set a ufunc result into 'out', masking with a 'where' argument if necessary.
"""
if where is None:
# no 'where' arg passed to ufunc
out[:] = result
else:
np.putmask(out, where, result)
def default_array_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
"""
Fallback to the behavior we would get if we did not define __array_ufunc__.
Notes
-----
We are assuming that `self` is among `inputs`.
"""
if not any(x is self for x in inputs):
raise NotImplementedError
new_inputs = [x if x is not self else np.asarray(x) for x in inputs]
return getattr(ufunc, method)(*new_inputs, **kwargs)
def dispatch_reduction_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
"""
Dispatch ufunc reductions to self's reduction methods.
"""
assert method == "reduce"
if len(inputs) != 1 or inputs[0] is not self:
return NotImplemented
if ufunc.__name__ not in REDUCTION_ALIASES:
return NotImplemented
method_name = REDUCTION_ALIASES[ufunc.__name__]
# NB: we are assuming that min/max represent minimum/maximum methods,
# which would not be accurate for e.g. Timestamp.min
if not hasattr(self, method_name):
return NotImplemented
if self.ndim > 1:
if isinstance(self, ABCNDFrame):
# TODO: test cases where this doesn't hold, i.e. 2D DTA/TDA
kwargs["numeric_only"] = False
if "axis" not in kwargs:
# For DataFrame reductions we don't want the default axis=0
# Note: np.min is not a ufunc, but uses array_function_dispatch,
# so calls DataFrame.min (without ever getting here) with the np.min
# default of axis=None, which DataFrame.min catches and changes to axis=0.
# np.minimum.reduce(df) gets here bc axis is not in kwargs,
# so we set axis=0 to match the behavior of np.minimum.reduce(df.values)
kwargs["axis"] = 0
# By default, numpy's reductions do not skip NaNs, so we have to
# pass skipna=False
return getattr(self, method_name)(skipna=False, **kwargs)
| OpsMixin |
python | pytest-dev__pytest-django | tests/test_db_setup.py | {
"start": 8089,
"end": 9021
} | class ____:
db_settings: ClassVar = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "/tmp/should-not-be-used",
}
}
def test_sqlite_in_memory_used(self, django_pytester: DjangoPytester) -> None:
pytest.importorskip("xdist")
django_pytester.create_test_module(
"""
import pytest
from django.db import connections
@pytest.mark.django_db
def test_a():
(conn, ) = connections.all()
assert conn.vendor == 'sqlite'
db_name = conn.creation._get_test_db_name()
assert 'file:memorydb' in db_name or db_name == ':memory:'
"""
)
result = django_pytester.runpytest_subprocess("--tb=short", "-vv", "-n1")
assert result.ret == 0
result.stdout.fnmatch_lines(["*PASSED*test_a*"])
| TestSqliteWithXdist |
python | realpython__materials | python-parallel-processing/07_image_processing/image_processing_bonus.py | {
"start": 316,
"end": 5344
} | class ____(tk.Tk):
def __init__(self, image: PIL.Image.Image) -> None:
super().__init__()
# Main window
self.title("Exposure and Gamma Correction")
self.resizable(False, False)
# Parameters frame
self.frame = ttk.LabelFrame(self, text="Parameters")
self.frame.pack(fill=tk.X, padx=10, pady=10)
self.frame.columnconfigure(0, weight=0)
self.frame.columnconfigure(1, weight=1)
# Dropdown
self.var_mode = tk.StringVar(value=ProcessingMode.PYTHON)
mode_label = ttk.Label(self.frame, text="Mode:")
mode_label.grid(row=0, column=0, sticky=tk.W, padx=10, pady=10)
mode_dropdown = ttk.Combobox(
self.frame,
textvariable=self.var_mode,
values=list(ProcessingMode),
state="readonly",
)
mode_dropdown.grid(
row=0, column=1, sticky=tk.W + tk.E, padx=10, pady=10
)
# EV slider
self.var_ev = tk.DoubleVar(value=0)
ev_label = ttk.Label(self.frame, text="Exposure:")
ev_label.grid(row=1, column=0, sticky=tk.W, padx=10, pady=10)
ev_slider = ttk.Scale(
self.frame,
from_=-1,
to=1,
orient=tk.HORIZONTAL,
variable=self.var_ev,
)
ev_slider.bind("<B1-Motion>", self.on_slide)
ev_slider.grid(row=1, column=1, sticky=tk.W + tk.E, padx=10, pady=10)
# Gamma slider
self.var_gamma = tk.DoubleVar(value=1)
gamma_label = ttk.Label(self.frame, text="Gamma:")
gamma_label.grid(row=2, column=0, sticky=tk.W, padx=10, pady=10)
gamma_slider = ttk.Scale(
self.frame,
from_=0.1,
to=2,
orient=tk.HORIZONTAL,
variable=self.var_gamma,
)
gamma_slider.bind("<B1-Motion>", self.on_slide)
gamma_slider.grid(
row=2, column=1, sticky=tk.W + tk.E, padx=10, pady=10
)
# Image preview
self.preview = ttk.Label(self, relief=tk.SUNKEN)
self.preview.pack(padx=10, pady=10)
# Status bar
self.var_status = tk.StringVar()
status_bar = ttk.Label(
self,
anchor=tk.W,
relief=tk.SUNKEN,
textvariable=self.var_status,
)
status_bar.pack(side=tk.BOTTOM, fill=tk.X)
# Image pixels
self.pixels = np.array(image)
self.update()
self.show_preview(image)
self.mainloop()
def on_slide(self, *args, **kwargs) -> None:
# Get parameters
ev = 2.0 ** self.var_ev.get()
gamma = 1.0 / self.var_gamma.get()
# Process pixels
t1 = time.perf_counter()
pixels = self.process(ev, gamma)
t2 = time.perf_counter()
# Render preview
image = PIL.Image.fromarray(pixels)
self.show_preview(image)
t3 = time.perf_counter()
# Update status
self.var_status.set(
f"Processed in {(t2 - t1) * 1000:.0f} ms "
f"(Rendered in {(t3 - t1) * 1000:.0f} ms)"
)
def show_preview(self, image: PIL.Image.Image) -> None:
scale = 0.75
offset = 2.0 * self.frame.winfo_height()
image.thumbnail(
(
int(self.winfo_screenwidth() * scale),
int(self.winfo_screenheight() * scale - offset),
)
)
image_tk = PIL.ImageTk.PhotoImage(image)
self.preview.configure(image=image_tk)
self.preview.image = image_tk
def process(self, ev: float, gamma: float) -> np.ndarray:
match mode := self.var_mode.get():
case ProcessingMode.PYTHON:
return process_python(self.pixels, ev, gamma)
case ProcessingMode.NUMPY:
return process_numpy(self.pixels, ev, gamma)
case ProcessingMode.PARALLEL:
parallel.process(pixels := self.pixels.copy(), ev, gamma)
return pixels
case _:
raise ValueError(f"Invalid mode: {mode}")
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("image_path", type=pathlib.Path)
return parser.parse_args()
def main(args: argparse.Namespace) -> None:
with PIL.Image.open(args.image_path) as image:
AppWindow(image)
def process_python(pixels: np.ndarray, ev: float, gamma: float) -> np.ndarray:
lookup_table = [
int(min(max(0, (((i / 255.0) * ev) ** gamma) * 255), 255))
for i in range(256)
]
values = [lookup_table[x] for x in pixels.flat]
return np.array(values).astype(np.uint8).reshape(pixels.shape)
def process_numpy(pixels: np.ndarray, ev: float, gamma: float) -> np.ndarray:
lookup_table = (
((((np.arange(256) / 255.0) * ev) ** gamma) * 255)
.clip(0, 255)
.astype(np.uint8)
)
return lookup_table[pixels]
if __name__ == "__main__":
main(parse_args())
| AppWindow |
python | ray-project__ray | python/ray/serve/tests/unit/test_deployment_state.py | {
"start": 93761,
"end": 99003
} | class ____:
def test_default_value(self):
actor_replica = ActorReplicaWrapper(
version=deployment_version("1"),
replica_id=ReplicaID(
"abc123",
deployment_id=DeploymentID(name="test_deployment", app_name="test_app"),
),
)
assert (
actor_replica.graceful_shutdown_timeout_s
== DEFAULT_GRACEFUL_SHUTDOWN_TIMEOUT_S
)
assert actor_replica.max_ongoing_requests == DEFAULT_MAX_ONGOING_REQUESTS
assert actor_replica.health_check_period_s == DEFAULT_HEALTH_CHECK_PERIOD_S
assert actor_replica.health_check_timeout_s == DEFAULT_HEALTH_CHECK_TIMEOUT_S
def test_max_concurrency_override(self):
actor_replica = ActorReplicaWrapper(
version=deployment_version("1"),
replica_id=ReplicaID(
"abc123",
deployment_id=DeploymentID(name="test_deployment", app_name="test_app"),
),
)
max_ongoing_requests = DEFAULT_MAX_CONCURRENCY_ASYNC + 1
d_info, _ = deployment_info(max_ongoing_requests=max_ongoing_requests)
replica_scheduling_request = actor_replica.start(
d_info, assign_rank_callback=lambda x: 0
)
assert (
"max_concurrency" in replica_scheduling_request.actor_options
and replica_scheduling_request.actor_options["max_concurrency"]
== max_ongoing_requests
)
def test_get_active_node_ids(mock_deployment_state_manager):
"""Test get_active_node_ids() are collecting the correct node ids
When there are no running replicas, both methods should return empty results. When
the replicas are in the RUNNING state, get_running_replica_node_ids() should return
a list of all node ids. `get_active_node_ids()` should return a set
of all node ids.
"""
node1 = NodeID.from_random().hex()
node2 = NodeID.from_random().hex()
node_ids = (node1, node2, node2)
create_dsm, _, cluster_node_info_cache, _ = mock_deployment_state_manager
dsm = create_dsm()
cluster_node_info_cache.add_node(node1)
cluster_node_info_cache.add_node(node2)
# Deploy deployment with version "1" and 3 replicas
info1, v1 = deployment_info(version="1", num_replicas=3)
assert dsm.deploy(TEST_DEPLOYMENT_ID, info1)
ds = dsm._deployment_states[TEST_DEPLOYMENT_ID]
# When the replicas are in the STARTING state, `get_active_node_ids()` should
# return a set of node ids.
dsm.update()
check_counts(ds, total=3, by_state=[(ReplicaState.STARTING, 3, v1)])
mocked_replicas = ds._replicas.get()
for idx, mocked_replica in enumerate(mocked_replicas):
mocked_replica._actor.set_node_id(node_ids[idx])
assert ds.get_active_node_ids() == set(node_ids)
assert dsm.get_active_node_ids() == set(node_ids)
# When the replicas are in RUNNING state, `get_active_node_ids()` should
# return a set of `node_ids`.
for mocked_replica in mocked_replicas:
mocked_replica._actor.set_ready()
dsm.update()
check_counts(ds, total=3, by_state=[(ReplicaState.RUNNING, 3, v1)])
assert ds.get_active_node_ids() == set(node_ids)
assert dsm.get_active_node_ids() == set(node_ids)
for _ in mocked_replicas:
ds._stop_one_running_replica_for_testing()
dsm.update()
check_counts(
ds,
total=6,
by_state=[(ReplicaState.STOPPING, 3, v1), (ReplicaState.STARTING, 3, v1)],
)
def test_get_active_node_ids_none(mock_deployment_state_manager):
"""Test get_active_node_ids() are not collecting none node ids.
When the running replicas has None as the node id, `get_active_node_ids()` should
not include it in the set.
"""
node1 = NodeID.from_random().hex()
node2 = NodeID.from_random().hex()
node_ids = (node1, node2, node2)
create_dsm, _, cluster_node_info_cache, _ = mock_deployment_state_manager
dsm = create_dsm()
cluster_node_info_cache.add_node(node1)
cluster_node_info_cache.add_node(node2)
# Deploy deployment with version "1" and 3 replicas
info1, v1 = deployment_info(version="1", num_replicas=3)
assert dsm.deploy(TEST_DEPLOYMENT_ID, info1)
ds = dsm._deployment_states[TEST_DEPLOYMENT_ID]
# When the replicas are in the STARTING state, `get_active_node_ids()` should
# return a set of node ids.
dsm.update()
check_counts(ds, total=3, by_state=[(ReplicaState.STARTING, 3, v1)])
mocked_replicas = ds._replicas.get()
for idx, mocked_replica in enumerate(mocked_replicas):
mocked_replica._actor.set_node_id(node_ids[idx])
assert ds.get_active_node_ids() == set(node_ids)
assert dsm.get_active_node_ids() == set(node_ids)
# When the replicas are in the RUNNING state and are having None node id,
# `get_active_node_ids()` should return empty set.
for mocked_replica in mocked_replicas:
mocked_replica._actor.set_node_id(None)
mocked_replica._actor.set_ready()
dsm.update()
check_counts(ds, total=3, by_state=[(ReplicaState.RUNNING, 3, v1)])
assert None not in ds.get_active_node_ids()
assert None not in dsm.get_active_node_ids()
| TestActorReplicaWrapper |
python | pyca__cryptography | tests/hazmat/primitives/test_aead.py | {
"start": 19032,
"end": 26979
} | class ____:
@pytest.mark.skipif(
sys.platform not in {"linux", "darwin"} or sys.maxsize < 2**31,
reason="mmap and 64-bit platform required",
)
def test_data_too_large(self):
key = AESGCM.generate_key(128)
aesgcm = AESGCM(key)
nonce = b"0" * 12
large_data = large_mmap()
with pytest.raises(OverflowError):
aesgcm.encrypt(nonce, large_data, b"")
with pytest.raises(OverflowError):
aesgcm.encrypt(nonce, b"", large_data)
def test_decrypt_data_too_short(self):
key = AESGCM.generate_key(128)
aesgcm = AESGCM(key)
with pytest.raises(InvalidTag):
aesgcm.decrypt(b"0" * 12, b"0", None)
with pytest.raises(InvalidTag):
buf = bytearray(16)
aesgcm.decrypt_into(b"0" * 12, b"0", None, buf)
def test_vectors(self, backend, subtests):
vectors = _load_gcm_vectors()
for vector in vectors:
with subtests.test():
nonce = binascii.unhexlify(vector["iv"])
if backend._fips_enabled and len(nonce) != 12:
# Red Hat disables non-96-bit IV support as part of its
# FIPS patches.
pytest.skip("Non-96-bit IVs unsupported in FIPS mode.")
key = binascii.unhexlify(vector["key"])
aad = binascii.unhexlify(vector["aad"])
ct = binascii.unhexlify(vector["ct"])
pt = binascii.unhexlify(vector.get("pt", b""))
tag = binascii.unhexlify(vector["tag"])
aesgcm = AESGCM(key)
if vector.get("fail") is True:
with pytest.raises(InvalidTag):
aesgcm.decrypt(nonce, ct + tag, aad)
else:
computed_ct = aesgcm.encrypt(nonce, pt, aad)
assert computed_ct[:-16] == ct
assert computed_ct[-16:] == tag
computed_pt = aesgcm.decrypt(nonce, ct + tag, aad)
assert computed_pt == pt
@pytest.mark.parametrize(
("nonce", "data", "associated_data"),
[
[object(), b"data", b""],
[b"0" * 12, object(), b""],
[b"0" * 12, b"data", object()],
],
)
def test_params_not_bytes(self, nonce, data, associated_data, backend):
key = AESGCM.generate_key(128)
aesgcm = AESGCM(key)
with pytest.raises(TypeError):
aesgcm.encrypt(nonce, data, associated_data)
with pytest.raises(TypeError):
aesgcm.decrypt(nonce, data, associated_data)
@pytest.mark.parametrize("length", [7, 129])
def test_invalid_nonce_length(self, length, backend):
if backend._fips_enabled:
# Red Hat disables non-96-bit IV support as part of its FIPS
# patches.
pytest.skip("Non-96-bit IVs unsupported in FIPS mode.")
key = AESGCM.generate_key(128)
aesgcm = AESGCM(key)
with pytest.raises(ValueError):
aesgcm.encrypt(b"\x00" * length, b"hi", None)
with pytest.raises(ValueError):
buf = bytearray(length)
aesgcm.encrypt_into(b"\x00" * length, b"hi", None, buf)
with pytest.raises(ValueError):
aesgcm.decrypt(b"\x00" * length, b"hi", None)
with pytest.raises(ValueError):
buf = bytearray(16)
aesgcm.decrypt_into(b"\x00" * length, b"hi", None, buf)
def test_bad_key(self, backend):
with pytest.raises(TypeError):
AESGCM(object()) # type:ignore[arg-type]
with pytest.raises(ValueError):
AESGCM(b"0" * 31)
def test_bad_generate_key(self, backend):
with pytest.raises(TypeError):
AESGCM.generate_key(object()) # type:ignore[arg-type]
with pytest.raises(ValueError):
AESGCM.generate_key(129)
def test_associated_data_none_equal_to_empty_bytestring(self, backend):
key = AESGCM.generate_key(128)
aesgcm = AESGCM(key)
nonce = os.urandom(12)
ct1 = aesgcm.encrypt(nonce, b"some_data", None)
ct2 = aesgcm.encrypt(nonce, b"some_data", b"")
assert ct1 == ct2
pt1 = aesgcm.decrypt(nonce, ct1, None)
pt2 = aesgcm.decrypt(nonce, ct2, b"")
assert pt1 == pt2
def test_buffer_protocol(self, backend):
key = AESGCM.generate_key(128)
aesgcm = AESGCM(key)
pt = b"encrypt me"
ad = b"additional"
nonce = os.urandom(12)
ct = aesgcm.encrypt(nonce, pt, ad)
computed_pt = aesgcm.decrypt(nonce, ct, ad)
assert computed_pt == pt
aesgcm2 = AESGCM(bytearray(key))
ct2 = aesgcm2.encrypt(bytearray(nonce), bytearray(pt), bytearray(ad))
assert ct2 == ct
b_nonce = bytearray(nonce)
b_ct2 = bytearray(ct2)
b_ad = bytearray(ad)
computed_pt2 = aesgcm2.decrypt(b_nonce, b_ct2, b_ad)
assert computed_pt2 == pt
aesgcm3 = AESGCM(memoryview(key))
m_nonce = memoryview(nonce)
m_pt = memoryview(pt)
m_ad = memoryview(ad)
ct3 = aesgcm3.encrypt(m_nonce, m_pt, m_ad)
assert ct3 == ct
m_ct3 = memoryview(ct3)
computed_pt3 = aesgcm3.decrypt(m_nonce, m_ct3, m_ad)
assert computed_pt3 == pt
def test_encrypt_into(self, backend):
key = AESGCM.generate_key(128)
aesgcm = AESGCM(key)
nonce = os.urandom(12)
pt = b"encrypt me"
ad = b"additional"
buf = bytearray(len(pt) + 16)
n = aesgcm.encrypt_into(nonce, pt, ad, buf)
assert n == len(pt) + 16
ct = aesgcm.encrypt(nonce, pt, ad)
assert buf == ct
@pytest.mark.parametrize(
("ptlen", "buflen"), [(10, 25), (10, 27), (15, 30), (20, 37)]
)
def test_encrypt_into_buffer_incorrect_size(self, ptlen, buflen, backend):
key = AESGCM.generate_key(128)
aesgcm = AESGCM(key)
nonce = os.urandom(12)
pt = b"x" * ptlen
buf = bytearray(buflen)
with pytest.raises(ValueError, match="buffer must be"):
aesgcm.encrypt_into(nonce, pt, None, buf)
def test_decrypt_into(self, backend):
key = AESGCM.generate_key(128)
aesgcm = AESGCM(key)
nonce = os.urandom(12)
pt = b"decrypt me"
ad = b"additional"
ct = aesgcm.encrypt(nonce, pt, ad)
buf = bytearray(len(pt))
n = aesgcm.decrypt_into(nonce, ct, ad, buf)
assert n == len(pt)
assert buf == pt
@pytest.mark.parametrize(
("ctlen", "buflen"), [(26, 9), (26, 11), (31, 14), (36, 21)]
)
def test_decrypt_into_buffer_incorrect_size(self, ctlen, buflen, backend):
key = AESGCM.generate_key(128)
aesgcm = AESGCM(key)
nonce = os.urandom(12)
ct = b"x" * ctlen
buf = bytearray(buflen)
with pytest.raises(ValueError, match="buffer must be"):
aesgcm.decrypt_into(nonce, ct, None, buf)
def test_decrypt_into_invalid_tag(self, backend):
key = AESGCM.generate_key(128)
aesgcm = AESGCM(key)
nonce = os.urandom(12)
pt = b"some data"
ad = b"additional"
ct = aesgcm.encrypt(nonce, pt, ad)
# Corrupt the ciphertext
corrupted_ct = bytearray(ct)
corrupted_ct[0] ^= 1
buf = bytearray(len(pt))
with pytest.raises(InvalidTag):
aesgcm.decrypt_into(nonce, bytes(corrupted_ct), ad, buf)
@pytest.mark.skipif(
_aead_supported(AESOCB3),
reason="Requires OpenSSL without AESOCB3 support",
)
def test_aesocb3_unsupported_on_older_openssl(backend):
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
AESOCB3(AESOCB3.generate_key(128))
@pytest.mark.skipif(
not _aead_supported(AESOCB3),
reason="Does not support AESOCB3",
)
| TestAESGCM |
python | realpython__materials | python-guitar-synthesizer/source_code_final/src/digitar/burst.py | {
"start": 119,
"end": 249
} | class ____(Protocol):
def __call__(
self, num_samples: int, sampling_rate: Hertz
) -> np.ndarray: ...
| BurstGenerator |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_gradients.py | {
"start": 2288,
"end": 15397
} | class ____:
"""Gradients Debugger.
Allows retrieval of gradient tensors created by TensorFlow's automatic
differentiation algorithm, i.e., `tf.gradients` and optimizer classes that
use it.
"""
# TODO(cais): Add examples code in the doc string?
def __init__(self, y_tensor=None):
"""Constructor of GradientsDebugger.
Args:
y_tensor: optional: the `tf.Tensor` to be differentiated, i.e., the tensor
on the numerator of the differentiation.
"""
self._uuid = uuid.uuid4().hex
_gradient_debuggers[self._uuid] = self
# A dict mapping x-tensor names to gradient tensor. x-tensor refers to the
# independent tf.Tensor, i.e., the tensor on the denominator of the
# differentiation.
self._gradient_tensors = {}
self._y_tensor = y_tensor
self._graph = None
if y_tensor:
self._graph = y_tensor.graph
self._is_active_context = False
@property
def y_tensor(self):
return self._y_tensor
@property
def graph(self):
return self._graph
def __enter__(self):
self._is_active_context = True
def __exit__(self, unused_type, unused_value, unused_traceback):
self._is_active_context = False
def identify_gradient(self, input_tensor):
"""Create a debug identity tensor that registers and forwards gradients.
The side effect of this method is that when gradient tensor(s) are created
with respect to the any paths that include the `input_tensor`, the gradient
tensor(s) with respect to `input_tensor` will be registered with this
this `GradientsDebugger` instance and can later be retrieved, with the
methods `gradient_tensor` and `gradient_tensors`.
Example:
```python
x = tf.Variable(1.0)
y = tf.add(x, x)
grad_debugger = tf_debug.GradientsDebugger()
debug_y = grad_debugger.identify_gradient(y)
z = tf.square(debug_y)
# Create a train op under the grad_debugger context.
with grad_debugger:
train_op = tf.compat.v1.train.GradientDescentOptimizer(z)
# Now we can reflect through grad_debugger to get the gradient tensor
# with respect to y.
y_grad = grad_debugger.gradient_tensor(y)
```
Args:
input_tensor: the input `tf.Tensor` object whose related gradient tensors
are to be registered with this `GradientsDebugger` instance when they
are created, e.g., during `tf.gradients` calls or the construction
of optimization (training) op that uses `tf.gradients`.
Returns:
A forwarded identity of `input_tensor`, as a `tf.Tensor`.
Raises:
ValueError: If an op with name that duplicates the gradient-debugging op
already exists in the graph (highly unlikely).
"""
# TODO(cais): Allow overriding gradient.
# TODO(cais): Implement value_stack.
grad_debug_op_name = _tensor_to_grad_debug_op_name(input_tensor, self._uuid)
# pylint: disable=protected-access
identity_op = (
gen_array_ops.debug_gradient_ref_identity
if input_tensor.dtype._is_ref_dtype else
gen_array_ops.debug_gradient_identity)
# pylint: enable=protected-access
debug_grad_identity = identity_op(input_tensor, name=grad_debug_op_name)
assert debug_grad_identity.dtype == input_tensor.dtype
if debug_grad_identity.op.name != grad_debug_op_name:
raise ValueError(
"The graph already contains an op named %s" % grad_debug_op_name)
return debug_grad_identity
def watch_gradients_by_tensors(self, graph, tensors):
"""Watch gradient tensors by x-tensor(s).
The side effect of this method is that when gradient tensor(s) are created
with respect to the any paths that include the `x_tensor`s, the gradient
tensor(s) with respect to the tensor will be registered with this
this `GradientsDebugger` instance and can later be retrieved, with the
methods `gradient_tensor` and `gradient_tensors`.
Unlike the method `identify_gradient`, this method is used to retrieve
gradient tensors after the construction of the forward subgraph has
completed (but before the construction of the backward subgraph).
This method is the same as `watch_gradients_by_x_tensor_names` except that
the tensors are specified by the Python `tf.Tensor` or `tf.Variable`
objects, instead by name patterns.
Example:
```python
x = tf.Variable(1.0)
y = tf.add(x, x, name="y")
z = tf.square(debug_y)
# Create a train op under the grad_debugger context.
grad_debugger = tf_debug.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensors(y):
train_op = tf.compat.v1.train.GradientDescentOptimizer(z)
# Now we can reflect through grad_debugger to get the gradient tensor
# with respect to y.
y_grad = grad_debugger.gradient_tensor(y)
# or
y_grad = grad_debugger.gradient_tensor("y:0")
```
Args:
graph: the `tf.Graph` to watch the gradients on.
tensors: a `tf.Tensor` or `tf.Variable` object, or a list of such objects.
Returns:
The GradientsDebugger instance itself.
"""
if not isinstance(tensors, list):
tensors = [tensors]
tensor_name_regex = []
for tensor in tensors:
tensor_name_regex.append(re.escape(tensor.name) + "$")
tensor_name_regex = "(" + "|".join(tensor_name_regex) + ")"
return self.watch_gradients_by_tensor_names(graph, tensor_name_regex)
def watch_gradients_by_tensor_names(self, graph, tensor_name_regex):
"""Watch gradient tensors by name(s) of the x-tensor(s).
The side effect of this method is that when gradient tensor(s) are created
with respect to the x-tensors, the gradient tensor(s) will be registered
with this `GradientsDebugger` instance and can later be retrieved.
Unlike the `identify_gradient` method, this method is used after the
construction of the forward graph has completed. Unlike the
`watch_gradients_by_tensor` method, this method does not use handles to the
tensors of interest; it uses their names.
This method is the same as `watch_gradients_by_tensors` except that the
x-tensors are specified by name patterns, instead of `tf.Tensor` or
`tf.Variable` objects.
Example:
```python
x = tf.Variable(1.0, name="x")
y = tf.add(x, x, name="y")
z = tf.square(debug_y)
# Create a train op under the grad_debugger context.
grad_debugger = tf_debug.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(r"(x|y):0$"):
train_op = tf.compat.v1.train.GradientDescentOptimizer(z)
# Now we can reflect through grad_debugger to get the gradient tensor
# with respect to x and y.
x_grad = grad_debugger.gradient_tensor("x:0")
y_grad = grad_debugger.gradient_tensor("y:0")
```
Args:
graph: the `tf.Graph` to watch the gradients on.
tensor_name_regex: the regular-expression pattern of the name(s) of the
x-tensor(s) to watch. x-tensor refers to the tensors on the denominator
of the differentiation.
Returns:
The GradientsDebugger instance itself.
"""
tensor_name_pattern = re.compile(tensor_name_regex)
with graph.as_default():
for op in graph.get_operations():
for output in op.outputs:
if tensor_name_pattern.match(output.name):
debug_op = self.identify_gradient(output)
# Make a copy of output.consumers() since we'll modify the consumers
# TODO(skyewm): this is unnecessary once the C API is enabled
for consumer in list(output.consumers()):
if consumer == debug_op.op:
continue
# Locate the slot index of the original input.
for i, consumer_input in enumerate(consumer.inputs):
if consumer_input == output:
consumer._update_input(i, debug_op) # pylint: disable=protected-access
return self
def _check_same_graph(self, tensor):
if self._graph is None:
self._graph = tensor.graph
elif self._graph != tensor.graph:
raise ValueError(
"The graph of the value (%s) is not the same as the graph %s" %
(tensor.graph, self._graph))
def register_gradient_tensor(self,
x_tensor_name,
gradient_tensor):
"""Register the gradient tensor for an x-tensor.
Args:
x_tensor_name: (`str`) the name of the independent `tf.Tensor`, i.e.,
the tensor on the denominator of the differentiation.
gradient_tensor: the gradient `tf.Tensor`.
"""
if len(_gradient_debuggers) == 1 or self._is_active_context:
self._check_same_graph(gradient_tensor)
self._gradient_tensors[x_tensor_name] = gradient_tensor
def gradient_tensor(self, x_tensor):
"""Get the gradient tensor of an x-tensor.
Args:
x_tensor: (`tf.Tensor`, `tf.Variable` or `str`) The x-tensor object or its
name. x-tensor refers to the independent `tf.Tensor`, i.e., the tensor
on the denominator of the differentiation.
Returns:
If found, the gradient tensor.
Raises:
TypeError: If `x_tensor` is not a `tf.Tensor`, `tf.Variable` or `str`.
LookupError: If the `x_tensor` has not been registered with a gradient
tensor.
"""
x_tensor_name = self._get_tensor_name(x_tensor)
if x_tensor_name not in self._gradient_tensors:
raise LookupError(
"This GradientsDebugger has not received any gradient tensor for "
"x-tensor %s" % x_tensor_name)
return self._gradient_tensors[x_tensor_name]
def gradient_tensors(self):
"""Get the gradient tensors that this object is aware of.
Returns:
A dict mapping x-tensor names to gradient tensor objects. x-tensor refers
to the tensors on the denominator of the differentation.
"""
return self._gradient_tensors
def _get_tensor_name(self, tensor):
if isinstance(tensor, (tensor_lib.Tensor, variables.Variable)):
return tensor.name
elif isinstance(tensor, str):
return tensor
else:
raise TypeError(
"x_tensor must be a str or tf.Tensor or tf.Variable, "
"but instead has type %s" % type(tensor))
def clear_gradient_debuggers():
"""Clear all globally registered gradient debuggers."""
_gradient_debuggers.clear()
@ops.RegisterGradient("DebugGradientIdentity")
def _identify_gradient_grad(op, dy):
"""Gradient function for the DebugIdentity op."""
# TODO(cais): Allow overriding gradient.
grad_debugger_uuid, orig_tensor_name = _parse_grad_debug_op_name(op.name)
grad_debugger = _gradient_debuggers[grad_debugger_uuid]
grad_debugger.register_gradient_tensor(orig_tensor_name, dy)
return dy
@ops.RegisterGradient("DebugGradientRefIdentity")
def _identify_gradient_grad_ref(op, dy):
"""Gradient function for the DebugIdentity op."""
return _identify_gradient_grad(op, dy)
def gradient_values_from_dump(grad_debugger, x_tensor, dump):
"""Find gradient values from a `DebugDumpDir` object.
Args:
grad_debugger: the `tf_debug.GradientsDebugger` instance to be used.
x_tensor: (`tf.Tensor`, `tf.Variable` or `str`) The x-tensor object or its
name. x-tensor refers to the independent `tf.Tensor`, i.e., the tensor
on the denominator of the differentiation.
dump: A `tfdbg.DebugDumpDir` object.
Returns:
If this `GradientsDebugger` instance has the gradient tensor of `x_tensor`
registered: a list of `numpy.ndarray` representing the value of the
gradient tensor from `dump`. The list could be empty, if the gradient
tensor is not executed in the `tf.Session.run()` call that generated
the `dump`. The list could also contain multiple values of the gradient
tensor, e.g., if gradient tensor is computed repeatedly in a
`tf.while_loop` during the run that generated the `dump`.
Raises:
LookupError: If this `GradientsDebugger` instance does not have the
gradient tensor of `x_tensor` registered.
ValueError: If this `GradientsDebugger` has a `tf.Graph` object that
does not match the `tf.Graph` object of the `dump`.
TypeError: If `x_tensor` is not a `tf.Tensor`, `tf.Variable` or `str`.
"""
# TODO(cais): Use this method in LocalCLIDebugWrapperSession to present the
# gradient tensors to the TFDBG CLI.
# If possible, verify that the Python graph of the dump and that of this
# GradientsDebugger match.
if (dump.python_graph and grad_debugger.graph and
dump.python_graph != grad_debugger.graph):
raise ValueError(
"This GradientsDebugger instance has a graph (%s) that differs from "
"the graph of the DebugDumpDir object (%s)." %
(grad_debugger.graph, dump.python_graph))
gradient_tensor = grad_debugger.gradient_tensor(x_tensor)
node_name, output_slot = debug_graphs.parse_node_or_tensor_name(
gradient_tensor.name)
try:
return dump.get_tensors(node_name, output_slot, "DebugIdentity")
except debug_data.WatchKeyDoesNotExistInDebugDumpDirError:
return []
| GradientsDebugger |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 692974,
"end": 693438
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of MarkProjectV2AsTemplate"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "project_v2")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
project_v2 = sgqlc.types.Field("ProjectV2", graphql_name="projectV2")
"""The project."""
| MarkProjectV2AsTemplatePayload |
python | fluentpython__example-code-2e | 05-data-classes/dataclass/hackerclub_annotated.py | {
"start": 1107,
"end": 1542
} | class ____(ClubMember):
all_handles: ClassVar[set[str]] = set()
handle: str = ''
def __post_init__(self):
cls = self.__class__
if self.handle == '':
self.handle = self.name.split()[0]
if self.handle in cls.all_handles:
msg = f'handle {self.handle!r} already exists.'
raise ValueError(msg)
cls.all_handles.add(self.handle)
# end::HACKERCLUB[]
| HackerClubMember |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass1.py | {
"start": 1195,
"end": 1387
} | class ____:
bbb: int
aaa: str = "string"
# This should not generate an error because
# the ordering requirement is not enforced when
# init=False.
ccc: str
@dataclass
| DC4 |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_index.py | {
"start": 643,
"end": 791
} | class ____:
def __index__(self):
return return_index() # [invalid-index-return]
# These testcases should NOT raise errors
| ComplexReturn |
python | kamyu104__LeetCode-Solutions | Python/distinct-echo-substrings.py | {
"start": 122,
"end": 1273
} | class ____(object):
def distinctEchoSubstrings(self, text):
"""
:type text: str
:rtype: int
"""
def KMP(text, l, result):
prefix = [-1]*(len(text)-l)
j = -1
for i in xrange(1, len(prefix)):
while j > -1 and text[l+j+1] != text[l+i]:
j = prefix[j]
if text[l+j+1] == text[l+i]:
j += 1
prefix[i] = j
if (j+1) and (i+1) % ((i+1) - (j+1)) == 0 and \
(i+1) // ((i+1) - (j+1)) % 2 == 0:
result.add(text[l:l+i+1])
return len(prefix)-(prefix[-1]+1) \
if prefix[-1]+1 and len(prefix) % (len(prefix)-(prefix[-1]+1)) == 0 \
else float("inf")
result = set()
i, l = 0, len(text)-1
while i < l: # aaaaaaaaaaaaaaaaaaaaaaaaaaaaaabcdefabcdefabcdef
l = min(l, i + KMP(text, i, result))
i += 1
return len(result)
# Time: O(n^2 + d), d is the duplicated of result substrings size
# Space: O(r), r is the size of result substrings set
| Solution |
python | oauthlib__oauthlib | tests/openid/connect/core/endpoints/test_openid_connect_params_handling.py | {
"start": 371,
"end": 3012
} | class ____(TestCase):
def setUp(self):
self.mock_validator = mock.MagicMock()
self.mock_validator.authenticate_client.side_effect = self.set_client
grant = AuthorizationCodeGrant(request_validator=self.mock_validator)
bearer = BearerToken(self.mock_validator)
self.endpoint = AuthorizationEndpoint(grant, bearer,
response_types={'code': grant})
params = {
'prompt': 'consent',
'display': 'touch',
'nonce': 'abcd',
'state': 'abc',
'redirect_uri': 'https://a.b/cb',
'response_type': 'code',
'client_id': 'abcdef',
'scope': 'hello openid',
'ui_locales': 'en-US'
}
self.url = 'http://a.b/path?' + urlencode(params)
def set_client(self, request):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
@mock.patch('oauthlib.common.generate_token')
def test_authorization_endpoint_handles_prompt(self, generate_token):
generate_token.return_value = "MOCK_CODE"
# In the GET view:
scopes, creds = self.endpoint.validate_authorization_request(self.url)
# In the POST view:
creds['scopes'] = scopes
h, b, s = self.endpoint.create_authorization_response(self.url,
credentials=creds)
expected = 'https://a.b/cb?state=abc&code=MOCK_CODE'
self.assertURLEqual(h['Location'], expected)
self.assertIsNone(b)
self.assertEqual(s, 302)
def test_prompt_none_exclusiveness(self):
"""
Test that prompt=none can't be used with another prompt value.
"""
params = {
'prompt': 'none consent',
'state': 'abc',
'redirect_uri': 'https://a.b/cb',
'response_type': 'code',
'client_id': 'abcdef',
'scope': 'hello openid'
}
url = 'http://a.b/path?' + urlencode(params)
with self.assertRaises(InvalidRequestError):
self.endpoint.validate_authorization_request(url)
def test_oidc_params_preservation(self):
"""
Test that the nonce parameter is passed through.
"""
scopes, creds = self.endpoint.validate_authorization_request(self.url)
self.assertEqual(creds['prompt'], {'consent'})
self.assertEqual(creds['nonce'], 'abcd')
self.assertEqual(creds['display'], 'touch')
self.assertEqual(creds['ui_locales'], ['en-US'])
| OpenIDConnectEndpointTest |
python | PyCQA__pylint | tests/functional/m/member/member_checks.py | {
"start": 4191,
"end": 4330
} | class ____:
__slots__ = ('a', )
def __init__(self):
var = self.teta # [no-member]
self.teta = 24
| InvalidAccessBySlots |
python | realpython__materials | django-todo-list/source_code_final/todo_app/migrations/0001_initial.py | {
"start": 148,
"end": 1287
} | class ____(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ToDoList',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, unique=True)),
],
),
migrations.CreateModel(
name='ToDoItem',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('description', models.TextField(blank=True, null=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
('due_date', models.DateTimeField(default=todo_app.models.one_week_hence)),
('todo_list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='todo_app.todolist')),
],
options={
'ordering': ['due_date'],
},
),
]
| Migration |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 55218,
"end": 58432
} | class ____(Operation):
def __init__(self, from_logits=False, axis=-1, *, name=None):
super().__init__(name=name)
self.from_logits = from_logits
self.axis = axis
def call(self, target, output):
return backend.nn.categorical_crossentropy(
target, output, from_logits=self.from_logits, axis=self.axis
)
def compute_output_spec(self, target, output):
if target.shape != output.shape:
raise ValueError(
"Arguments `target` and `output` must have the same shape. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if len(target.shape) < 1:
raise ValueError(
"Arguments `target` and `output` must be at least rank 1. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
return KerasTensor(output.shape[:-1], dtype=output.dtype)
@keras_export(
[
"keras.ops.categorical_crossentropy",
"keras.ops.nn.categorical_crossentropy",
]
)
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Computes categorical cross-entropy loss between target and output tensor.
The categorical cross-entropy loss is commonly used in multi-class
classification tasks where each input sample can belong to one of
multiple classes. It measures the dissimilarity
between the target and output probabilities or logits.
Args:
target: The target tensor representing the true categorical labels.
Its shape should match the shape of the `output` tensor
except for the last dimension.
output: The output tensor representing the predicted probabilities
or logits. Its shape should match the shape of the `target`
tensor except for the last dimension.
from_logits: (optional) Whether `output` is a tensor of logits or
probabilities.
Set it to `True` if `output` represents logits; otherwise,
set it to `False` if `output` represents probabilities.
Defaults to `False`.
axis: (optional) The axis along which the categorical cross-entropy
is computed.
Defaults to `-1`, which corresponds to the last dimension of
the tensors.
Returns:
Integer tensor: The computed categorical cross-entropy loss between
`target` and `output`.
Example:
>>> target = keras.ops.convert_to_tensor(
... [[1, 0, 0],
... [0, 1, 0],
... [0, 0, 1]])
>>> output = keras.ops.convert_to_tensor(
... [[0.9, 0.05, 0.05],
... [0.1, 0.8, 0.1],
... [0.2, 0.3, 0.5]])
>>> categorical_crossentropy(target, output)
array([0.10536054 0.22314355 0.6931472 ], shape=(3,), dtype=float32)
"""
if any_symbolic_tensors((target, output)):
return CategoricalCrossentropy(
from_logits=from_logits, axis=axis
).symbolic_call(target, output)
return backend.nn.categorical_crossentropy(
target, output, from_logits=from_logits, axis=axis
)
| CategoricalCrossentropy |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/cluster_coordinator.py | {
"start": 11564,
"end": 12242
} | class ____(Closure):
"""A closure that builds a resource on a worker.
ResourceClosures keep a reference to the closure object, which is used to
rerun the closure upon recovery to ensure workers have access to the
resources they need.
"""
def _init_remote_value(self):
return RemoteValueImpl(self, self._output_type_spec)
def build_output_remote_value(self):
if self._output_remote_value_ref is None:
# We need to remember the Closure object in the `RemoteValue` here.
ret = self._init_remote_value()
self._output_remote_value_ref = weakref.ref(ret)
return ret
else:
return self._output_remote_value_ref()
| ResourceClosure |
python | realpython__materials | python-mutable-immutable/point.py | {
"start": 594,
"end": 809
} | class ____:
x = Coordinate()
y = Coordinate()
def __init__(self, x, y):
self._x = x
self._y = y
def __repr__(self):
return f"{type(self).__name__}(x={self.x}, y={self.y})"
| Point |
python | geekcomputers__Python | BlackJack_game/blackjack_rr.py | {
"start": 1730,
"end": 6982
} | class ____:
def __init__(self):
self.total = 100
self.bet = 0
def win_bet(self):
self.total += self.bet
def lose_bet(self):
self.total -= self.bet
def take_bet(chips):
while True:
try:
chips.bet = int(input("How many chips would you like to bet? "))
except ValueError:
print("Your bet must be an integer! Try again.")
else:
if chips.bet > chips.total or chips.bet <= 0:
print(
"Your bet cannot exceed your balance and you have to enter a positive bet! Your current balance is: ",
chips.total,
)
else:
break
def hit(deck, hand):
hand.add_card(deck.deal())
hand.adjust_for_ace()
def hit_or_stand(deck, hand):
global playing
while True:
x = input("Would you like to Hit or Stand? Enter '1' or '0' ")
if x.lower() == "1":
hit(deck, hand)
elif x.lower() == "0":
print("You chose to stand. Dealer will hit.")
playing = False
else:
print("Wrong input, please try again.")
continue
break
def show_some(player, dealer):
print("\nDealer's Hand:")
print(" { hidden card }")
print("", dealer.cards[1])
print("\nYour Hand:", *player.cards, sep="\n ")
def show_all(player, dealer):
print("\nDealer's Hand:", *dealer.cards, sep="\n ")
print("Dealer's Hand =", dealer.value)
print("\nYour Hand:", *player.cards, sep="\n ")
print("Your Hand =", player.value)
def player_busts(player, dealer, chips):
print("You are BUSTED !")
chips.lose_bet()
def player_wins(player, dealer, chips):
print("You are the winner!")
chips.win_bet()
def dealer_busts(player, dealer, chips):
print("Dealer has BUSTED !")
chips.win_bet()
def dealer_wins(player, dealer, chips):
print("Dealer is the winner!")
chips.lose_bet()
def push(player, dealer):
print("The match is tie !")
# GAMEPLAY
player_chips = Chips()
while True:
print("\t **********************************************************")
print(
"\t Welcome to the game Casino - BLACK JACK ! "
)
print("\t **********************************************************")
print(Colour.BLACK + "\t ***************")
print("\t * A *")
print("\t * *")
print("\t * * *")
print("\t * *** *")
print("\t * ***** *")
print("\t * *** *")
print("\t * * *")
print("\t * *")
print("\t * *")
print("\t ***************" + Colour.END)
print(
"\nRULES: Get as close to 21 as you can but if you get more than 21 you will lose!\n Aces count as 1 or 11."
)
deck = Deck()
deck.shuffle()
player_hand = Hand()
player_hand.add_card(deck.deal())
player_hand.add_card(deck.deal())
dealer_hand = Hand()
dealer_hand.add_card(deck.deal())
dealer_hand.add_card(deck.deal())
take_bet(player_chips)
show_some(player_hand, dealer_hand)
while playing:
hit_or_stand(deck, player_hand)
show_some(player_hand, dealer_hand)
if player_hand.value > 21:
player_busts(player_hand, dealer_hand, player_chips)
break
if player_hand.value <= 21:
while dealer_hand.value < 17:
hit(deck, dealer_hand)
show_all(player_hand, dealer_hand)
if dealer_hand.value > 21:
dealer_busts(player_hand, dealer_hand, player_chips)
elif dealer_hand.value > player_hand.value:
dealer_wins(player_hand, dealer_hand, player_chips)
elif dealer_hand.value < player_hand.value:
player_wins(player_hand, dealer_hand, player_chips)
else:
push(player_hand, dealer_hand)
print("\nYour current balance stands at", player_chips.total)
if player_chips.total > 0:
new_game = input("Would you like to play another hand? Enter '1' or '0' ")
if new_game.lower() == "1":
playing = True
continue
else:
print(
"Thanks for playing!\n"
+ Colour.GREEN
+ "\t$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n \t Congratulations! You won "
+ str(player_chips.total)
+ " coins!\n\t$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n "
+ Colour.END
)
break
else:
print(
"Oops! You have bet all your chips and we are sorry you can't play more.\nThanks for playing! Do come again to Casino BLACK JACK!"
)
break
| Chips |
python | BugSplat-Git__bugsplat-py | src/bugsplat/bugsplat.py | {
"start": 232,
"end": 3532
} | class ____:
def __init__(self,
database: str,
application: str,
version: str,
logger: logging.Logger = None):
self.database = database
self.application = application
self.version = version
self.additional_file_paths = []
self.app_key = ''
self.description = ''
self.email = ''
self.user = ''
self.logger = logger or logging.getLogger(__name__)
def set_default_additional_file_paths(self, additional_file_paths: List[PathLike]):
self.additional_file_paths = additional_file_paths
def set_default_app_key(self, key: str):
self.app_key = key
def set_default_description(self, description: str):
self.description = description
def set_default_email(self, email: str):
self.email = email
def set_default_user(self, user: str):
self.user = user
def post(self,
ex: BaseException or str,
additional_file_paths: List[PathLike] = None,
app_key: str = '',
description: str = '',
email: str = '',
user: str = ''):
if not additional_file_paths:
additional_file_paths = self.additional_file_paths
self.logger.info('\nBugSplat caught an Unhandled Exception!\n')
# TODO BG what if ex is not defined? Do we care?
# https://stackoverflow.com/q/3702675/4272428
callstack = self._convert_exception_to_json(ex)
self.logger.info(f'About to post crash to database {self.database}...\n')
url = f'https://{self.database}.bugsplat.com/post/py/'
files = self._create_files_for_post(additional_file_paths)
data = {
'database': self.database,
'appName': self.application,
'appVersion': self.version,
'appKey': app_key or self.app_key,
'description': description or self.description,
'exceptionMessage': str(ex),
'email': email or self.email,
'user': user or self.user,
'callstack': callstack
}
try:
response = requests.post(url, files=files, data=data)
if response.status_code != 200:
raise Exception(
f'Status: {response.status_code} \n Message: {response.text}'
)
self.logger.info('Crash posted successfully!')
except Exception as ex:
self.logger.exception('Crash post failed!', exc_info=ex)
@staticmethod
def _convert_exception_to_json(ex: BaseException):
def frame_summary_to_dict(s: traceback.FrameSummary):
return {
'filename': s.filename,
'line': s.line,
'lineno': s.lineno,
'locals': s.locals,
'name': s.name
}
tb = traceback.TracebackException.from_exception(ex, capture_locals=True)
return json.dumps([frame_summary_to_dict(t) for t in tb.stack])
@staticmethod
def _create_files_for_post(paths: List[PathLike]):
files = {}
for p in paths:
name = Path(p).name
files[name] = open(p, 'rb')
return files
| BugSplat |
python | ansible__ansible | lib/ansible/plugins/callback/tree.py | {
"start": 1385,
"end": 3619
} | class ____(CallbackBase):
"""
This callback puts results into a host specific file in a directory in json format.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'tree'
CALLBACK_NEEDS_ENABLED = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._display.deprecated( # pylint: disable=ansible-deprecated-unnecessary-collection-name
msg='The tree callback plugin is deprecated.',
version='2.23',
deprecator=_deprecator.ANSIBLE_CORE_DEPRECATOR, # entire plugin being removed; this improves the messaging
)
def set_options(self, task_keys=None, var_options=None, direct=None):
""" override to set self.tree """
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
if TREE_DIR:
# TREE_DIR comes from the CLI option --tree, only available for adhoc
self.tree = unfrackpath(TREE_DIR)
else:
self.tree = self.get_option('directory')
def write_tree_file(self, hostname, buf):
""" write something into treedir/hostname """
buf = to_bytes(buf)
try:
makedirs_safe(self.tree)
except OSError as ex:
self._display.error_as_warning(f"Unable to access or create the configured directory {self.tree!r}.", exception=ex)
try:
path = to_bytes(os.path.join(self.tree, hostname))
with open(path, 'wb+') as fd:
fd.write(buf)
except OSError as ex:
self._display.error_as_warning(f"Unable to write to {hostname!r}'s file.", exception=ex)
def result_to_tree(self, result: CallbackTaskResult) -> None:
self.write_tree_file(result.host.get_name(), self._dump_results(result.result))
def v2_runner_on_ok(self, result: CallbackTaskResult) -> None:
self.result_to_tree(result)
def v2_runner_on_failed(self, result: CallbackTaskResult, ignore_errors: bool = False) -> None:
self.result_to_tree(result)
def v2_runner_on_unreachable(self, result: CallbackTaskResult) -> None:
self.result_to_tree(result)
| CallbackModule |
python | ray-project__ray | rllib/algorithms/tests/test_node_failures.py | {
"start": 694,
"end": 7305
} | class ____(unittest.TestCase):
def setUp(self):
# Simulate a cluster on one machine.
self.cluster = Cluster()
for i in range(num_nodes):
self.cluster.add_node(
redis_port=6379 if i == 0 else None,
num_cpus=2,
num_gpus=0,
object_store_memory=object_store_memory,
dashboard_host="0.0.0.0",
)
self.cluster.wait_for_nodes()
ray.init(address=self.cluster.address)
def tearDown(self):
ray.shutdown()
self.cluster.shutdown()
def test_node_failure_ignore(self):
# We ignore EnvRunners once failed nodes have come back and continue training
# with fewer EnvRunners.
config = (
PPOConfig()
.environment("CartPole-v1")
.env_runners(
num_env_runners=6,
validate_env_runners_after_construction=True,
)
.fault_tolerance(
ignore_env_runner_failures=True,
restart_failed_env_runners=False,
)
)
self._train(config=config, iters=10, min_reward=150.0, preempt_freq=4)
def test_node_failure_recreate_env_runners(self):
# We recreate failed EnvRunners and continue training.
config = (
APPOConfig()
.environment("CartPole-v1")
.learners(num_learners=0)
.experimental(_validate_config=False)
.env_runners(
num_env_runners=6,
validate_env_runners_after_construction=True,
)
.fault_tolerance(
restart_failed_env_runners=True,
ignore_env_runner_failures=False, # True also ok here; we restart.
)
)
self._train(config=config, iters=20, min_reward=300.0, preempt_freq=5)
config = (
PPOConfig()
.environment("CartPole-v1")
.env_runners(
num_env_runners=6,
validate_env_runners_after_construction=True,
)
.fault_tolerance(
restart_failed_env_runners=True,
ignore_env_runner_failures=False, # True also ok here; we restart.
)
)
self._train(config=config, iters=20, min_reward=300.0, preempt_freq=5)
def test_node_failure_expect_crash(self):
# We do not ignore EnvRunner failures and expect to crash upon failure.
config = (
PPOConfig()
.environment("CartPole-v1")
.env_runners(
num_env_runners=6,
validate_env_runners_after_construction=True,
)
.fault_tolerance(
ignore_env_runner_failures=False,
restart_failed_env_runners=False,
)
)
self.assertRaisesRegex(
ray.exceptions.RayError,
"The actor died unexpectedly before",
lambda: (
self._train(config=config, iters=10, min_reward=1000.0, preempt_freq=2)
),
)
def _train(self, *, config, iters, min_reward, preempt_freq):
algo = config.build()
best_return = 0.0
for i in range(iters):
results = algo.train()
print(f"ITER={i} of {iters} results={results}")
best_return = max(
best_return, results[ENV_RUNNER_RESULTS][EPISODE_RETURN_MEAN]
)
avg_batch = results[LEARNER_RESULTS][DEFAULT_MODULE_ID][
MODULE_TRAIN_BATCH_SIZE_MEAN
]
if config.algo_class.__name__ == "PPO":
exp_batch_size = config.minibatch_size
else:
exp_batch_size = config.total_train_batch_size
self.assertGreaterEqual(avg_batch, exp_batch_size)
self.assertLess(
avg_batch,
exp_batch_size + config.get_rollout_fragment_length(),
)
self.assertEqual(algo.env_runner_group.num_remote_env_runners(), 6)
healthy_env_runners = algo.env_runner_group.num_healthy_remote_workers()
# After node has been removed and we recreate failed EnvRunners, we'd expect
# 2 EnvRunners to be gone.
# If we ignore EnvRunner failures, and both nodes have been shut down at
# least once, we might even only see 2 EnvRunners left (the ones on the head
# node, which are always safe from preemption).
if (i - 1) % preempt_freq == 0:
if config.restart_failed_env_runners:
# For async algos that call `restore_env_runners()` several times
# per iteration, the failed env runners may have already been
# restored.
if isinstance(config, APPOConfig):
self.assertIn(healthy_env_runners, [4, 6])
else:
self.assertEqual(healthy_env_runners, 4)
elif config.ignore_env_runner_failures:
self.assertIn(healthy_env_runners, [2, 4])
# After the 0th iteration, in which we already killed one node, if
# we don't recreate, the number of EnvRunners should be 2 (only head
# EnvRunners left) or 4 (one node down).
elif i > 0 and not config.restart_failed_env_runners:
self.assertIn(healthy_env_runners, [2, 4])
# Otherwise, all EnvRunners should be there (but might still be in the
# process of coming up).
else:
self.assertIn(healthy_env_runners, [4, 5, 6])
# Shut down one node every n iterations.
if i % preempt_freq == 0:
to_kill = get_other_nodes(self.cluster, exclude_head=True)[0]
print(f"Killing node {to_kill} ...")
self.cluster.remove_node(to_kill)
# Bring back a previously failed node.
elif (i - 1) % preempt_freq == 0:
print("Bringing back node ...")
self.cluster.add_node(
redis_port=None,
num_cpus=2,
num_gpus=0,
object_store_memory=object_store_memory,
dashboard_host="0.0.0.0",
)
algo.stop()
self.assertGreaterEqual(best_return, min_reward)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestNodeFailures |
python | pandas-dev__pandas | asv_bench/benchmarks/series_methods.py | {
"start": 4602,
"end": 4859
} | class ____:
def setup(self):
dr = date_range("20220101", periods=100_000, freq="s", tz="UTC")
self.clipper_dt = dr[0:1_000].repeat(100)
self.s = Series(dr)
def time_clip(self):
self.s.clip(upper=self.clipper_dt)
| ClipDt |
python | pyqtgraph__pyqtgraph | pyqtgraph/console/CmdInput.py | {
"start": 44,
"end": 1634
} | class ____(QtWidgets.QLineEdit):
sigExecuteCmd = QtCore.Signal(object)
def __init__(self, parent):
QtWidgets.QLineEdit.__init__(self, parent)
self.ps1 = ">>> "
self.ps2 = "... "
self.history = [""]
self.ptr = 0
font = QtGui.QFont("monospace")
font.setStyleHint(QtGui.QFont.StyleHint.TypeWriter, QtGui.QFont.StyleStrategy.PreferAntialias)
self.setFont(font)
self.setMultiline(False)
def setMultiline(self, ml):
if ml:
self.setPlaceholderText(self.ps2)
else:
self.setPlaceholderText(self.ps1)
def keyPressEvent(self, ev):
if ev.key() == QtCore.Qt.Key.Key_Up:
if self.ptr < len(self.history) - 1:
self.setHistory(self.ptr+1)
ev.accept()
return
elif ev.key() == QtCore.Qt.Key.Key_Down:
if self.ptr > 0:
self.setHistory(self.ptr-1)
ev.accept()
return
elif ev.key() in (QtCore.Qt.Key.Key_Return, QtCore.Qt.Key.Key_Enter):
self.execCmd()
else:
super().keyPressEvent(ev)
self.history[0] = self.text()
def execCmd(self):
cmd = self.text()
if len(self.history) == 1 or cmd != self.history[1]:
self.history.insert(1, cmd)
self.history[0] = ""
self.setHistory(0)
self.sigExecuteCmd.emit(cmd)
def setHistory(self, num):
self.ptr = num
self.setText(self.history[self.ptr])
| CmdInput |
python | astropy__astropy | astropy/coordinates/tests/test_masked.py | {
"start": 8752,
"end": 12338
} | class ____(MaskedSphericalSetup):
"""Tests that mask is calculated properly for frames, using FK5."""
@classmethod
def setup_class(cls):
super().setup_class()
cls.fk5 = FK5(cls.msph)
def test_initialization_directly(self):
d = Masked([50, 1.0] * u.kpc, mask=[False, True])
fk5 = FK5([0, 30] * u.deg, [-10, 10] * u.deg, distance=d)
assert fk5.masked
assert_array_equal(fk5.ra.mask, [False, False])
assert_array_equal(fk5.dec.mask, [False, False])
assert_array_equal(fk5.distance.mask, [False, True])
assert_array_equal(fk5.mask, [False, True])
assert_array_equal(fk5.get_mask("ra", "dec"), False)
assert "—" in repr(fk5)
def test_class_initialization(self):
assert_array_equal(self.fk5.ra.mask, self.mask_lon)
assert_array_equal(self.fk5.dec.mask, self.mask_lat)
assert_array_equal(self.fk5.distance.mask, self.mask_dis)
assert_array_equal(self.fk5.mask, self.mask)
assert_array_equal(self.fk5.get_mask("ra", "dec"), self.mask_ang)
unmasked = self.fk5.unmasked
assert_array_equal(unmasked.ra, self.lon)
assert_array_equal(unmasked.dec, self.lat)
assert_array_equal(unmasked.distance, self.dis)
def test_cache_clearing(self):
mfk5 = self.fk5.copy()
assert_array_equal(mfk5.ra.mask, self.mask_lon)
assert "—" in repr(mfk5)
mfk5[...] = np.ma.nomask
assert_array_equal(mfk5.data.mask, np.zeros(mfk5.shape, bool))
assert_array_equal(mfk5.ra.mask, np.zeros(mfk5.shape, bool))
assert "—" not in repr(mfk5)
mfk5[...] = self.fk5
assert_array_equal(mfk5.ra.mask, self.mask_lon)
assert "—" in repr(mfk5)
def test_cartesian(self):
mcart = FK5(self.msph.represent_as(r.CartesianRepresentation))
assert_array_equal(mcart.mask, self.mask)
def test_unit_spherical(self):
musph = FK5(self.msph.represent_as(r.UnitSphericalRepresentation))
assert_array_equal(musph.mask, self.mask_ang)
assert_array_equal(musph.get_mask(), self.mask_ang)
def test_physics_spherical(self):
mpsph = FK5(self.msph.represent_as(r.PhysicsSphericalRepresentation))
assert_array_equal(mpsph.mask, self.mask)
assert_array_equal(mpsph.get_mask("data.phi", "data.theta"), self.mask_ang)
def test_get_mask(self):
assert_array_equal(self.fk5.get_mask("ra"), self.mask_lon)
assert_array_equal(self.fk5.get_mask("ra", "dec"), self.mask_ang)
assert_array_equal(self.fk5.get_mask("data.lat"), self.mask_lat)
assert_array_equal(self.fk5.get_mask("data.lat", "data.lon"), self.mask_ang)
assert_array_equal(self.fk5.get_mask("cartesian"), self.mask)
assert_array_equal(self.fk5.get_mask("equinox"), np.zeros(self.fk5.shape, bool))
def test_unmasked_frame(self):
fk5 = self.fk5.unmasked
assert not fk5.masked
assert_array_equal(fk5.mask, np.zeros(fk5.shape, bool))
assert_array_equal(fk5.get_mask(), np.zeros(fk5.shape, bool))
def test_frame_without_data():
fk5_no_data = FK5(equinox=["J2000", "J2001"])
with pytest.raises(ValueError, match="does not have associated data"):
fk5_no_data.masked
with pytest.raises(ValueError, match="does not have associated data"):
fk5_no_data.mask
with pytest.raises(ValueError, match="does not have associated data"):
fk5_no_data.get_mask()
assert_array_equal(fk5_no_data.get_mask("equinox"), np.zeros((2,), bool))
| TestFrame |
python | facelessuser__pymdown-extensions | pymdownx/blocks/__init__.py | {
"start": 18955,
"end": 19700
} | class ____(Extension):
"""Blocks Extension."""
def register_block_mgr(self, md: Markdown) -> BlocksProcessor:
"""Add Blocks to Markdown instance."""
if 'blocks' not in md.parser.blockprocessors:
ext = BlocksMgrExtension()
ext.extendMarkdown(md)
mgr = ext.extension
else:
mgr = cast('BlocksProcessor', md.parser.blockprocessors['blocks'])
return mgr
def extendMarkdown(self, md: Markdown) -> None:
"""Extend markdown."""
mgr = self.register_block_mgr(md)
self.extendMarkdownBlocks(md, mgr)
def extendMarkdownBlocks(self, md: Markdown, block_mgr: BlocksProcessor) -> None:
"""Extend Markdown blocks."""
| BlocksExtension |
python | readthedocs__readthedocs.org | readthedocs/organizations/filters.py | {
"start": 5182,
"end": 6111
} | class ____(OrganizationFilterSet):
"""
Filter and sorting for organization team listing page.
This filter set creates the following filters in the organization team
listing UI:
Team
A list of team names that the user has access to, using ``slug`` as a
lookup field. This filter is used mostly for direct linking to a
specific team in the listing UI, but can be used for quick filtering
with the dropdown too.
"""
slug = FilteredModelChoiceFilter(
label=_("Team"),
empty_label=_("All teams"),
field_name="teams",
to_field_name="slug",
queryset_method="get_team_queryset",
method="get_team",
label_attribute="name",
)
def get_team_queryset(self):
return self.queryset
def get_team(self, queryset, field_name, team):
return queryset.filter(slug=team.slug)
| OrganizationTeamListFilterSet |
python | huggingface__transformers | src/transformers/models/perceiver/modeling_perceiver.py | {
"start": 21833,
"end": 31774
} | class ____(PerceiverPreTrainedModel):
def __init__(
self,
config,
decoder: Optional["PerceiverAbstractDecoder"] = None,
input_preprocessor: PreprocessorType = None,
output_postprocessor: PostprocessorType = None,
):
r"""
decoder (`PerceiverDecoder`, *optional*):
Decoder module that transforms latent representations into task predictions.
input_preprocessor (`PreprocessorType`, *optional*):
Preprocessor that encodes raw inputs into tensors for the model.
output_postprocessor (`PostprocessorType`, *optional*):
Postprocessor that transforms model outputs into final predictions.
"""
super().__init__(config)
self.config = config
self.input_preprocessor = input_preprocessor
self.output_postprocessor = output_postprocessor
self.embeddings = PerceiverEmbeddings(config)
self.encoder = PerceiverEncoder(
config, kv_dim=input_preprocessor.num_channels if input_preprocessor is not None else config.d_model
)
self.decoder = decoder
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.latents
def set_input_embeddings(self, value):
self.embeddings.latents = value
@auto_docstring
def forward(
self,
inputs: torch.FloatTensor,
attention_mask: Optional[torch.FloatTensor] = None,
subsampled_output_points: Optional[dict[str, torch.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
) -> Union[tuple, PerceiverModelOutput]:
r"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
subsampled_output_points (`dict[str, torch.Tensor]`, *optional*):
Dictionary of tensors used as queries for the decoder. The decoder maps these queries to the latent
representation of the model. Used for subsampled decoding, e.g. when only decoding certain image patches.
Examples:
```python
>>> from transformers import PerceiverConfig, PerceiverTokenizer, PerceiverImageProcessor, PerceiverModel
>>> from transformers.models.perceiver.modeling_perceiver import (
... PerceiverTextPreprocessor,
... PerceiverImagePreprocessor,
... PerceiverClassificationDecoder,
... )
>>> import torch
>>> import requests
>>> from PIL import Image
>>> # EXAMPLE 1: using the Perceiver to classify texts
>>> # - we define a TextPreprocessor, which can be used to embed tokens
>>> # - we define a ClassificationDecoder, which can be used to decode the
>>> # final hidden states of the latents to classification logits
>>> # using trainable position embeddings
>>> config = PerceiverConfig()
>>> preprocessor = PerceiverTextPreprocessor(config)
>>> decoder = PerceiverClassificationDecoder(
... config,
... num_channels=config.d_latents,
... trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1),
... use_query_residual=True,
... )
>>> model = PerceiverModel(config, input_preprocessor=preprocessor, decoder=decoder)
>>> # you can then do a forward pass as follows:
>>> tokenizer = PerceiverTokenizer()
>>> text = "hello world"
>>> inputs = tokenizer(text, return_tensors="pt").input_ids
>>> with torch.no_grad():
... outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 2]
>>> # to train, one can train the model using standard cross-entropy:
>>> criterion = torch.nn.CrossEntropyLoss()
>>> labels = torch.tensor([1])
>>> loss = criterion(logits, labels)
>>> # EXAMPLE 2: using the Perceiver to classify images
>>> # - we define an ImagePreprocessor, which can be used to embed images
>>> config = PerceiverConfig(image_size=224)
>>> preprocessor = PerceiverImagePreprocessor(
... config,
... prep_type="conv1x1",
... spatial_downsample=1,
... out_channels=256,
... position_encoding_type="trainable",
... concat_or_add_pos="concat",
... project_pos_dim=256,
... trainable_position_encoding_kwargs=dict(
... num_channels=256,
... index_dims=config.image_size**2,
... ),
... )
>>> model = PerceiverModel(
... config,
... input_preprocessor=preprocessor,
... decoder=PerceiverClassificationDecoder(
... config,
... num_channels=config.d_latents,
... trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1),
... use_query_residual=True,
... ),
... )
>>> # you can then do a forward pass as follows:
>>> image_processor = PerceiverImageProcessor()
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(image, return_tensors="pt").pixel_values
>>> with torch.no_grad():
... outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 2]
>>> # to train, one can train the model using standard cross-entropy:
>>> criterion = torch.nn.CrossEntropyLoss()
>>> labels = torch.tensor([1])
>>> loss = criterion(logits, labels)
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.input_preprocessor is not None:
inputs, modality_sizes, inputs_without_pos = self.input_preprocessor(
inputs, interpolate_pos_encoding=interpolate_pos_encoding
)
else:
modality_sizes = None
inputs_without_pos = None
if inputs.size()[-1] != self.config.d_model:
raise ValueError(
f"Last dimension of the inputs: {inputs.size()[-1]} doesn't correspond to config.d_model:"
f" {self.config.d_model}. Make sure to set config.d_model appropriately."
)
batch_size, seq_length, _ = inputs.size()
device = inputs.device
# If no attention mask is provided, make them all ones
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=device)
# Make the attention mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
extended_attention_mask = self.invert_attention_mask(attention_mask)
embedding_output = self.embeddings(batch_size=batch_size)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=None,
inputs=inputs,
inputs_mask=extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
logits = None
if self.decoder:
if subsampled_output_points is not None:
output_modality_sizes = {
"audio": subsampled_output_points["audio"].shape[0],
"image": subsampled_output_points["image"].shape[0],
"label": 1,
}
else:
output_modality_sizes = modality_sizes
decoder_query = self.decoder.decoder_query(
inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_output_points
)
decoder_outputs = self.decoder(
decoder_query,
z=sequence_output,
query_mask=extended_attention_mask,
output_attentions=output_attentions,
)
logits = decoder_outputs.logits
# add cross-attentions of decoder
if output_attentions and decoder_outputs.cross_attentions is not None:
if return_dict:
encoder_outputs.cross_attentions = (
encoder_outputs.cross_attentions + decoder_outputs.cross_attentions
)
else:
encoder_outputs = encoder_outputs + decoder_outputs.cross_attentions
if self.output_postprocessor:
logits = self.output_postprocessor(logits, modality_sizes=output_modality_sizes)
if not return_dict:
if logits is not None:
return (logits, sequence_output) + encoder_outputs[1:]
else:
return (sequence_output,) + encoder_outputs[1:]
return PerceiverModelOutput(
logits=logits,
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
Example use of Perceiver for masked language modeling.
"""
)
| PerceiverModel |
python | getsentry__sentry | src/sentry/sentry_apps/api/endpoints/group_external_issue_details.py | {
"start": 437,
"end": 996
} | class ____(GroupEndpoint):
owner = ApiOwner.ECOSYSTEM
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
}
def delete(self, request: Request, external_issue_id, group) -> Response:
try:
external_issue = PlatformExternalIssue.objects.get(
id=external_issue_id, group_id=group.id
)
except PlatformExternalIssue.DoesNotExist:
return Response(status=404)
deletions.exec_sync(external_issue)
return Response(status=204)
| GroupExternalIssueDetailsEndpoint |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/training_generator_v1.py | {
"start": 24618,
"end": 27024
} | class ____(training_utils_v1.TrainingLoop):
"""A non-distributed Dataset or iterator in eager execution."""
def fit(self,
model,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_freq=1,
**kwargs):
model._validate_or_infer_batch_size(batch_size, steps_per_epoch, x)
# Make sure that y, sample_weights, validation_split are not passed.
training_utils_v1.validate_dataset_input(x, y, sample_weight,
validation_split)
if (isinstance(x, (data_types.DatasetV1, data_types.DatasetV2)) and
shuffle):
training_utils_v1.verify_dataset_shuffled(x)
return fit_generator(
model,
x,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
validation_freq=validation_freq,
class_weight=class_weight,
workers=0,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_name='steps_per_epoch')
def evaluate(self,
model,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
callbacks=None,
**kwargs):
model._validate_or_infer_batch_size(batch_size, steps, x)
# Make sure that y, sample_weights, validation_split are not passed.
training_utils_v1.validate_dataset_input(x, y, sample_weight)
return evaluate_generator(
model, x, steps=steps, verbose=verbose, workers=0, callbacks=callbacks)
def predict(self,
model,
x,
batch_size=None,
verbose=0,
steps=None,
callbacks=None,
**kwargs):
model._validate_or_infer_batch_size(batch_size, steps, x)
return predict_generator(
model, x, steps=steps, verbose=verbose, workers=0, callbacks=callbacks)
| EagerDatasetOrIteratorTrainingLoop |
python | crytic__slither | slither/core/cfg/node.py | {
"start": 3168,
"end": 41768
} | class ____(SourceMapping): # pylint: disable=too-many-public-methods
"""
Node class
"""
def __init__(
self,
node_type: NodeType,
node_id: int,
scope: Union["Scope", "Function"],
file_scope: "FileScope",
) -> None:
super().__init__()
self._node_type = node_type
# TODO: rename to explicit CFG
self._sons: List["Node"] = []
self._fathers: List["Node"] = []
## Dominators info
# Dominators nodes
self._dominators: Set["Node"] = set()
self._immediate_dominator: Optional["Node"] = None
## Nodes of the dominators tree
# self._dom_predecessors = set()
self._dom_successors: Set["Node"] = set()
# Dominance frontier
self._dominance_frontier: Set["Node"] = set()
# Phi origin
# key are variable name
self._phi_origins_state_variables: Dict[str, Tuple[StateVariable, Set["Node"]]] = {}
self._phi_origins_local_variables: Dict[str, Tuple[LocalVariable, Set["Node"]]] = {}
# self._phi_origins_member_variables: Dict[str, Tuple[MemberVariable, Set["Node"]]] = {}
self._expression: Optional[Expression] = None
self._variable_declaration: Optional[LocalVariable] = None
self._node_id: int = node_id
self._vars_written: List[Variable] = []
self._vars_read: List[Union[Variable, SolidityVariable]] = []
self._ssa_vars_written: List["SlithIRVariable"] = []
self._ssa_vars_read: List["SlithIRVariable"] = []
self._internal_calls: List[InternalCall] = [] # contains solidity calls
self._solidity_calls: List[SolidityCall] = []
self._high_level_calls: List[Tuple[Contract, HighLevelCall]] = [] # contains library calls
self._library_calls: List[LibraryCall] = []
self._low_level_calls: List[LowLevelCall] = []
self._external_calls_as_expressions: List[Expression] = []
self._internal_calls_as_expressions: List[Expression] = []
self._irs: List[Operation] = []
self._all_slithir_operations: Optional[List[Operation]] = None
self._irs_ssa: List[Operation] = []
self._state_vars_written: List[StateVariable] = []
self._state_vars_read: List[StateVariable] = []
self._solidity_vars_read: List[SolidityVariable] = []
self._ssa_state_vars_written: List[StateIRVariable] = []
self._ssa_state_vars_read: List[StateIRVariable] = []
self._local_vars_read: List[LocalVariable] = []
self._local_vars_written: List[LocalVariable] = []
self._slithir_vars: Set[
Union["SlithIRVariable", ReferenceVariable, TemporaryVariable, TupleVariable]
] = set() # non SSA
self._ssa_local_vars_read: List[LocalIRVariable] = []
self._ssa_local_vars_written: List[LocalIRVariable] = []
self._expression_vars_written: List[Expression] = []
self._expression_vars_read: List[Expression] = []
self._expression_calls: List[Expression] = []
# Computed on the fly, can be True of False
self._can_reenter: Optional[bool] = None
self._can_send_eth: Optional[bool] = None
self._asm_source_code: Optional[Union[str, Dict]] = None
self.scope: Union["Scope", "Function"] = scope
self.file_scope: "FileScope" = file_scope
self._function: Optional["Function"] = None
self._is_reachable: bool = False
###################################################################################
###################################################################################
# region General's properties
###################################################################################
###################################################################################
@property
def compilation_unit(self) -> "SlitherCompilationUnit":
return self.function.compilation_unit
@property
def node_id(self) -> int:
"""Unique node id."""
return self._node_id
@property
def type(self) -> NodeType:
"""
NodeType: type of the node
"""
return self._node_type
@type.setter
def type(self, new_type: NodeType) -> None:
self._node_type = new_type
@property
def will_return(self) -> bool:
if not self.sons and self.type != NodeType.THROW:
solidity_calls = [ir.function for ir in self.solidity_calls]
if SolidityFunction("revert()") not in solidity_calls:
if SolidityFunction("revert(string)") not in solidity_calls:
return True
return False
def set_function(self, function: "Function") -> None:
self._function = function
@property
def function(self) -> "Function":
return self._function
@property
def is_reachable(self) -> bool:
return self._is_reachable
def set_is_reachable(self, new_is_reachable: bool) -> None:
self._is_reachable = new_is_reachable
# endregion
###################################################################################
###################################################################################
# region Variables
###################################################################################
###################################################################################
@property
def variables_read(self) -> List[Union[Variable, SolidityVariable]]:
"""
list(Variable): Variables read (local/state/solidity)
"""
return list(self._vars_read)
@property
def state_variables_read(self) -> List[StateVariable]:
"""
list(StateVariable): State variables read
"""
return list(self._state_vars_read)
@property
def local_variables_read(self) -> List[LocalVariable]:
"""
list(LocalVariable): Local variables read
"""
return list(self._local_vars_read)
@property
def solidity_variables_read(self) -> List[SolidityVariable]:
"""
list(SolidityVariable): State variables read
"""
return list(self._solidity_vars_read)
@property
def ssa_variables_read(self) -> List["SlithIRVariable"]:
"""
list(Variable): Variables read (local/state/solidity)
"""
return list(self._ssa_vars_read)
@property
def ssa_state_variables_read(self) -> List[StateIRVariable]:
"""
list(StateVariable): State variables read
"""
return list(self._ssa_state_vars_read)
@property
def ssa_local_variables_read(self) -> List[LocalIRVariable]:
"""
list(LocalVariable): Local variables read
"""
return list(self._ssa_local_vars_read)
@property
def variables_read_as_expression(self) -> List[Expression]:
return self._expression_vars_read
@variables_read_as_expression.setter
def variables_read_as_expression(self, exprs: List[Expression]) -> None:
self._expression_vars_read = exprs
@property
def slithir_variables(
self,
) -> List[Union["SlithIRVariable", ReferenceVariable, TemporaryVariable, TupleVariable]]:
return list(self._slithir_vars)
@property
def variables_written(self) -> List[Variable]:
"""
list(Variable): Variables written (local/state/solidity)
"""
return list(self._vars_written)
@property
def state_variables_written(self) -> List[StateVariable]:
"""
list(StateVariable): State variables written
"""
return list(self._state_vars_written)
@property
def local_variables_written(self) -> List[LocalVariable]:
"""
list(LocalVariable): Local variables written
"""
return list(self._local_vars_written)
@property
def ssa_variables_written(self) -> List["SlithIRVariable"]:
"""
list(Variable): Variables written (local/state/solidity)
"""
return list(self._ssa_vars_written)
@property
def ssa_state_variables_written(self) -> List[StateIRVariable]:
"""
list(StateVariable): State variables written
"""
return list(self._ssa_state_vars_written)
@property
def ssa_local_variables_written(self) -> List[LocalIRVariable]:
"""
list(LocalVariable): Local variables written
"""
return list(self._ssa_local_vars_written)
@property
def variables_written_as_expression(self) -> List[Expression]:
return self._expression_vars_written
@variables_written_as_expression.setter
def variables_written_as_expression(self, exprs: List[Expression]) -> None:
self._expression_vars_written = exprs
# endregion
###################################################################################
###################################################################################
# region Calls
###################################################################################
###################################################################################
@property
def internal_calls(self) -> List[InternalCall]:
"""
list(InternalCall): List of IR operations with internal/solidity function calls
"""
return list(self._internal_calls)
@property
def solidity_calls(self) -> List[SolidityCall]:
"""
list(SolidityCall): List of IR operations with solidity calls
"""
return list(self._solidity_calls)
@property
def high_level_calls(self) -> List[HighLevelCall]:
"""
list(HighLevelCall): List of IR operations with high level calls (external calls).
Include library calls
"""
return list(self._high_level_calls)
@property
def library_calls(self) -> List[LibraryCall]:
"""
list(LibraryCall): List of IR operations with library calls.
"""
return list(self._library_calls)
@property
def low_level_calls(self) -> List[LowLevelCall]:
"""
list(LowLevelCall): List of IR operations with low_level call
"""
return list(self._low_level_calls)
@property
def external_calls_as_expressions(self) -> List[Expression]:
"""
list(CallExpression): List of message calls (that creates a transaction)
"""
return self._external_calls_as_expressions
@external_calls_as_expressions.setter
def external_calls_as_expressions(self, exprs: List[Expression]) -> None:
self._external_calls_as_expressions = exprs
@property
def internal_calls_as_expressions(self) -> List[Expression]:
"""
list(CallExpression): List of internal calls (that dont create a transaction)
"""
return self._internal_calls_as_expressions
@internal_calls_as_expressions.setter
def internal_calls_as_expressions(self, exprs: List[Expression]) -> None:
self._internal_calls_as_expressions = exprs
@property
def calls_as_expression(self) -> List[Expression]:
return list(self._expression_calls)
@calls_as_expression.setter
def calls_as_expression(self, exprs: List[Expression]) -> None:
self._expression_calls = exprs
def can_reenter(self, callstack: Optional[List[Union[Function, Variable]]] = None) -> bool:
"""
Check if the node can re-enter
Do not consider CREATE as potential re-enter, but check if the
destination's constructor can contain a call (recurs. follow nested CREATE)
For Solidity > 0.5, filter access to public variables and constant/pure/view
For call to this. check if the destination can re-enter
Do not consider Send/Transfer as there is not enough gas
:param callstack: used internally to check for recursion
:return bool:
"""
# pylint: disable=import-outside-toplevel
from slither.slithir.operations import Call
if self._can_reenter is None:
self._can_reenter = False
for ir in self.irs:
if isinstance(ir, Call) and ir.can_reenter(callstack):
self._can_reenter = True
return True
return self._can_reenter
def can_send_eth(self) -> bool:
"""
Check if the node can send eth
:return bool:
"""
# pylint: disable=import-outside-toplevel
from slither.slithir.operations import Call
if self._can_send_eth is None:
self._can_send_eth = False
for ir in self.all_slithir_operations():
if isinstance(ir, Call) and ir.can_send_eth():
self._can_send_eth = True
return True
return self._can_send_eth
# endregion
###################################################################################
###################################################################################
# region Expressions
###################################################################################
###################################################################################
@property
def expression(self) -> Optional[Expression]:
"""
Expression: Expression of the node
"""
return self._expression
def add_expression(self, expression: Expression, bypass_verif_empty: bool = False) -> None:
assert self._expression is None or bypass_verif_empty
self._expression = expression
def add_variable_declaration(self, var: LocalVariable) -> None:
assert self._variable_declaration is None
self._variable_declaration = var
if var.expression:
self._vars_written += [var]
self._local_vars_written += [var]
@property
def variable_declaration(self) -> Optional[LocalVariable]:
"""
Returns:
LocalVariable
"""
return self._variable_declaration
# endregion
###################################################################################
###################################################################################
# region Summary information
###################################################################################
###################################################################################
def contains_require_or_assert(self) -> bool:
"""
Check if the node has a require or assert call
Returns:
bool: True if the node has a require or assert call
"""
return any(
ir.function.name
in ["require(bool)", "require(bool,string)", "require(bool,error)", "assert(bool)"]
for ir in self.internal_calls
)
def contains_if(self, include_loop: bool = True) -> bool:
"""
Check if the node is a IF node
Returns:
bool: True if the node is a conditional node (IF or IFLOOP)
"""
if include_loop:
return self.type in [NodeType.IF, NodeType.IFLOOP]
return self.type == NodeType.IF
def is_conditional(self, include_loop: bool = True) -> bool:
"""
Check if the node is a conditional node
A conditional node is either a IF or a require/assert or a RETURN bool
Returns:
bool: True if the node is a conditional node
"""
if self.contains_if(include_loop) or self.contains_require_or_assert():
return True
if self.irs:
last_ir = self.irs[-1]
if last_ir:
if isinstance(last_ir, Return):
for r in last_ir.read:
if r.type == ElementaryType("bool"):
return True
return False
# endregion
###################################################################################
###################################################################################
# region EVM
###################################################################################
###################################################################################
@property
def inline_asm(self) -> Optional[Union[str, Dict]]:
return self._asm_source_code
def add_inline_asm(self, asm: Union[str, Dict]) -> None:
self._asm_source_code = asm
# endregion
###################################################################################
###################################################################################
# region Graph
###################################################################################
###################################################################################
def add_father(self, father: "Node") -> None:
"""Add a father node
Args:
father: father to add
"""
self._fathers.append(father)
def set_fathers(self, fathers: List["Node"]) -> None:
"""Set the father nodes
Args:
fathers: list of fathers to add
"""
self._fathers = fathers
@property
def fathers(self) -> List["Node"]:
"""Returns the father nodes
Returns:
list(Node): list of fathers
"""
return list(self._fathers)
def remove_father(self, father: "Node") -> None:
"""Remove the father node. Do nothing if the node is not a father
Args:
:param father:
"""
self._fathers = [x for x in self._fathers if x.node_id != father.node_id]
def remove_son(self, son: "Node") -> None:
"""Remove the son node. Do nothing if the node is not a son
Args:
:param son:
"""
self._sons = [x for x in self._sons if x.node_id != son.node_id]
def add_son(self, son: "Node") -> None:
"""Add a son node
Args:
son: son to add
"""
self._sons.append(son)
def replace_son(self, ori_son: "Node", new_son: "Node") -> None:
"""Replace a son node. Do nothing if the node to replace is not a son
Args:
ori_son: son to replace
new_son: son to replace with
"""
for i, s in enumerate(self._sons):
if s.node_id == ori_son.node_id:
idx = i
break
else:
return
self._sons[idx] = new_son
def set_sons(self, sons: List["Node"]) -> None:
"""Set the son nodes
Args:
sons: list of fathers to add
"""
self._sons = sons
@property
def sons(self) -> List["Node"]:
"""Returns the son nodes
Returns:
list(Node): list of sons
"""
return list(self._sons)
@property
def son_true(self) -> Optional["Node"]:
if self.type in [NodeType.IF, NodeType.IFLOOP]:
return self._sons[0]
return None
@property
def son_false(self) -> Optional["Node"]:
if self.type in [NodeType.IF, NodeType.IFLOOP] and len(self._sons) >= 1:
return self._sons[1]
return None
# endregion
###################################################################################
###################################################################################
# region SlithIR
###################################################################################
###################################################################################
@property
def irs(self) -> List[Operation]:
"""Returns the slithIR representation
return
list(slithIR.Operation)
"""
return self._irs
@property
def irs_ssa(self) -> List[Operation]:
"""Returns the slithIR representation with SSA
return
list(slithIR.Operation)
"""
return self._irs_ssa
@irs_ssa.setter
def irs_ssa(self, irs: List[Operation]) -> None:
self._irs_ssa = irs
def add_ssa_ir(self, ir: Operation) -> None:
"""
Use to place phi operation
"""
ir.set_node(self) # type: ignore
self._irs_ssa.append(ir)
def slithir_generation(self) -> None:
if self.expression:
expression = self.expression
self._irs = convert_expression(expression, self) # type:ignore
self._find_read_write_call()
def all_slithir_operations(self) -> List[Operation]:
if self._all_slithir_operations is None:
irs = list(self.irs)
for ir in self.irs:
if isinstance(ir, InternalCall):
irs += ir.function.all_slithir_operations()
self._all_slithir_operations = irs
return self._all_slithir_operations
@staticmethod
def _is_non_slithir_var(var: Variable) -> bool:
return not isinstance(var, (Constant, ReferenceVariable, TemporaryVariable, TupleVariable))
@staticmethod
def _is_valid_slithir_var(var: Variable) -> bool:
return isinstance(var, (ReferenceVariable, TemporaryVariable, TupleVariable))
# endregion
###################################################################################
###################################################################################
# region Dominators
###################################################################################
###################################################################################
@property
def dominators(self) -> Set["Node"]:
"""
Returns:
set(Node)
"""
return self._dominators
@dominators.setter
def dominators(self, dom: Set["Node"]) -> None:
self._dominators = dom
@property
def immediate_dominator(self) -> Optional["Node"]:
"""
Returns:
Node or None
"""
return self._immediate_dominator
@immediate_dominator.setter
def immediate_dominator(self, idom: "Node") -> None:
self._immediate_dominator = idom
@property
def dominance_frontier(self) -> Set["Node"]:
"""
Returns:
set(Node)
"""
return self._dominance_frontier
@dominance_frontier.setter
def dominance_frontier(self, doms: Set["Node"]) -> None:
"""
Returns:
set(Node)
"""
self._dominance_frontier = doms
@property
def dominator_successors(self) -> Set["Node"]:
return self._dom_successors
@property
def dominance_exploration_ordered(self) -> List["Node"]:
"""
Sorted list of all the nodes to explore to follow the dom
:return: list(nodes)
"""
# Explore direct dominance
to_explore = sorted(list(self.dominator_successors), key=lambda x: x.node_id)
# Explore dominance frontier
# The frontier is the limit where this node dominates
# We need to explore it because the sub of the direct dominance
# Might not be dominator of their own sub
to_explore += sorted(list(self.dominance_frontier), key=lambda x: x.node_id)
return to_explore
# endregion
###################################################################################
###################################################################################
# region Phi operation
###################################################################################
###################################################################################
@property
def phi_origins_local_variables(
self,
) -> Dict[str, Tuple[LocalVariable, Set["Node"]]]:
return self._phi_origins_local_variables
@property
def phi_origins_state_variables(
self,
) -> Dict[str, Tuple[StateVariable, Set["Node"]]]:
return self._phi_origins_state_variables
# @property
# def phi_origin_member_variables(self) -> Dict[str, Tuple[MemberVariable, Set["Node"]]]:
# return self._phi_origins_member_variables
def add_phi_origin_local_variable(self, variable: LocalVariable, node: "Node") -> None:
if variable.name not in self._phi_origins_local_variables:
assert variable.name
self._phi_origins_local_variables[variable.name] = (variable, set())
(v, nodes) = self._phi_origins_local_variables[variable.name]
assert v == variable
nodes.add(node)
def add_phi_origin_state_variable(self, variable: StateVariable, node: "Node") -> None:
if variable.canonical_name not in self._phi_origins_state_variables:
self._phi_origins_state_variables[variable.canonical_name] = (
variable,
set(),
)
(v, nodes) = self._phi_origins_state_variables[variable.canonical_name]
assert v == variable
nodes.add(node)
# def add_phi_origin_member_variable(self, variable: MemberVariable, node: "Node"):
# if variable.name not in self._phi_origins_member_variables:
# self._phi_origins_member_variables[variable.name] = (variable, set())
# (v, nodes) = self._phi_origins_member_variables[variable.name]
# assert v == variable
# nodes.add(node)
# endregion
###################################################################################
###################################################################################
# region Analyses
###################################################################################
###################################################################################
def _find_read_write_call(self) -> None: # pylint: disable=too-many-statements
for ir in self.irs:
self._slithir_vars |= {v for v in ir.read if self._is_valid_slithir_var(v)}
if isinstance(ir, OperationWithLValue):
var = ir.lvalue
if var and self._is_valid_slithir_var(var):
# The type is checked by is_valid_slithir_var
self._slithir_vars.add(var) # type: ignore
if not isinstance(ir, (Phi, Index, Member)):
self._vars_read += [v for v in ir.read if self._is_non_slithir_var(v)]
for var in ir.read:
if isinstance(var, ReferenceVariable):
self._vars_read.append(var.points_to_origin)
elif isinstance(ir, (Member, Index)):
# TODO investigate types for member variable left
var = ir.variable_left if isinstance(ir, Member) else ir.variable_right
if var and self._is_non_slithir_var(var):
self._vars_read.append(var)
if isinstance(var, ReferenceVariable):
origin = var.points_to_origin
if self._is_non_slithir_var(origin):
self._vars_read.append(origin)
if isinstance(ir, OperationWithLValue):
if isinstance(ir, (Index, Member, Length)):
continue # Don't consider Member and Index operations -> ReferenceVariable
var = ir.lvalue
if isinstance(var, ReferenceVariable):
var = var.points_to_origin
if var and self._is_non_slithir_var(var):
self._vars_written.append(var)
if isinstance(ir, InternalCall):
self._internal_calls.append(ir)
if isinstance(ir, SolidityCall):
# TODO: consider removing dependancy of solidity_call to internal_call
self._solidity_calls.append(ir)
self._internal_calls.append(ir)
if isinstance(ir, LowLevelCall):
assert isinstance(ir.destination, (Variable, SolidityVariable))
self._low_level_calls.append(ir)
elif isinstance(ir, HighLevelCall) and not isinstance(ir, LibraryCall):
# Todo investigate this if condition
# It does seem right to compare against a contract
# This might need a refactoring
if isinstance(ir.destination.type, Contract):
self._high_level_calls.append((ir.destination.type, ir))
elif ir.destination == SolidityVariable("this"):
func = self.function
# Can't use this in a top level function
assert isinstance(func, FunctionContract)
self._high_level_calls.append((func.contract, ir))
else:
try:
# Todo this part needs more tests and documentation
self._high_level_calls.append((ir.destination.type.type, ir))
except AttributeError as error:
# pylint: disable=raise-missing-from
raise SlitherException(
f"Function not found on IR: {ir}.\nNode: {self} ({self.source_mapping})\nFunction: {self.function}\nPlease try compiling with a recent Solidity version. {error}"
)
elif isinstance(ir, LibraryCall):
assert isinstance(ir.destination, Contract)
assert isinstance(ir.function, Function)
self._high_level_calls.append((ir.destination, ir))
self._library_calls.append(ir)
self._vars_read = list(set(self._vars_read))
self._state_vars_read = [v for v in self._vars_read if isinstance(v, StateVariable)]
self._local_vars_read = [v for v in self._vars_read if isinstance(v, LocalVariable)]
self._solidity_vars_read = [
v_ for v_ in self._vars_read if isinstance(v_, SolidityVariable)
]
self._vars_written = list(set(self._vars_written))
self._state_vars_written = [v for v in self._vars_written if isinstance(v, StateVariable)]
self._local_vars_written = [v for v in self._vars_written if isinstance(v, LocalVariable)]
self._internal_calls = list(set(self._internal_calls))
self._solidity_calls = list(set(self._solidity_calls))
self._high_level_calls = list(set(self._high_level_calls))
self._library_calls = list(set(self._library_calls))
self._low_level_calls = list(set(self._low_level_calls))
@staticmethod
def _convert_ssa(v: Variable) -> Optional[Union[StateVariable, LocalVariable]]:
non_ssa_var: Optional[Union[StateVariable, LocalVariable]]
if isinstance(v, StateIRVariable):
contract = v.contract
assert v.name
non_ssa_var = contract.get_state_variable_from_name(v.name)
return non_ssa_var
assert isinstance(v, LocalIRVariable)
function = v.function
assert v.name
non_ssa_var = function.get_local_variable_from_name(v.name)
return non_ssa_var
def _update_read_using_ssa(self, ir: Operation) -> None:
"""
Update self._ssa_vars_read
This look for all operations that read a IRvariable
It uses the result of the storage pointer
- For "normal" operation, the read are mostly everything in ir.read
- For "index", the read is the left part (the right part being a reference variable)
- For Phi, nothing is considered read
"""
# For variable read, phi and index have special treatments
# Phi don't lead to values read
# Index leads to read the variable right (the left variable is a ref variable, not the actual object)
# Not that Member is a normal operation here, given we filter out constant by checking for the IRvaraible
if not isinstance(ir, (Phi, Index)):
self._ssa_vars_read += [
v for v in ir.read if isinstance(v, (StateIRVariable, LocalIRVariable))
]
for var in ir.read:
if isinstance(var, ReferenceVariable):
origin = var.points_to_origin
if isinstance(origin, (StateIRVariable, LocalIRVariable)):
self._ssa_vars_read.append(origin)
# If we read from a storage variable (outside of phi operator)
if isinstance(var, LocalIRVariable) and var.is_storage:
for refer_to in var.refers_to:
# the following should always be true
if isinstance(refer_to, (StateIRVariable, LocalIRVariable)):
self._ssa_vars_read.append(refer_to)
elif isinstance(ir, Index):
variable_right: RVALUE = ir.variable_right
if isinstance(variable_right, (StateIRVariable, LocalIRVariable)):
self._ssa_vars_read.append(variable_right)
if isinstance(variable_right, ReferenceVariable):
origin = variable_right.points_to_origin
if isinstance(origin, (StateIRVariable, LocalIRVariable)):
self._ssa_vars_read.append(origin)
def _update_write_using_ssa(self, ir: Operation) -> None:
"""
Update self._ssa_vars_written
This look for all operations that write a IRvariable
It uses the result of the storage pointer
Index/member/Length are not considering writing to anything
For index/member it is implictely handled when their associated RefernceVarible are written
"""
if isinstance(ir, OperationWithLValue) and not isinstance(ir, Phi):
if isinstance(ir, (Index, Member, Length)):
return # Don't consider Member and Index operations -> ReferenceVariable
var = ir.lvalue
if isinstance(var, ReferenceVariable):
var = var.points_to_origin
candidates = [var]
# If we write to a storage pointer, add everything it points to as target
# if it's a variable declaration we do not want to consider the right variable written in that case
# string storage ss = s; // s is a storage variable but should not be considered written at that point
if (
isinstance(var, LocalIRVariable)
and var.is_storage
and ir.node.type is not NodeType.VARIABLE
):
candidates += var.refers_to
for var in candidates:
# Only store non-slithIR variables
if var and isinstance(var, (StateIRVariable, LocalIRVariable)):
if isinstance(ir, PhiCallback):
continue
self._ssa_vars_written.append(var)
def update_read_write_using_ssa(self) -> None:
for ir in self.irs_ssa:
if isinstance(ir, PhiCallback):
continue
self._update_read_using_ssa(ir)
self._update_write_using_ssa(ir)
self._ssa_vars_read = list(set(self._ssa_vars_read))
self._ssa_state_vars_read = [v for v in self._ssa_vars_read if isinstance(v, StateVariable)]
self._ssa_local_vars_read = [v for v in self._ssa_vars_read if isinstance(v, LocalVariable)]
self._ssa_vars_written = list(set(self._ssa_vars_written))
self._ssa_state_vars_written = [
v for v in self._ssa_vars_written if v and isinstance(v, StateIRVariable)
]
self._ssa_local_vars_written = [
v for v in self._ssa_vars_written if v and isinstance(v, LocalIRVariable)
]
vars_read = [self._convert_ssa(x) for x in self._ssa_vars_read]
vars_written = [self._convert_ssa(x) for x in self._ssa_vars_written]
self._vars_read += [v_ for v_ in vars_read if v_ and v_ not in self._vars_read]
self._state_vars_read = [v for v in self._vars_read if isinstance(v, StateVariable)]
self._local_vars_read = [v for v in self._vars_read if isinstance(v, LocalVariable)]
self._vars_written += [v_ for v_ in vars_written if v_ and v_ not in self._vars_written]
self._state_vars_written = [v for v in self._vars_written if isinstance(v, StateVariable)]
self._local_vars_written = [v for v in self._vars_written if isinstance(v, LocalVariable)]
# endregion
###################################################################################
###################################################################################
# region Built in definitions
###################################################################################
###################################################################################
def __str__(self) -> str:
additional_info = ""
if self.expression:
additional_info += " " + str(self.expression)
elif self.variable_declaration:
additional_info += " " + str(self.variable_declaration)
txt = str(self._node_type.value) + additional_info
return txt
# endregion
###################################################################################
###################################################################################
# region Utils
###################################################################################
###################################################################################
def link_nodes(node1: Node, node2: Node) -> None:
node1.add_son(node2)
node2.add_father(node1)
def insert_node(origin: Node, node_inserted: Node) -> None:
sons = origin.sons
link_nodes(origin, node_inserted)
for son in sons:
son.remove_father(origin)
origin.remove_son(son)
link_nodes(node_inserted, son)
def recheable(node: Node) -> Set[Node]:
"""
Return the set of nodes reacheable from the node
:param node:
:return: set(Node)
"""
nodes = node.sons
visited = set()
while nodes:
next_node = nodes[0]
nodes = nodes[1:]
if next_node not in visited:
visited.add(next_node)
for son in next_node.sons:
if son not in visited:
nodes.append(son)
return visited
# endregion
| Node |
python | ansible__ansible | test/units/module_utils/common/test_dict_transformations.py | {
"start": 2095,
"end": 3544
} | class ____:
def test_dict_merge(self):
base = dict(obj2=dict(), b1=True, b2=False, b3=False,
one=1, two=2, three=3, obj1=dict(key1=1, key2=2),
l1=[1, 3], l2=[1, 2, 3], l4=[4],
nested=dict(n1=dict(n2=2)))
other = dict(b1=True, b2=False, b3=True, b4=True,
one=1, three=4, four=4, obj1=dict(key1=2),
l1=[2, 1], l2=[3, 2, 1], l3=[1],
nested=dict(n1=dict(n2=2, n3=3)))
result = dict_merge(base, other)
# string assertions
assert 'one' in result
assert 'two' in result
assert result['three'] == 4
assert result['four'] == 4
# dict assertions
assert 'obj1' in result
assert 'key1' in result['obj1']
assert 'key2' in result['obj1']
# list assertions
# this line differs from the network_utils/common test of the function of the
# same name as this method does not merge lists
assert result['l1'], [2, 1]
assert 'l2' in result
assert result['l3'], [1]
assert 'l4' in result
# nested assertions
assert 'obj1' in result
assert result['obj1']['key1'], 2
assert 'key2' in result['obj1']
# bool assertions
assert 'b1' in result
assert 'b2' in result
assert result['b3']
assert result['b4']
| TestCaseDictMerge |
python | pytorch__pytorch | test/dynamo/test_functions.py | {
"start": 2766,
"end": 3231
} | class ____:
a = 1
@torch.jit.script_if_tracing
def inline_script_if_tracing(x):
return x + 1.2
@torch.jit.ignore
def inline_ignore(x):
return x + 3.4
@torch.jit.unused
def inline_unused(x):
return x + 5.6
@functools.lru_cache
def inline_lru_cache_fn_with_default_args(x, y, _=None):
return torch.sin(x * y)
@torch.jit.script_if_tracing
def inline_script_if_tracing_fn_with_default_args(x, y, c=1.2):
return torch.cos(x * y) + c
| MyCls |
python | django__django | tests/admin_inlines/models.py | {
"start": 9713,
"end": 9840
} | class ____(Profile):
class Meta:
verbose_name_plural = "Model with verbose name plural only"
| VerboseNamePluralProfile |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/util/langhelpers.py | {
"start": 67810,
"end": 67941
} | class ____(enum.Enum):
Missing = enum.auto()
Missing = _Missing.Missing
MissingOr = Union[_T, Literal[_Missing.Missing]]
| _Missing |
python | getsentry__sentry | tests/sentry/issues/test_issue_search.py | {
"start": 1042,
"end": 6637
} | class ____(unittest.TestCase):
def test_key_mappings(self) -> None:
# Test a couple of keys to ensure things are working as expected
assert parse_search_query("bookmarks:123") == [
SearchFilter(
key=SearchKey(name="bookmarked_by"), operator="=", value=SearchValue("123")
)
]
assert parse_search_query("first-release:123") == [
SearchFilter(
key=SearchKey(name="first_release"), operator="=", value=SearchValue("123")
)
]
assert parse_search_query("first-release:123 non_mapped:456") == [
SearchFilter(
key=SearchKey(name="first_release"), operator="=", value=SearchValue("123")
),
SearchFilter(key=SearchKey(name="non_mapped"), operator="=", value=SearchValue("456")),
]
def test_is_query_unassigned(self) -> None:
assert parse_search_query("is:unassigned") == [
SearchFilter(key=SearchKey(name="unassigned"), operator="=", value=SearchValue(True))
]
assert parse_search_query("is:assigned") == [
SearchFilter(key=SearchKey(name="unassigned"), operator="=", value=SearchValue(False))
]
assert parse_search_query("!is:unassigned") == [
SearchFilter(key=SearchKey(name="unassigned"), operator="!=", value=SearchValue(True))
]
assert parse_search_query("!is:assigned") == [
SearchFilter(key=SearchKey(name="unassigned"), operator="!=", value=SearchValue(False))
]
def test_is_query_linked(self) -> None:
assert parse_search_query("is:linked") == [
SearchFilter(key=SearchKey(name="linked"), operator="=", value=SearchValue(True))
]
assert parse_search_query("is:unlinked") == [
SearchFilter(key=SearchKey(name="linked"), operator="=", value=SearchValue(False))
]
assert parse_search_query("!is:linked") == [
SearchFilter(key=SearchKey(name="linked"), operator="!=", value=SearchValue(True))
]
assert parse_search_query("!is:unlinked") == [
SearchFilter(key=SearchKey(name="linked"), operator="!=", value=SearchValue(False))
]
def test_is_query_status(self) -> None:
for status_string, status_val in STATUS_QUERY_CHOICES.items():
assert parse_search_query("is:%s" % status_string) == [
SearchFilter(
key=SearchKey(name="status"), operator="=", value=SearchValue(status_val)
)
]
assert parse_search_query("!is:%s" % status_string) == [
SearchFilter(
key=SearchKey(name="status"), operator="!=", value=SearchValue(status_val)
)
]
def test_is_query_invalid(self) -> None:
with pytest.raises(InvalidSearchQuery) as excinfo:
parse_search_query("is:wrong")
assert str(excinfo.value).startswith('Invalid value for "is" search, valid values are')
def test_is_query_inbox(self) -> None:
assert parse_search_query("is:for_review") == [
SearchFilter(key=SearchKey(name="for_review"), operator="=", value=SearchValue(True))
]
def test_numeric_filter(self) -> None:
# test numeric format
assert parse_search_query("times_seen:500") == [
SearchFilter(
key=SearchKey(name="times_seen"), operator="=", value=SearchValue(raw_value=500)
)
]
assert parse_search_query("times_seen:>500") == [
SearchFilter(
key=SearchKey(name="times_seen"), operator=">", value=SearchValue(raw_value=500)
)
]
assert parse_search_query("times_seen:<500") == [
SearchFilter(
key=SearchKey(name="times_seen"), operator="<", value=SearchValue(raw_value=500)
)
]
invalid_queries = [
"times_seen:<hello",
"times_seen:<512.1.0",
"times_seen:2018-01-01",
"times_seen:+7d",
"times_seen:>2018-01-01",
'times_seen:"<10"',
]
for invalid_query in invalid_queries:
with pytest.raises(InvalidSearchQuery, match="Invalid number"):
parse_search_query(invalid_query)
def test_boolean_operators_not_allowed(self) -> None:
invalid_queries = [
"user.email:foo@example.com OR user.email:bar@example.com",
"user.email:foo@example.com AND user.email:bar@example.com",
"user.email:foo@example.com OR user.email:bar@example.com OR user.email:foobar@example.com",
"user.email:foo@example.com AND user.email:bar@example.com AND user.email:foobar@example.com",
]
for invalid_query in invalid_queries:
with pytest.raises(
InvalidSearchQuery,
match='Boolean statements containing "OR" or "AND" are not supported in this search',
):
parse_search_query(invalid_query)
def test_parens_in_query(self) -> None:
assert parse_search_query(
"TypeError Anonymous function(app/javascript/utils/transform-object-keys)"
) == [
SearchFilter(
key=SearchKey(name="message"),
operator="=",
value=SearchValue(
raw_value="TypeError Anonymous function(app/javascript/utils/transform-object-keys)"
),
),
]
| ParseSearchQueryTest |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py | {
"start": 155054,
"end": 183673
} | class ____(quantize_model_test_base.QuantizedModelTest):
"""Test cases for dynamic range quantization.
Tries to run all tests cases in both the graph mode (default in TF1) and the
eager mode (default in TF2) to ensure support for when TF2 is disabled.
"""
@parameterized.parameters(
(True, quant_opts_pb2.XLA),
(False, quant_opts_pb2.XLA),
(True, quant_opts_pb2.UNIFORM_QUANTIZED),
(False, quant_opts_pb2.UNIFORM_QUANTIZED),
)
@test_util.run_in_graph_and_eager_modes
def test_einsum_model(
self,
constant_y_operand: bool,
target_opset: quant_opts_pb2.OpSet,
):
equation = 'abc,cde->abde'
_, y_shape, bias_shape, x_signature, y_signature = (
self._prepare_sample_einsum_datashapes(equation, use_bias=True)
)
model = self._create_einsum_model(
equation,
y_shape,
x_signature,
y_signature,
bias_shape,
activation_fn=nn_ops.relu,
)
if constant_y_operand:
signatures = {
'serving_default': model.einsum_with_kernel.get_concrete_function(),
}
else:
signatures = {
'serving_default': (
model.einsum_without_kernel.get_concrete_function()
),
}
saved_model_save.save(model, self._input_saved_model_path, signatures)
tags = {tag_constants.SERVING}
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_DYNAMIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
op_set=target_opset,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {'serving_default'}
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
# TODO(b/286489783): Support Einsum
if target_opset == quant_opts_pb2.UNIFORM_QUANTIZED:
self.assertFalse(self._contains_op(output_graphdef, 'XlaDotV2'))
self.assertTrue(self._contains_op(output_graphdef, 'BatchMatMulV2'))
else:
self.assertFalse(self._contains_op(output_graphdef, 'XlaDotV2'))
self.assertTrue(self._contains_op(output_graphdef, 'Einsum'))
@parameterized.named_parameters(
('to_tf_per_tensor', quant_opts_pb2.TF, False),
('to_xla_per_tensor', quant_opts_pb2.XLA, False),
(
'to_uniform_quantized_per_tensor',
quant_opts_pb2.UNIFORM_QUANTIZED,
False,
),
(
'to_uniform_quantized_per_channel',
quant_opts_pb2.UNIFORM_QUANTIZED,
True,
),
)
@test_util.run_in_graph_and_eager_modes
def test_matmul_model(
self,
target_opset: quant_opts_pb2.OpSet,
enable_per_channel_quantization: bool,
):
self._create_matmul_model(
input_shape=(1, 1024),
weight_shape=(1024, 3),
saved_model_path=self._input_saved_model_path,
)
tags = {tag_constants.SERVING}
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_DYNAMIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
op_set=target_opset,
enable_per_channel_quantization=enable_per_channel_quantization,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {'serving_default'}
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
if target_opset == quant_opts_pb2.UNIFORM_QUANTIZED:
self.assertTrue(
self._contains_op(output_graphdef, 'UniformQuantizedDotHybrid')
)
self.assertFalse(self._contains_op(output_graphdef, 'MatMul'))
if enable_per_channel_quantization:
quantized_axis_attr = attr_value_pb2.AttrValue(i=-1)
self.assertTrue(
self._contains_op(
output_graphdef,
'UniformQuantizedDotHybrid',
'rhs_quantization_axis',
quantized_axis_attr,
)
)
elif target_opset == quant_opts_pb2.XLA:
self.assertTrue(self._contains_op(output_graphdef, 'XlaDotV2'))
self.assertFalse(self._contains_op(output_graphdef, 'MatMul'))
else:
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
self.assertTrue(self._contains_op(output_graphdef, 'MatMul'))
@parameterized.named_parameters(
('to_tf_per_tensor', quant_opts_pb2.TF, False),
('to_xla_per_tensor', quant_opts_pb2.XLA, False),
(
'to_uniform_quantized_per_tensor',
quant_opts_pb2.UNIFORM_QUANTIZED,
False,
),
(
'to_uniform_quantized_per_channel',
quant_opts_pb2.UNIFORM_QUANTIZED,
True,
),
)
@test_util.run_in_graph_and_eager_modes
def test_conv_model(
self,
target_opset: quant_opts_pb2.OpSet,
enable_per_channel_quantization: bool,
):
filter_shape = (2, 3, 512, 2)
model = self._create_conv2d_model(
input_shape=(1, 3, 4, 512),
filter_shape=filter_shape,
has_bias=True,
has_batch_norm=True,
activation_fn=nn_ops.relu6,
)
saved_model_save.save(model, self._input_saved_model_path)
tags = {tag_constants.SERVING}
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_DYNAMIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
op_set=target_opset,
enable_per_channel_quantization=enable_per_channel_quantization,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {'serving_default'}
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
if enable_per_channel_quantization:
quantized_axis = 3
quantized_axis_attr = attr_value_pb2.AttrValue(i=quantized_axis)
quantized_dim_size_attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
shape=[
tensor_shape_pb2.TensorShapeProto(
dim=[
tensor_shape_pb2.TensorShapeProto.Dim(
size=filter_shape[quantized_axis]
)
]
)
]
)
)
if target_opset == quant_opts_pb2.UNIFORM_QUANTIZED:
self.assertTrue(
self._contains_op(
output_graphdef, 'UniformQuantizedConvolutionHybrid'
)
)
self.assertFalse(self._contains_op(output_graphdef, 'Conv2D'))
if enable_per_channel_quantization:
self.assertTrue(
self._contains_op(
output_graphdef,
'UniformQuantizedConvolutionHybrid',
'rhs_quantization_axis',
quantized_axis_attr,
)
)
self.assertTrue(
self._contains_op(
output_graphdef,
'Const',
'_output_shapes',
quantized_dim_size_attr,
)
)
elif target_opset == quant_opts_pb2.XLA:
self.assertTrue(self._contains_op(output_graphdef, 'XlaConvV2'))
self.assertFalse(self._contains_op(output_graphdef, 'Conv2D'))
else:
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
self.assertTrue(self._contains_op(output_graphdef, 'Conv2D'))
@parameterized.named_parameters(
('to_tf_per_tensor', quant_opts_pb2.TF, False),
('to_xla_per_tensor', quant_opts_pb2.XLA, False),
(
'to_uniform_quantized_per_tensor',
quant_opts_pb2.UNIFORM_QUANTIZED,
False,
),
(
'to_uniform_quantized_per_channel',
quant_opts_pb2.UNIFORM_QUANTIZED,
True,
),
)
@test_util.run_in_graph_and_eager_modes
def test_depthwise_conv_model(
self,
target_opset: quant_opts_pb2.OpSet,
enable_per_channel_quantization: bool,
):
filter_shape = (2, 3, 1024, 2)
strides = (1, 2, 2, 1)
model = self._create_depthwise_conv2d_model(
input_shape=(1, 3, 4, 1024), filter_shape=filter_shape, strides=strides
)
saved_model_save.save(model, self._input_saved_model_path)
tags = [tag_constants.SERVING]
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_DYNAMIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
op_set=target_opset,
enable_per_channel_quantization=enable_per_channel_quantization,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {'serving_default'}
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
# Uniform Quantized op takes only the first and the second values for
# strides.
strides_to_check = (
(strides[1], strides[2])
if target_opset == quant_opts_pb2.UNIFORM_QUANTIZED
else strides
)
strides_attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(i=strides_to_check)
)
if enable_per_channel_quantization:
quantized_axis_attr = attr_value_pb2.AttrValue(i=3)
quantized_dim_size_attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
shape=[
tensor_shape_pb2.TensorShapeProto(
dim=[
tensor_shape_pb2.TensorShapeProto.Dim(
size=filter_shape[2] * filter_shape[3]
)
]
)
]
)
)
if target_opset == quant_opts_pb2.UNIFORM_QUANTIZED:
self.assertTrue(
self._contains_op(
output_graphdef,
'UniformQuantizedConvolutionHybrid',
'window_strides',
strides_attr,
)
)
self.assertFalse(
self._contains_op(output_graphdef, 'DepthwiseConv2dNative')
)
if enable_per_channel_quantization:
self.assertTrue(
self._contains_op(
output_graphdef,
'UniformQuantizedConvolutionHybrid',
'rhs_quantization_axis',
quantized_axis_attr,
)
)
self.assertTrue(
self._contains_op(
output_graphdef,
'Const',
'_output_shapes',
quantized_dim_size_attr,
)
)
elif target_opset == quant_opts_pb2.XLA:
self.assertTrue(self._contains_op(output_graphdef, 'XlaConvV2'))
self.assertFalse(
self._contains_op(output_graphdef, 'DepthwiseConv2dNative')
)
else:
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
self.assertTrue(
self._contains_op(
output_graphdef, 'DepthwiseConv2dNative', 'strides', strides_attr
)
)
@parameterized.named_parameters(
('to_tf_use_constant', quant_opts_pb2.TF, False),
('to_xla_use_constant', quant_opts_pb2.XLA, False),
(
'to_uniform_quantized_use_constant',
quant_opts_pb2.UNIFORM_QUANTIZED,
False,
),
('to_tf_use_variable', quant_opts_pb2.TF, True),
('to_xla_use_variable', quant_opts_pb2.XLA, True),
(
'to_uniform_quantized_use_variable',
quant_opts_pb2.UNIFORM_QUANTIZED,
True,
),
)
@test_util.run_v2_only
def test_gather_model(
self, target_opset: quant_opts_pb2.OpSet, use_variable: bool
):
input_type = dtypes.int64
model = self._create_gather_model(input_type, use_variable)
saved_model_save.save(model, self._input_saved_model_path)
tags = {tag_constants.SERVING}
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_DYNAMIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
op_set=target_opset,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {'serving_default'}
)
if target_opset == quant_opts_pb2.UNIFORM_QUANTIZED:
self.assertGreater(
testing.get_size_ratio(
self._output_saved_model_path, self._input_saved_model_path
),
0.65,
)
else:
# Due to other meta data, the compression is not exactly 1/4.
self.assertLess(
testing.get_size_ratio(
self._output_saved_model_path, self._input_saved_model_path
),
1 / 3,
)
@parameterized.named_parameters(
('to_tf_with_int32_input_type', dtypes.int32, quant_opts_pb2.TF),
('to_xla_with_int32_input_type', dtypes.int32, quant_opts_pb2.XLA),
('to_xla_with_int64_input_type', dtypes.int64, quant_opts_pb2.XLA),
(
'to_uq_with_int32_input_type',
dtypes.int32,
quant_opts_pb2.UNIFORM_QUANTIZED,
),
)
@test_util.run_v2_only
def test_gather_and_conv_model(
self, input_type: dtypes, target_opset: quant_opts_pb2.OpSet
):
model = self._create_simple_gather_and_conv_model(
input_type, filter_shape=(2, 3, 3, 1024)
)
saved_model_save.save(model, self._input_saved_model_path)
tags = {tag_constants.SERVING}
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_DYNAMIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
op_set=target_opset,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {'serving_default'}
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
if target_opset == quant_opts_pb2.UNIFORM_QUANTIZED:
self.assertGreater(
testing.get_size_ratio(
self._output_saved_model_path, self._input_saved_model_path
),
0.65,
)
self.assertTrue(
self._contains_op(
output_graphdef, 'UniformQuantizedConvolutionHybrid'
)
)
else:
# Due to other meta data, the compression is not exactly 1/4.
self.assertLess(
testing.get_size_ratio(
self._output_saved_model_path, self._input_saved_model_path
),
1 / 3,
)
if target_opset == quant_opts_pb2.XLA:
self.assertTrue(self._contains_op(output_graphdef, 'XlaConvV2'))
else:
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
@test_util.run_in_graph_and_eager_modes
def test_conv_model_with_wrong_tags_raises_error(self):
signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
save_tags = {tag_constants.TRAINING, tag_constants.GPU}
input_placeholder = self._create_and_save_tf1_conv_model(
self._input_saved_model_path,
signature_key,
save_tags,
input_key='input',
output_key='output',
use_variable=True,
)
tags = {tag_constants.SERVING}
signature_keys = [signature_key]
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_DYNAMIC_RANGE_INT8
),
tags=tags,
signature_keys=signature_keys,
op_set=quant_opts_pb2.OpSet.UNIFORM_QUANTIZED,
)
# Try to use a different set of tags to quantize.
data_gen = self._create_data_generator(
input_key='input', shape=input_placeholder.shape
)
# StatusNotOk error. `Exception` is used here because importing
# `StatusNotOk` may break the open-sourced version of TensorFlow.
with self.assertRaisesRegex(
Exception,
'could not be found in SavedModel, with available tags',
) as raises:
quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
representative_dataset=data_gen,
)
self.assertEqual(raises.exception.__class__.__name__, 'RuntimeError')
@parameterized.named_parameters(
('quantize', True, 0),
('not_quantize', False, 10000),
)
@test_util.run_in_graph_and_eager_modes
def test_minimum_elements_for_weights(self, quantize, num_elements):
self._create_matmul_model(
input_shape=(1, 1024),
weight_shape=(1024, 3),
saved_model_path=self._input_saved_model_path,
)
tags = {tag_constants.SERVING}
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_DYNAMIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
op_set=quant_opts_pb2.OpSet.UNIFORM_QUANTIZED,
)
quantization_options.min_num_elements_for_weights = num_elements
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {'serving_default'}
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
type_attr = attr_value_pb2.AttrValue(type=types_pb2.DT_QINT8)
if quantize:
self.assertTrue(
self._contains_op(output_graphdef, 'Const', 'dtype', type_attr)
)
else:
self.assertFalse(
self._contains_op(output_graphdef, 'Const', 'dtype', type_attr)
)
@parameterized.named_parameters(
('to_tf_use_constant', quant_opts_pb2.TF, False),
('to_xla_use_constant', quant_opts_pb2.XLA, False),
(
'to_uniform_quantized_use_constant',
quant_opts_pb2.UNIFORM_QUANTIZED,
False,
),
('to_tf_use_variable', quant_opts_pb2.TF, True),
('to_xla_use_variable', quant_opts_pb2.XLA, True),
(
'to_uniform_quantized_use_variable',
quant_opts_pb2.UNIFORM_QUANTIZED,
True,
),
)
@test_util.run_in_graph_and_eager_modes
def test_gather_model_tf1(
self, target_opset: quant_opts_pb2.OpSet, use_variable: bool
):
signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
tags = {tag_constants.SERVING}
_ = self._create_and_save_tf1_gather_model(
self._input_saved_model_path,
signature_key,
tags,
input_key='x',
output_key='output',
input_type=dtypes.int32,
use_variable=use_variable,
)
signature_keys = [signature_key]
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_DYNAMIC_RANGE_INT8
),
tags=tags,
signature_keys=signature_keys,
op_set=target_opset,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), signature_keys
)
if target_opset == quant_opts_pb2.UNIFORM_QUANTIZED:
threshold = 0.45 if use_variable else 0.7
self.assertGreater(
testing.get_size_ratio(
self._output_saved_model_path, self._input_saved_model_path
),
threshold,
)
else:
threshold = 0.19 if use_variable else 0.42
self.assertLess(
testing.get_size_ratio(
self._output_saved_model_path, self._input_saved_model_path
),
threshold,
)
@test_util.run_in_graph_and_eager_modes
def test_non_empty_directory_raises_file_exists_error(self):
self._create_matmul_model(
input_shape=(1, 1024),
weight_shape=(1024, 3),
saved_model_path=self._input_saved_model_path,
)
# Create a file inside the output directory.
file_io.write_string_to_file(
filename=os.path.join(self._output_saved_model_path, 'dummy_file.txt'),
file_content='Test content',
)
tags = {tag_constants.SERVING}
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_DYNAMIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
)
with self.assertRaisesRegex(
FileExistsError, 'Output directory already exists'
):
quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
)
@test_util.run_in_graph_and_eager_modes
def test_non_empty_directory_overwritten(self):
self._create_matmul_model(
input_shape=(1, 1024),
weight_shape=(1024, 3),
saved_model_path=self._input_saved_model_path,
)
# Create a file inside the output directory.
file_io.write_string_to_file(
filename=os.path.join(self._output_saved_model_path, 'dummy_file.txt'),
file_content='Test content',
)
tags = {tag_constants.SERVING}
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_DYNAMIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
op_set=quant_opts_pb2.TF,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
overwrite_output_directory=True,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {'serving_default'}
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
def test_table_initialized_when_model_has_table_tf1(self):
tags = {tag_constants.SERVING}
signature_def_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
# Create and save a simple model that involves a hash table.
inputs, outputs = self._create_and_save_vocab_table_lookup_model_tf1(
self._input_saved_model_path, tags, signature_def_key
)
# Make sure that the desired input key and output key is present.
self.assertIn('input_vocabs', inputs.keys())
self.assertIn('lookup', outputs.keys())
signature_def_keys = [signature_def_key]
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_DYNAMIC_RANGE_INT8
),
tags=tags,
signature_keys=signature_def_keys,
)
quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
)
# Tests table lookup to make sure the table has been initialized
# successfully.
with session.Session(graph=ops.Graph()) as sess:
output_meta_graph_def = saved_model_loader.load(
sess, tags=tags, export_dir=self._output_saved_model_path
)
self.assertCountEqual(
output_meta_graph_def.signature_def.keys(), signature_def_keys
)
signature_def = output_meta_graph_def.signature_def[signature_def_key]
input_tensor_name = signature_def.inputs['input_vocabs'].name
input_tensor = sess.graph.get_tensor_by_name(input_tensor_name)
lookup_tensor_name = signature_def.outputs['lookup'].name
lookup_tensor = sess.graph.get_tensor_by_name(lookup_tensor_name)
lookup_val = sess.run(
lookup_tensor,
feed_dict={
input_tensor: np.array([b'model', b'quantization', b'hello'])
},
)
self.assertAllClose(lookup_val, [1.0, 2.0, 0.0])
def test_file_init_hash_table_lookup_model(self):
tags = {tag_constants.SERVING}
signature_def_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
# Create and save a simple model that involves a hash table.
inputs, outputs = self._create_and_save_file_init_hash_table_model_tf1(
self._input_saved_model_path, tags, signature_def_key
)
# Make sure that the desired input key and output key is present.
self.assertIn('input_vocabs', inputs.keys())
self.assertIn('lookup', outputs.keys())
signature_def_keys = [signature_def_key]
quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options=quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_DYNAMIC_RANGE_INT8
),
tags=tags,
signature_keys=signature_def_keys,
),
)
# Tests table lookup to make sure the table has been initialized
# successfully.
with session.Session(graph=ops.Graph()) as sess:
output_meta_graph_def = saved_model_loader.load(
sess, tags=tags, export_dir=self._output_saved_model_path
)
self.assertCountEqual(
output_meta_graph_def.signature_def.keys(), signature_def_keys
)
signature_def = output_meta_graph_def.signature_def[signature_def_key]
input_tensor_name = signature_def.inputs['input_vocabs'].name
input_tensor = sess.graph.get_tensor_by_name(input_tensor_name)
lookup_tensor_name = signature_def.outputs['lookup'].name
lookup_tensor = sess.graph.get_tensor_by_name(lookup_tensor_name)
lookup_val = sess.run(
lookup_tensor,
feed_dict={
input_tensor: np.array([b'dynamic', b'quantization', b'range'])
},
)
# "dynamic" is not in the table: -1 (default value)
self.assertAllClose(lookup_val, [-1.0, 2.0, 1.0])
| DynamicRangeQuantizationTest |
python | tensorflow__tensorflow | third_party/xla/build_tools/configure/configure_test.py | {
"start": 1771,
"end": 8088
} | class ____(absltest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
testdata = (
test_utils.xla_src_root() / "build_tools" / "configure" / "testdata"
)
with (testdata / "clang_local.bazelrc").open() as f:
cls.clang_local_bazelrc_lines = [line.strip() for line in f.readlines()]
with (testdata / "gcc.bazelrc").open() as f:
resolved_gcc_path = os.path.realpath(_GCC_PATH)
cls.gcc_bazelrc_lines = [
line.strip().replace(_GCC_PATH, resolved_gcc_path)
for line in f.readlines()
]
with (testdata / "cuda_clang.bazelrc").open() as f:
cls.cuda_clang_bazelrc_lines = [line.strip() for line in f.readlines()]
with (testdata / "cuda_clang_local.bazelrc").open() as f:
cls.cuda_clang_local_bazelrc_lines = [
line.strip() for line in f.readlines()
]
with (testdata / "default_cuda_clang.bazelrc").open() as f:
cls.default_cuda_clang_bazelrc_lines = [
line.strip() for line in f.readlines()
]
with (testdata / "nvcc_clang_local.bazelrc").open() as f:
cls.nvcc_clang_local_bazelrc_lines = [
line.strip() for line in f.readlines()
]
with (testdata / "nvcc_gcc.bazelrc").open() as f:
resolved_gcc_path = os.path.realpath(_GCC_PATH)
cls.nvcc_gcc_bazelrc_lines = [
line.strip().replace(_GCC_PATH, resolved_gcc_path)
for line in f.readlines()
]
def test_clang_bazelrc(self):
config = XLAConfigOptions(
backend=Backend.CPU,
os=OS.LINUX,
python_bin_path=_PYTHON_BIN_PATH,
host_compiler=HostCompiler.CLANG,
compiler_options=list(_COMPILER_OPTIONS),
cuda_compiler=CudaCompiler.NVCC,
using_nccl=False,
rocm_compiler=RocmCompiler.HIPCC,
sycl_compiler=SyclCompiler.ICPX,
)
bazelrc_lines = config.to_bazelrc_lines(
DiscoverablePathsAndVersions(
clang_path=_CLANG_PATH,
ld_library_path="",
clang_major_version=18,
)
)
self.assertEqual(bazelrc_lines, self.clang_local_bazelrc_lines)
def test_gcc_bazelrc(self):
config = XLAConfigOptions(
backend=Backend.CPU,
os=OS.LINUX,
python_bin_path=_PYTHON_BIN_PATH,
host_compiler=HostCompiler.GCC,
compiler_options=list(_COMPILER_OPTIONS),
cuda_compiler=CudaCompiler.NVCC,
using_nccl=False,
rocm_compiler=RocmCompiler.HIPCC,
sycl_compiler=SyclCompiler.ICPX,
)
bazelrc_lines = config.to_bazelrc_lines(
DiscoverablePathsAndVersions(
gcc_path=_GCC_PATH,
ld_library_path="",
)
)
self.assertEqual(bazelrc_lines, self.gcc_bazelrc_lines)
def test_cuda_clang_bazelrc(self):
config = XLAConfigOptions(
backend=Backend.CUDA,
os=OS.LINUX,
python_bin_path=_PYTHON_BIN_PATH,
host_compiler=HostCompiler.CLANG,
compiler_options=list(_COMPILER_OPTIONS),
cuda_compiler=CudaCompiler.CLANG,
using_nccl=False,
rocm_compiler=RocmCompiler.HIPCC,
sycl_compiler=SyclCompiler.ICPX,
)
bazelrc_lines = config.to_bazelrc_lines(
DiscoverablePathsAndVersions(
clang_path=None,
clang_major_version=None,
**_CUDA_SPECIFIC_PATHS_AND_VERSIONS,
)
)
self.assertEqual(bazelrc_lines, self.cuda_clang_bazelrc_lines)
def test_cuda_clang_local_bazelrc(self):
config = XLAConfigOptions(
backend=Backend.CUDA,
os=OS.LINUX,
python_bin_path=_PYTHON_BIN_PATH,
host_compiler=HostCompiler.CLANG,
compiler_options=list(_COMPILER_OPTIONS),
cuda_compiler=CudaCompiler.CLANG,
using_nccl=False,
rocm_compiler=RocmCompiler.HIPCC,
sycl_compiler=SyclCompiler.ICPX,
)
bazelrc_lines = config.to_bazelrc_lines(
DiscoverablePathsAndVersions(
clang_path=_CLANG_PATH,
clang_major_version=18,
**_CUDA_SPECIFIC_PATHS_AND_VERSIONS,
)
)
self.assertEqual(bazelrc_lines, self.cuda_clang_local_bazelrc_lines)
def test_default_cuda_clang_bazelrc(self):
config = XLAConfigOptions(
backend=Backend.CUDA,
os=OS.LINUX,
python_bin_path=_PYTHON_BIN_PATH,
host_compiler=HostCompiler.CLANG,
compiler_options=list(_COMPILER_OPTIONS),
cuda_compiler=CudaCompiler.CLANG,
using_nccl=False,
rocm_compiler=RocmCompiler.HIPCC,
sycl_compiler=SyclCompiler.ICPX,
)
bazelrc_lines = config.to_bazelrc_lines(
DiscoverablePathsAndVersions(
clang_path=_CLANG_PATH,
clang_major_version=17,
**_CUDA_COMPUTE_CAPABILITIES_AND_LD_LIBRARY_PATH,
)
)
self.assertEqual(bazelrc_lines, self.default_cuda_clang_bazelrc_lines)
def test_nvcc_clang_local_bazelrc(self):
config = XLAConfigOptions(
backend=Backend.CUDA,
os=OS.LINUX,
python_bin_path=_PYTHON_BIN_PATH,
host_compiler=HostCompiler.CLANG,
compiler_options=list(_COMPILER_OPTIONS),
cuda_compiler=CudaCompiler.NVCC,
using_nccl=False,
rocm_compiler=RocmCompiler.HIPCC,
sycl_compiler=SyclCompiler.ICPX,
)
bazelrc_lines = config.to_bazelrc_lines(
DiscoverablePathsAndVersions(
clang_path=_CLANG_PATH,
clang_major_version=18,
**_CUDA_SPECIFIC_PATHS_AND_VERSIONS,
)
)
self.assertEqual(bazelrc_lines, self.nvcc_clang_local_bazelrc_lines)
def test_nvcc_gcc_bazelrc(self):
config = XLAConfigOptions(
backend=Backend.CUDA,
os=OS.LINUX,
python_bin_path=_PYTHON_BIN_PATH,
host_compiler=HostCompiler.GCC,
compiler_options=list(_COMPILER_OPTIONS),
cuda_compiler=CudaCompiler.NVCC,
using_nccl=False,
rocm_compiler=RocmCompiler.HIPCC,
sycl_compiler=SyclCompiler.ICPX,
)
bazelrc_lines = config.to_bazelrc_lines(
DiscoverablePathsAndVersions(
gcc_path=_GCC_PATH,
**_CUDA_SPECIFIC_PATHS_AND_VERSIONS,
)
)
self.assertEqual(bazelrc_lines, self.nvcc_gcc_bazelrc_lines)
if __name__ == "__main__":
absltest.main()
| ConfigureTest |
python | django__django | tests/serializers/models/data.py | {
"start": 7129,
"end": 7217
} | class ____(AbstractBaseModel):
child_data = models.IntegerField()
| InheritAbstractModel |
python | ansible__ansible | lib/ansible/module_utils/facts/network/freebsd.py | {
"start": 845,
"end": 1029
} | class ____(GenericBsdIfconfigNetwork):
"""
This is the FreeBSD Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'FreeBSD'
| FreeBSDNetwork |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF008_attrs.py | {
"start": 1034,
"end": 1313
} | class ____:
mutable_default: list[int] = []
immutable_annotation: Sequence[int] = []
without_annotation = []
correct_code: list[int] = KNOWINGLY_MUTABLE_DEFAULT
perfectly_fine: list[int] = field(default_factory=list)
class_variable: ClassVar[list[int]] = []
| D |
python | kubernetes-client__python | kubernetes/client/api/flowcontrol_apiserver_api.py | {
"start": 543,
"end": 5214
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_group(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_group_with_http_info(**kwargs) # noqa: E501
def get_api_group_with_http_info(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/flowcontrol.apiserver.k8s.io/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIGroup', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| FlowcontrolApiserverApi |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 103720,
"end": 105871
} | class ____:
"""Test zu_ZA address provider methods"""
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{4}", postcode)
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city_name = faker.city_name()
assert isinstance(city_name, str)
assert city_name in ZuZaAddressProvider.cities
def test_city_suffix(self, faker, num_samples):
for _ in range(num_samples):
city_suffix = faker.city_suffix()
assert isinstance(city_suffix, str)
assert city_suffix in ZuZaAddressProvider.city_suffixes
def test_city(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
assert city in ZuZaAddressProvider.cities
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in ZuZaAddressProvider.countries
def test_street_name(self, faker, num_samples):
for _ in range(num_samples):
street_name = faker.street_name()
assert isinstance(street_name, str)
assert street_name in ZuZaAddressProvider.street_names
def test_address(self, faker, num_samples):
for _ in range(num_samples):
address = faker.address()
assert isinstance(address, str)
def test_province(self, faker, num_samples):
for _ in range(num_samples):
province = faker.province()
assert isinstance(province, str)
assert province in ZuZaAddressProvider.provinces
def test_administrative_unit(self, faker, num_samples):
for _ in range(num_samples):
administrative_unit = faker.administrative_unit()
assert isinstance(administrative_unit, str)
assert administrative_unit in ZuZaAddressProvider.provinces
| TestZuZa |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 52057,
"end": 52181
} | class ____(Elemwise):
_projection_passthrough = True
_parameters = ["frame", "decimals"]
operation = M.round
| Round |
python | getsentry__sentry | src/sentry/uptime/models.py | {
"start": 6836,
"end": 6985
} | class ____(enum.StrEnum):
ROUND_ROBIN = "round_robin"
@data_source_type_registry.register(DATA_SOURCE_UPTIME_SUBSCRIPTION)
| UptimeRegionScheduleMode |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 168095,
"end": 168537
} | class ____(RFC3542AncillaryTest,
SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
| RecvmsgRFC3542AncillaryUDPLITE6Test |
python | py-pdf__pypdf | pypdf/_writer.py | {
"start": 3694,
"end": 4282
} | class ____(enum.IntFlag):
NONE = 0
TEXT = enum.auto()
LINKS = enum.auto()
ATTACHMENTS = enum.auto()
OBJECTS_3D = enum.auto()
ALL_ANNOTATIONS = enum.auto()
XOBJECT_IMAGES = enum.auto()
INLINE_IMAGES = enum.auto()
DRAWING_IMAGES = enum.auto()
IMAGES = XOBJECT_IMAGES | INLINE_IMAGES | DRAWING_IMAGES
def _rolling_checksum(stream: BytesIO, blocksize: int = 65536) -> str:
hash = hashlib.md5(usedforsecurity=False)
for block in iter(lambda: stream.read(blocksize), b""):
hash.update(block)
return hash.hexdigest()
| ObjectDeletionFlag |
python | PyCQA__pylint | tests/functional/u/unused/unused_import.py | {
"start": 2460,
"end": 2792
} | class ____(six.with_metaclass(ABCMeta)):
"""Regression test for https://github.com/pylint-dev/pylint/issues/7506.
Requires six."""
# Regression test for https://github.com/pylint-dev/pylint/issues/3765
# `unused-import` should not be emitted when a type annotation uses quotation marks
from typing import List
| WithMetaclass |
python | google__jax | jax/_src/source_info_util.py | {
"start": 7187,
"end": 8502
} | class ____:
__slots__ = ['traceback', 'name_stack', 'prev']
def __init__(self, traceback: Traceback | None, *,
name_stack: NameStack | None = None):
self.traceback = traceback
self.name_stack = name_stack
def __enter__(self):
self.prev = _source_info_context.context
_source_info_context.context = _source_info_context.context.replace(
traceback=self.traceback, name_stack=self.name_stack)
def __exit__(self, exc_type, exc_value, traceback):
_source_info_context.context = self.prev
if exc_type is None or exc_value is None:
return
if self.traceback is None or has_user_context(exc_value):
return
filtered_tb = traceback_util.filter_traceback(self.traceback.as_python_traceback())
if filtered_tb:
msg = traceback_util.format_exception_only(exc_value)
msg = f'{msg}\n\n{_message}'
exp = JaxStackTraceBeforeTransformation(msg).with_traceback(filtered_tb)
exp.__context__ = exc_value.__context__
exp.__cause__ = exc_value.__cause__
exp.__suppress_context__ = exc_value.__suppress_context__
exc_value.__context__ = None
exc_value.__cause__ = exp
user_context = UserContextManager
def current_name_stack() -> NameStack:
return _source_info_context.context.name_stack
| UserContextManager |
python | openai__openai-python | src/openai/types/realtime/conversation_item_created_event.py | {
"start": 281,
"end": 825
} | class ____(BaseModel):
event_id: str
"""The unique ID of the server event."""
item: ConversationItem
"""A single item within a Realtime conversation."""
type: Literal["conversation.item.created"]
"""The event type, must be `conversation.item.created`."""
previous_item_id: Optional[str] = None
"""
The ID of the preceding item in the Conversation context, allows the client to
understand the order of the conversation. Can be `null` if the item has no
predecessor.
"""
| ConversationItemCreatedEvent |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/scheduler/instigation.py | {
"start": 2153,
"end": 3467
} | class ____(
NamedTuple(
"_DynamicPartitionsRequestResult",
[
("partitions_def_name", str),
("added_partitions", Optional[Sequence[str]]),
("deleted_partitions", Optional[Sequence[str]]),
("skipped_partitions", Sequence[str]),
],
)
):
def __new__(
cls,
partitions_def_name: str,
added_partitions: Optional[Sequence[str]],
deleted_partitions: Optional[Sequence[str]],
skipped_partitions: Sequence[str],
):
check.opt_sequence_param(added_partitions, "added_partitions")
check.opt_sequence_param(deleted_partitions, "deleted_partitions")
# One of added_partitions or deleted_partitions must be a sequence, and the other must be None
if not xor(added_partitions is None, deleted_partitions is None):
check.failed("Exactly one of added_partitions and deleted_partitions must be provided")
return super().__new__(
cls,
check.str_param(partitions_def_name, "partitions_def_name"),
added_partitions,
deleted_partitions,
check.sequence_param(skipped_partitions, "skipped_partitions"),
)
@whitelist_for_serdes(old_storage_names={"SensorJobData"})
| DynamicPartitionsRequestResult |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/compiler.py | {
"start": 32128,
"end": 240352
} | class ____(Compiled):
"""Default implementation of :class:`.Compiled`.
Compiles :class:`_expression.ClauseElement` objects into SQL strings.
"""
extract_map = EXTRACT_MAP
bindname_escape_characters: ClassVar[Mapping[str, str]] = (
util.immutabledict(
{
"%": "P",
"(": "A",
")": "Z",
":": "C",
".": "_",
"[": "_",
"]": "_",
" ": "_",
}
)
)
"""A mapping (e.g. dict or similar) containing a lookup of
characters keyed to replacement characters which will be applied to all
'bind names' used in SQL statements as a form of 'escaping'; the given
characters are replaced entirely with the 'replacement' character when
rendered in the SQL statement, and a similar translation is performed
on the incoming names used in parameter dictionaries passed to methods
like :meth:`_engine.Connection.execute`.
This allows bound parameter names used in :func:`_sql.bindparam` and
other constructs to have any arbitrary characters present without any
concern for characters that aren't allowed at all on the target database.
Third party dialects can establish their own dictionary here to replace the
default mapping, which will ensure that the particular characters in the
mapping will never appear in a bound parameter name.
The dictionary is evaluated at **class creation time**, so cannot be
modified at runtime; it must be present on the class when the class
is first declared.
Note that for dialects that have additional bound parameter rules such
as additional restrictions on leading characters, the
:meth:`_sql.SQLCompiler.bindparam_string` method may need to be augmented.
See the cx_Oracle compiler for an example of this.
.. versionadded:: 2.0.0rc1
"""
_bind_translate_re: ClassVar[Pattern[str]]
_bind_translate_chars: ClassVar[Mapping[str, str]]
is_sql = True
compound_keywords = COMPOUND_KEYWORDS
isdelete: bool = False
isinsert: bool = False
isupdate: bool = False
"""class-level defaults which can be set at the instance
level to define if this Compiled instance represents
INSERT/UPDATE/DELETE
"""
postfetch: Optional[List[Column[Any]]]
"""list of columns that can be post-fetched after INSERT or UPDATE to
receive server-updated values"""
insert_prefetch: Sequence[Column[Any]] = ()
"""list of columns for which default values should be evaluated before
an INSERT takes place"""
update_prefetch: Sequence[Column[Any]] = ()
"""list of columns for which onupdate default values should be evaluated
before an UPDATE takes place"""
implicit_returning: Optional[Sequence[ColumnElement[Any]]] = None
"""list of "implicit" returning columns for a toplevel INSERT or UPDATE
statement, used to receive newly generated values of columns.
.. versionadded:: 2.0 ``implicit_returning`` replaces the previous
``returning`` collection, which was not a generalized RETURNING
collection and instead was in fact specific to the "implicit returning"
feature.
"""
isplaintext: bool = False
binds: Dict[str, BindParameter[Any]]
"""a dictionary of bind parameter keys to BindParameter instances."""
bind_names: Dict[BindParameter[Any], str]
"""a dictionary of BindParameter instances to "compiled" names
that are actually present in the generated SQL"""
stack: List[_CompilerStackEntry]
"""major statements such as SELECT, INSERT, UPDATE, DELETE are
tracked in this stack using an entry format."""
returning_precedes_values: bool = False
"""set to True classwide to generate RETURNING
clauses before the VALUES or WHERE clause (i.e. MSSQL)
"""
render_table_with_column_in_update_from: bool = False
"""set to True classwide to indicate the SET clause
in a multi-table UPDATE statement should qualify
columns with the table name (i.e. MySQL only)
"""
ansi_bind_rules: bool = False
"""SQL 92 doesn't allow bind parameters to be used
in the columns clause of a SELECT, nor does it allow
ambiguous expressions like "? = ?". A compiler
subclass can set this flag to False if the target
driver/DB enforces this
"""
bindtemplate: str
"""template to render bound parameters based on paramstyle."""
compilation_bindtemplate: str
"""template used by compiler to render parameters before positional
paramstyle application"""
_numeric_binds_identifier_char: str
"""Character that's used to as the identifier of a numerical bind param.
For example if this char is set to ``$``, numerical binds will be rendered
in the form ``$1, $2, $3``.
"""
_result_columns: List[ResultColumnsEntry]
"""relates label names in the final SQL to a tuple of local
column/label name, ColumnElement object (if any) and
TypeEngine. CursorResult uses this for type processing and
column targeting"""
_textual_ordered_columns: bool = False
"""tell the result object that the column names as rendered are important,
but they are also "ordered" vs. what is in the compiled object here.
As of 1.4.42 this condition is only present when the statement is a
TextualSelect, e.g. text("....").columns(...), where it is required
that the columns are considered positionally and not by name.
"""
_ad_hoc_textual: bool = False
"""tell the result that we encountered text() or '*' constructs in the
middle of the result columns, but we also have compiled columns, so
if the number of columns in cursor.description does not match how many
expressions we have, that means we can't rely on positional at all and
should match on name.
"""
_ordered_columns: bool = True
"""
if False, means we can't be sure the list of entries
in _result_columns is actually the rendered order. Usually
True unless using an unordered TextualSelect.
"""
_loose_column_name_matching: bool = False
"""tell the result object that the SQL statement is textual, wants to match
up to Column objects, and may be using the ._tq_label in the SELECT rather
than the base name.
"""
_numeric_binds: bool = False
"""
True if paramstyle is "numeric". This paramstyle is trickier than
all the others.
"""
_render_postcompile: bool = False
"""
whether to render out POSTCOMPILE params during the compile phase.
This attribute is used only for end-user invocation of stmt.compile();
it's never used for actual statement execution, where instead the
dialect internals access and render the internal postcompile structure
directly.
"""
_post_compile_expanded_state: Optional[ExpandedState] = None
"""When render_postcompile is used, the ``ExpandedState`` used to create
the "expanded" SQL is assigned here, and then used by the ``.params``
accessor and ``.construct_params()`` methods for their return values.
.. versionadded:: 2.0.0rc1
"""
_pre_expanded_string: Optional[str] = None
"""Stores the original string SQL before 'post_compile' is applied,
for cases where 'post_compile' were used.
"""
_pre_expanded_positiontup: Optional[List[str]] = None
_insertmanyvalues: Optional[_InsertManyValues] = None
_insert_crud_params: Optional[crud._CrudParamSequence] = None
literal_execute_params: FrozenSet[BindParameter[Any]] = frozenset()
"""bindparameter objects that are rendered as literal values at statement
execution time.
"""
post_compile_params: FrozenSet[BindParameter[Any]] = frozenset()
"""bindparameter objects that are rendered as bound parameter placeholders
at statement execution time.
"""
escaped_bind_names: util.immutabledict[str, str] = util.EMPTY_DICT
"""Late escaping of bound parameter names that has to be converted
to the original name when looking in the parameter dictionary.
"""
has_out_parameters = False
"""if True, there are bindparam() objects that have the isoutparam
flag set."""
postfetch_lastrowid = False
"""if True, and this in insert, use cursor.lastrowid to populate
result.inserted_primary_key. """
_cache_key_bind_match: Optional[
Tuple[
Dict[
BindParameter[Any],
List[BindParameter[Any]],
],
Dict[
str,
BindParameter[Any],
],
]
] = None
"""a mapping that will relate the BindParameter object we compile
to those that are part of the extracted collection of parameters
in the cache key, if we were given a cache key.
"""
positiontup: Optional[List[str]] = None
"""for a compiled construct that uses a positional paramstyle, will be
a sequence of strings, indicating the names of bound parameters in order.
This is used in order to render bound parameters in their correct order,
and is combined with the :attr:`_sql.Compiled.params` dictionary to
render parameters.
This sequence always contains the unescaped name of the parameters.
.. seealso::
:ref:`faq_sql_expression_string` - includes a usage example for
debugging use cases.
"""
_values_bindparam: Optional[List[str]] = None
_visited_bindparam: Optional[List[str]] = None
inline: bool = False
ctes: Optional[MutableMapping[CTE, str]]
# Detect same CTE references - Dict[(level, name), cte]
# Level is required for supporting nesting
ctes_by_level_name: Dict[Tuple[int, str], CTE]
# To retrieve key/level in ctes_by_level_name -
# Dict[cte_reference, (level, cte_name, cte_opts)]
level_name_by_cte: Dict[CTE, Tuple[int, str, selectable._CTEOpts]]
ctes_recursive: bool
_post_compile_pattern = re.compile(r"__\[POSTCOMPILE_(\S+?)(~~.+?~~)?\]")
_pyformat_pattern = re.compile(r"%\(([^)]+?)\)s")
_positional_pattern = re.compile(
f"{_pyformat_pattern.pattern}|{_post_compile_pattern.pattern}"
)
_collect_params: Final[bool]
_collected_params: util.immutabledict[str, Any]
@classmethod
def _init_compiler_cls(cls):
cls._init_bind_translate()
@classmethod
def _init_bind_translate(cls):
reg = re.escape("".join(cls.bindname_escape_characters))
cls._bind_translate_re = re.compile(f"[{reg}]")
cls._bind_translate_chars = cls.bindname_escape_characters
def __init__(
self,
dialect: Dialect,
statement: Optional[ClauseElement],
cache_key: Optional[CacheKey] = None,
column_keys: Optional[Sequence[str]] = None,
for_executemany: bool = False,
linting: Linting = NO_LINTING,
_supporting_against: Optional[SQLCompiler] = None,
**kwargs: Any,
):
"""Construct a new :class:`.SQLCompiler` object.
:param dialect: :class:`.Dialect` to be used
:param statement: :class:`_expression.ClauseElement` to be compiled
:param column_keys: a list of column names to be compiled into an
INSERT or UPDATE statement.
:param for_executemany: whether INSERT / UPDATE statements should
expect that they are to be invoked in an "executemany" style,
which may impact how the statement will be expected to return the
values of defaults and autoincrement / sequences and similar.
Depending on the backend and driver in use, support for retrieving
these values may be disabled which means SQL expressions may
be rendered inline, RETURNING may not be rendered, etc.
:param kwargs: additional keyword arguments to be consumed by the
superclass.
"""
self.column_keys = column_keys
self.cache_key = cache_key
if cache_key:
cksm = {b.key: b for b in cache_key[1]}
ckbm = {b: [b] for b in cache_key[1]}
self._cache_key_bind_match = (ckbm, cksm)
# compile INSERT/UPDATE defaults/sequences to expect executemany
# style execution, which may mean no pre-execute of defaults,
# or no RETURNING
self.for_executemany = for_executemany
self.linting = linting
# a dictionary of bind parameter keys to BindParameter
# instances.
self.binds = {}
# a dictionary of BindParameter instances to "compiled" names
# that are actually present in the generated SQL
self.bind_names = util.column_dict()
# stack which keeps track of nested SELECT statements
self.stack = []
self._result_columns = []
# true if the paramstyle is positional
self.positional = dialect.positional
if self.positional:
self._numeric_binds = nb = dialect.paramstyle.startswith("numeric")
if nb:
self._numeric_binds_identifier_char = (
"$" if dialect.paramstyle == "numeric_dollar" else ":"
)
self.compilation_bindtemplate = _pyformat_template
else:
self.compilation_bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
self.ctes = None
self.label_length = (
dialect.label_length or dialect.max_identifier_length
)
# a map which tracks "anonymous" identifiers that are created on
# the fly here
self.anon_map = prefix_anon_map()
# a map which tracks "truncated" names based on
# dialect.label_length or dialect.max_identifier_length
self.truncated_names: Dict[Tuple[str, str], str] = {}
self._truncated_counters: Dict[str, int] = {}
if not cache_key:
self._collect_params = True
self._collected_params = util.EMPTY_DICT
else:
self._collect_params = False # type: ignore[misc]
Compiled.__init__(self, dialect, statement, **kwargs)
if self.isinsert or self.isupdate or self.isdelete:
if TYPE_CHECKING:
assert isinstance(statement, UpdateBase)
if self.isinsert or self.isupdate:
if TYPE_CHECKING:
assert isinstance(statement, ValuesBase)
if statement._inline:
self.inline = True
elif self.for_executemany and (
not self.isinsert
or (
self.dialect.insert_executemany_returning
and statement._return_defaults
)
):
self.inline = True
self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
if _supporting_against:
self.__dict__.update(
{
k: v
for k, v in _supporting_against.__dict__.items()
if k
not in {
"state",
"dialect",
"preparer",
"positional",
"_numeric_binds",
"compilation_bindtemplate",
"bindtemplate",
}
}
)
if self.state is CompilerState.STRING_APPLIED:
if self.positional:
if self._numeric_binds:
self._process_numeric()
else:
self._process_positional()
if self._render_postcompile:
parameters = self.construct_params(
escape_names=False,
_no_postcompile=True,
)
self._process_parameters_for_postcompile(
parameters, _populate_self=True
)
@property
def insert_single_values_expr(self) -> Optional[str]:
"""When an INSERT is compiled with a single set of parameters inside
a VALUES expression, the string is assigned here, where it can be
used for insert batching schemes to rewrite the VALUES expression.
.. versionchanged:: 2.0 This collection is no longer used by
SQLAlchemy's built-in dialects, in favor of the currently
internal ``_insertmanyvalues`` collection that is used only by
:class:`.SQLCompiler`.
"""
if self._insertmanyvalues is None:
return None
else:
return self._insertmanyvalues.single_values_expr
@util.ro_memoized_property
def effective_returning(self) -> Optional[Sequence[ColumnElement[Any]]]:
"""The effective "returning" columns for INSERT, UPDATE or DELETE.
This is either the so-called "implicit returning" columns which are
calculated by the compiler on the fly, or those present based on what's
present in ``self.statement._returning`` (expanded into individual
columns using the ``._all_selected_columns`` attribute) i.e. those set
explicitly using the :meth:`.UpdateBase.returning` method.
.. versionadded:: 2.0
"""
if self.implicit_returning:
return self.implicit_returning
elif self.statement is not None and is_dml(self.statement):
return [
c
for c in self.statement._all_selected_columns
if is_column_element(c)
]
else:
return None
@property
def returning(self):
"""backwards compatibility; returns the
effective_returning collection.
"""
return self.effective_returning
@property
def current_executable(self):
"""Return the current 'executable' that is being compiled.
This is currently the :class:`_sql.Select`, :class:`_sql.Insert`,
:class:`_sql.Update`, :class:`_sql.Delete`,
:class:`_sql.CompoundSelect` object that is being compiled.
Specifically it's assigned to the ``self.stack`` list of elements.
When a statement like the above is being compiled, it normally
is also assigned to the ``.statement`` attribute of the
:class:`_sql.Compiler` object. However, all SQL constructs are
ultimately nestable, and this attribute should never be consulted
by a ``visit_`` method, as it is not guaranteed to be assigned
nor guaranteed to correspond to the current statement being compiled.
"""
try:
return self.stack[-1]["selectable"]
except IndexError as ie:
raise IndexError("Compiler does not have a stack entry") from ie
@property
def prefetch(self):
return list(self.insert_prefetch) + list(self.update_prefetch)
@util.memoized_property
def _global_attributes(self) -> Dict[Any, Any]:
return {}
def _add_to_params(self, item: ExecutableStatement) -> None:
# assumes that this is called before traversing the statement
# so the call happens outer to inner, meaning that existing params
# take precedence
if item._params:
self._collected_params = item._params | self._collected_params
@util.memoized_instancemethod
def _init_cte_state(self) -> MutableMapping[CTE, str]:
"""Initialize collections related to CTEs only if
a CTE is located, to save on the overhead of
these collections otherwise.
"""
# collect CTEs to tack on top of a SELECT
# To store the query to print - Dict[cte, text_query]
ctes: MutableMapping[CTE, str] = util.OrderedDict()
self.ctes = ctes
# Detect same CTE references - Dict[(level, name), cte]
# Level is required for supporting nesting
self.ctes_by_level_name = {}
# To retrieve key/level in ctes_by_level_name -
# Dict[cte_reference, (level, cte_name, cte_opts)]
self.level_name_by_cte = {}
self.ctes_recursive = False
return ctes
@contextlib.contextmanager
def _nested_result(self):
"""special API to support the use case of 'nested result sets'"""
result_columns, ordered_columns = (
self._result_columns,
self._ordered_columns,
)
self._result_columns, self._ordered_columns = [], False
try:
if self.stack:
entry = self.stack[-1]
entry["need_result_map_for_nested"] = True
else:
entry = None
yield self._result_columns, self._ordered_columns
finally:
if entry:
entry.pop("need_result_map_for_nested")
self._result_columns, self._ordered_columns = (
result_columns,
ordered_columns,
)
def _process_positional(self):
assert not self.positiontup
assert self.state is CompilerState.STRING_APPLIED
assert not self._numeric_binds
if self.dialect.paramstyle == "format":
placeholder = "%s"
else:
assert self.dialect.paramstyle == "qmark"
placeholder = "?"
positions = []
def find_position(m: re.Match[str]) -> str:
normal_bind = m.group(1)
if normal_bind:
positions.append(normal_bind)
return placeholder
else:
# this a post-compile bind
positions.append(m.group(2))
return m.group(0)
self.string = re.sub(
self._positional_pattern, find_position, self.string
)
if self.escaped_bind_names:
reverse_escape = {v: k for k, v in self.escaped_bind_names.items()}
assert len(self.escaped_bind_names) == len(reverse_escape)
self.positiontup = [
reverse_escape.get(name, name) for name in positions
]
else:
self.positiontup = positions
if self._insertmanyvalues:
positions = []
single_values_expr = re.sub(
self._positional_pattern,
find_position,
self._insertmanyvalues.single_values_expr,
)
insert_crud_params = [
(
v[0],
v[1],
re.sub(self._positional_pattern, find_position, v[2]),
v[3],
)
for v in self._insertmanyvalues.insert_crud_params
]
self._insertmanyvalues = self._insertmanyvalues._replace(
single_values_expr=single_values_expr,
insert_crud_params=insert_crud_params,
)
def _process_numeric(self):
assert self._numeric_binds
assert self.state is CompilerState.STRING_APPLIED
num = 1
param_pos: Dict[str, str] = {}
order: Iterable[str]
if self._insertmanyvalues and self._values_bindparam is not None:
# bindparams that are not in values are always placed first.
# this avoids the need of changing them when using executemany
# values () ()
order = itertools.chain(
(
name
for name in self.bind_names.values()
if name not in self._values_bindparam
),
self.bind_names.values(),
)
else:
order = self.bind_names.values()
for bind_name in order:
if bind_name in param_pos:
continue
bind = self.binds[bind_name]
if (
bind in self.post_compile_params
or bind in self.literal_execute_params
):
# set to None to just mark the in positiontup, it will not
# be replaced below.
param_pos[bind_name] = None # type: ignore
else:
ph = f"{self._numeric_binds_identifier_char}{num}"
num += 1
param_pos[bind_name] = ph
self.next_numeric_pos = num
self.positiontup = list(param_pos)
if self.escaped_bind_names:
len_before = len(param_pos)
param_pos = {
self.escaped_bind_names.get(name, name): pos
for name, pos in param_pos.items()
}
assert len(param_pos) == len_before
# Can't use format here since % chars are not escaped.
self.string = self._pyformat_pattern.sub(
lambda m: param_pos[m.group(1)], self.string
)
if self._insertmanyvalues:
single_values_expr = (
# format is ok here since single_values_expr includes only
# place-holders
self._insertmanyvalues.single_values_expr
% param_pos
)
insert_crud_params = [
(v[0], v[1], "%s", v[3])
for v in self._insertmanyvalues.insert_crud_params
]
self._insertmanyvalues = self._insertmanyvalues._replace(
# This has the numbers (:1, :2)
single_values_expr=single_values_expr,
# The single binds are instead %s so they can be formatted
insert_crud_params=insert_crud_params,
)
@util.memoized_property
def _bind_processors(
self,
) -> MutableMapping[
str, Union[_BindProcessorType[Any], Sequence[_BindProcessorType[Any]]]
]:
# mypy is not able to see the two value types as the above Union,
# it just sees "object". don't know how to resolve
return {
key: value # type: ignore
for key, value in (
(
self.bind_names[bindparam],
(
bindparam.type._cached_bind_processor(self.dialect)
if not bindparam.type._is_tuple_type
else tuple(
elem_type._cached_bind_processor(self.dialect)
for elem_type in cast(
TupleType, bindparam.type
).types
)
),
)
for bindparam in self.bind_names
)
if value is not None
}
def is_subquery(self):
return len(self.stack) > 1
@property
def sql_compiler(self) -> Self:
return self
def construct_expanded_state(
self,
params: Optional[_CoreSingleExecuteParams] = None,
escape_names: bool = True,
) -> ExpandedState:
"""Return a new :class:`.ExpandedState` for a given parameter set.
For queries that use "expanding" or other late-rendered parameters,
this method will provide for both the finalized SQL string as well
as the parameters that would be used for a particular parameter set.
.. versionadded:: 2.0.0rc1
"""
parameters = self.construct_params(
params,
escape_names=escape_names,
_no_postcompile=True,
)
return self._process_parameters_for_postcompile(
parameters,
)
def construct_params(
self,
params: Optional[_CoreSingleExecuteParams] = None,
extracted_parameters: Optional[Sequence[BindParameter[Any]]] = None,
escape_names: bool = True,
_group_number: Optional[int] = None,
_check: bool = True,
_no_postcompile: bool = False,
_collected_params: _CoreSingleExecuteParams | None = None,
) -> _MutableCoreSingleExecuteParams:
"""return a dictionary of bind parameter keys and values"""
if _collected_params is not None:
assert not self._collect_params
elif self._collect_params:
_collected_params = self._collected_params
if _collected_params:
if not params:
params = _collected_params
else:
params = {**_collected_params, **params}
if self._render_postcompile and not _no_postcompile:
assert self._post_compile_expanded_state is not None
if not params:
return dict(self._post_compile_expanded_state.parameters)
else:
raise exc.InvalidRequestError(
"can't construct new parameters when render_postcompile "
"is used; the statement is hard-linked to the original "
"parameters. Use construct_expanded_state to generate a "
"new statement and parameters."
)
has_escaped_names = escape_names and bool(self.escaped_bind_names)
if extracted_parameters:
# related the bound parameters collected in the original cache key
# to those collected in the incoming cache key. They will not have
# matching names but they will line up positionally in the same
# way. The parameters present in self.bind_names may be clones of
# these original cache key params in the case of DML but the .key
# will be guaranteed to match.
if self.cache_key is None:
raise exc.CompileError(
"This compiled object has no original cache key; "
"can't pass extracted_parameters to construct_params"
)
else:
orig_extracted = self.cache_key[1]
ckbm_tuple = self._cache_key_bind_match
assert ckbm_tuple is not None
ckbm, _ = ckbm_tuple
resolved_extracted = {
bind: extracted
for b, extracted in zip(orig_extracted, extracted_parameters)
for bind in ckbm[b]
}
else:
resolved_extracted = None
if params:
pd = {}
for bindparam, name in self.bind_names.items():
escaped_name = (
self.escaped_bind_names.get(name, name)
if has_escaped_names
else name
)
if bindparam.key in params:
pd[escaped_name] = params[bindparam.key]
elif name in params:
pd[escaped_name] = params[name]
elif _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d"
% (bindparam.key, _group_number),
code="cd3x",
)
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key,
code="cd3x",
)
else:
if resolved_extracted:
value_param = resolved_extracted.get(
bindparam, bindparam
)
else:
value_param = bindparam
if bindparam.callable:
pd[escaped_name] = value_param.effective_value
else:
pd[escaped_name] = value_param.value
return pd
else:
pd = {}
for bindparam, name in self.bind_names.items():
escaped_name = (
self.escaped_bind_names.get(name, name)
if has_escaped_names
else name
)
if _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d"
% (bindparam.key, _group_number),
code="cd3x",
)
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key,
code="cd3x",
)
if resolved_extracted:
value_param = resolved_extracted.get(bindparam, bindparam)
else:
value_param = bindparam
if bindparam.callable:
pd[escaped_name] = value_param.effective_value
else:
pd[escaped_name] = value_param.value
return pd
@util.memoized_instancemethod
def _get_set_input_sizes_lookup(self):
dialect = self.dialect
include_types = dialect.include_set_input_sizes
exclude_types = dialect.exclude_set_input_sizes
dbapi = dialect.dbapi
def lookup_type(typ):
dbtype = typ._unwrapped_dialect_impl(dialect).get_dbapi_type(dbapi)
if (
dbtype is not None
and (exclude_types is None or dbtype not in exclude_types)
and (include_types is None or dbtype in include_types)
):
return dbtype
else:
return None
inputsizes = {}
literal_execute_params = self.literal_execute_params
for bindparam in self.bind_names:
if bindparam in literal_execute_params:
continue
if bindparam.type._is_tuple_type:
inputsizes[bindparam] = [
lookup_type(typ)
for typ in cast(TupleType, bindparam.type).types
]
else:
inputsizes[bindparam] = lookup_type(bindparam.type)
return inputsizes
@property
def params(self):
"""Return the bind param dictionary embedded into this
compiled object, for those values that are present.
.. seealso::
:ref:`faq_sql_expression_string` - includes a usage example for
debugging use cases.
"""
return self.construct_params(_check=False)
def _process_parameters_for_postcompile(
self,
parameters: _MutableCoreSingleExecuteParams,
_populate_self: bool = False,
) -> ExpandedState:
"""handle special post compile parameters.
These include:
* "expanding" parameters -typically IN tuples that are rendered
on a per-parameter basis for an otherwise fixed SQL statement string.
* literal_binds compiled with the literal_execute flag. Used for
things like SQL Server "TOP N" where the driver does not accommodate
N as a bound parameter.
"""
expanded_parameters = {}
new_positiontup: Optional[List[str]]
pre_expanded_string = self._pre_expanded_string
if pre_expanded_string is None:
pre_expanded_string = self.string
if self.positional:
new_positiontup = []
pre_expanded_positiontup = self._pre_expanded_positiontup
if pre_expanded_positiontup is None:
pre_expanded_positiontup = self.positiontup
else:
new_positiontup = pre_expanded_positiontup = None
processors = self._bind_processors
single_processors = cast(
"Mapping[str, _BindProcessorType[Any]]", processors
)
tuple_processors = cast(
"Mapping[str, Sequence[_BindProcessorType[Any]]]", processors
)
new_processors: Dict[str, _BindProcessorType[Any]] = {}
replacement_expressions: Dict[str, Any] = {}
to_update_sets: Dict[str, Any] = {}
# notes:
# *unescaped* parameter names in:
# self.bind_names, self.binds, self._bind_processors, self.positiontup
#
# *escaped* parameter names in:
# construct_params(), replacement_expressions
numeric_positiontup: Optional[List[str]] = None
if self.positional and pre_expanded_positiontup is not None:
names: Iterable[str] = pre_expanded_positiontup
if self._numeric_binds:
numeric_positiontup = []
else:
names = self.bind_names.values()
ebn = self.escaped_bind_names
for name in names:
escaped_name = ebn.get(name, name) if ebn else name
parameter = self.binds[name]
if parameter in self.literal_execute_params:
if escaped_name not in replacement_expressions:
replacement_expressions[escaped_name] = (
self.render_literal_bindparam(
parameter,
render_literal_value=parameters.pop(escaped_name),
)
)
continue
if parameter in self.post_compile_params:
if escaped_name in replacement_expressions:
to_update = to_update_sets[escaped_name]
values = None
else:
# we are removing the parameter from parameters
# because it is a list value, which is not expected by
# TypeEngine objects that would otherwise be asked to
# process it. the single name is being replaced with
# individual numbered parameters for each value in the
# param.
#
# note we are also inserting *escaped* parameter names
# into the given dictionary. default dialect will
# use these param names directly as they will not be
# in the escaped_bind_names dictionary.
values = parameters.pop(name)
leep_res = self._literal_execute_expanding_parameter(
escaped_name, parameter, values
)
(to_update, replacement_expr) = leep_res
to_update_sets[escaped_name] = to_update
replacement_expressions[escaped_name] = replacement_expr
if not parameter.literal_execute:
parameters.update(to_update)
if parameter.type._is_tuple_type:
assert values is not None
new_processors.update(
(
"%s_%s_%s" % (name, i, j),
tuple_processors[name][j - 1],
)
for i, tuple_element in enumerate(values, 1)
for j, _ in enumerate(tuple_element, 1)
if name in tuple_processors
and tuple_processors[name][j - 1] is not None
)
else:
new_processors.update(
(key, single_processors[name])
for key, _ in to_update
if name in single_processors
)
if numeric_positiontup is not None:
numeric_positiontup.extend(
name for name, _ in to_update
)
elif new_positiontup is not None:
# to_update has escaped names, but that's ok since
# these are new names, that aren't in the
# escaped_bind_names dict.
new_positiontup.extend(name for name, _ in to_update)
expanded_parameters[name] = [
expand_key for expand_key, _ in to_update
]
elif new_positiontup is not None:
new_positiontup.append(name)
def process_expanding(m):
key = m.group(1)
expr = replacement_expressions[key]
# if POSTCOMPILE included a bind_expression, render that
# around each element
if m.group(2):
tok = m.group(2).split("~~")
be_left, be_right = tok[1], tok[3]
expr = ", ".join(
"%s%s%s" % (be_left, exp, be_right)
for exp in expr.split(", ")
)
return expr
statement = re.sub(
self._post_compile_pattern, process_expanding, pre_expanded_string
)
if numeric_positiontup is not None:
assert new_positiontup is not None
param_pos = {
key: f"{self._numeric_binds_identifier_char}{num}"
for num, key in enumerate(
numeric_positiontup, self.next_numeric_pos
)
}
# Can't use format here since % chars are not escaped.
statement = self._pyformat_pattern.sub(
lambda m: param_pos[m.group(1)], statement
)
new_positiontup.extend(numeric_positiontup)
expanded_state = ExpandedState(
statement,
parameters,
new_processors,
new_positiontup,
expanded_parameters,
)
if _populate_self:
# this is for the "render_postcompile" flag, which is not
# otherwise used internally and is for end-user debugging and
# special use cases.
self._pre_expanded_string = pre_expanded_string
self._pre_expanded_positiontup = pre_expanded_positiontup
self.string = expanded_state.statement
self.positiontup = (
list(expanded_state.positiontup or ())
if self.positional
else None
)
self._post_compile_expanded_state = expanded_state
return expanded_state
    @util.preload_module("sqlalchemy.engine.cursor")
    def _create_result_map(self):
        """utility method used for unit tests only.

        Builds the description-match map for this compiled statement's
        ``_result_columns`` via ``CursorResultMetaData``.
        """
        cursor = util.preloaded.engine_cursor
        return cursor.CursorResultMetaData._create_description_match_map(
            self._result_columns
        )
# assigned by crud.py for insert/update statements
_get_bind_name_for_col: _BindNameForColProtocol
    @util.memoized_property
    def _within_exec_param_key_getter(self) -> Callable[[Any], str]:
        # memoized alias for the bind-name getter assigned by crud.py
        # (see _get_bind_name_for_col above); maps a column to the
        # parameter key used within execution
        getter = self._get_bind_name_for_col
        return getter
    @util.memoized_property
    @util.preload_module("sqlalchemy.engine.result")
    def _inserted_primary_key_from_lastrowid_getter(self):
        """Return a callable ``get(lastrowid, parameters)`` that produces
        the inserted primary-key row from ``cursor.lastrowid`` plus the
        INSERT parameters."""
        result = util.preloaded.engine_result

        param_key_getter = self._within_exec_param_key_getter

        assert self.compile_state is not None
        statement = self.compile_state.statement

        if TYPE_CHECKING:
            assert isinstance(statement, Insert)

        table = statement.table

        # one (parameter-dict getter, column) pair per PK column
        getters = [
            (operator.methodcaller("get", param_key_getter(col), None), col)
            for col in table.primary_key
        ]

        autoinc_getter = None
        autoinc_col = table._autoincrement_column
        if autoinc_col is not None:
            # apply type post processors to the lastrowid
            lastrowid_processor = autoinc_col.type._cached_result_processor(
                self.dialect, None
            )
            autoinc_key = param_key_getter(autoinc_col)

            # if a bind value is present for the autoincrement column
            # in the parameters, we need to do the logic dictated by
            # #7998; honor a non-None user-passed parameter over lastrowid.
            # previously in the 1.4 series we weren't fetching lastrowid
            # at all if the key were present in the parameters
            if autoinc_key in self.binds:

                def _autoinc_getter(lastrowid, parameters):
                    param_value = parameters.get(autoinc_key, lastrowid)
                    if param_value is not None:
                        # they supplied non-None parameter, use that.
                        # SQLite at least is observed to return the wrong
                        # cursor.lastrowid for INSERT..ON CONFLICT so it
                        # can't be used in all cases
                        return param_value
                    else:
                        # use lastrowid
                        return lastrowid

                # work around mypy https://github.com/python/mypy/issues/14027
                autoinc_getter = _autoinc_getter

        else:
            lastrowid_processor = None

        row_fn = result.result_tuple([col.key for col in table.primary_key])

        def get(lastrowid, parameters):
            """given cursor.lastrowid value and the parameters used for INSERT,
            return a "row" that represents the primary key, either by
            using the "lastrowid" or by extracting values from the parameters
            that were sent along with the INSERT.

            """
            if lastrowid_processor is not None:
                lastrowid = lastrowid_processor(lastrowid)

            if lastrowid is None:
                return row_fn(getter(parameters) for getter, col in getters)
            else:
                return row_fn(
                    (
                        (
                            autoinc_getter(lastrowid, parameters)
                            if autoinc_getter is not None
                            else lastrowid
                        )
                        if col is autoinc_col
                        else getter(parameters)
                    )
                    for getter, col in getters
                )

        return get
    @util.memoized_property
    @util.preload_module("sqlalchemy.engine.result")
    def _inserted_primary_key_from_returning_getter(self):
        """Return a callable ``get(row, parameters)`` that produces the
        inserted primary-key row from a RETURNING row, falling back to the
        INSERT parameters for PK columns not present in RETURNING."""
        result = util.preloaded.engine_result

        assert self.compile_state is not None
        statement = self.compile_state.statement

        if TYPE_CHECKING:
            assert isinstance(statement, Insert)

        param_key_getter = self._within_exec_param_key_getter
        table = statement.table

        returning = self.implicit_returning
        assert returning is not None
        # position of each returned column within the RETURNING row
        ret = {col: idx for idx, col in enumerate(returning)}

        # per PK column: (getter, use_row) -- True means read from the
        # RETURNING row, False means read from the parameter dict
        getters = cast(
            "List[Tuple[Callable[[Any], Any], bool]]",
            [
                (
                    (operator.itemgetter(ret[col]), True)
                    if col in ret
                    else (
                        operator.methodcaller(
                            "get", param_key_getter(col), None
                        ),
                        False,
                    )
                )
                for col in table.primary_key
            ],
        )

        row_fn = result.result_tuple([col.key for col in table.primary_key])

        def get(row, parameters):
            return row_fn(
                getter(row) if use_row else getter(parameters)
                for getter, use_row in getters
            )

        return get
def default_from(self) -> str:
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle Database a chance to tack on a ``FROM DUAL`` to the string
output.
"""
return ""
    def visit_override_binds(self, override_binds, **kw):
        """SQL compile the nested element of an _OverrideBinds with
        bindparams swapped out.

        The _OverrideBinds is not normally expected to be compiled; it
        is meant to be used when an already cached statement is to be used,
        the compilation was already performed, and only the bound params should
        be swapped in at execution time.

        However, there are test cases that exercise this object, and
        additionally the ORM subquery loader is known to feed in expressions
        which include this construct into new queries (discovered in #11173),
        so it has to do the right thing at compile time as well.

        """

        # get SQL text first
        sqltext = override_binds.element._compiler_dispatch(self, **kw)

        # for a test compile that is not for caching, change binds after the
        # fact.  note that we don't try to
        # swap the bindparam as we compile, because our element may be
        # elsewhere in the statement already (e.g. a subquery or perhaps a
        # CTE) and was already visited / compiled. See
        # test_relationship_criteria.py ->
        #  test_selectinload_local_criteria_subquery
        for k in override_binds.translate:
            if k not in self.binds:
                continue
            bp = self.binds[k]

            # so this would work, just change the value of bp in place.
            # but we don't want to mutate things outside.
            # bp.value = override_binds.translate[bp.key]
            # continue

            # instead, need to replace bp with new_bp or otherwise accommodate
            # in all internal collections
            new_bp = bp._with_value(
                override_binds.translate[bp.key],
                maintain_key=True,
                required=False,
            )

            # re-point every internal collection from bp to new_bp
            name = self.bind_names[bp]
            self.binds[k] = self.binds[name] = new_bp
            self.bind_names[new_bp] = name
            self.bind_names.pop(bp, None)

            if bp in self.post_compile_params:
                self.post_compile_params |= {new_bp}
            if bp in self.literal_execute_params:
                self.literal_execute_params |= {new_bp}

            ckbm_tuple = self._cache_key_bind_match
            if ckbm_tuple:
                ckbm, cksm = ckbm_tuple
                for bp in bp._cloned_set:
                    if bp.key in cksm:
                        cb = cksm[bp.key]
                        ckbm[cb].append(new_bp)

        return sqltext
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
def visit_select_statement_grouping(self, grouping, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
    def visit_label_reference(
        self, element, within_columns_clause=False, **kwargs
    ):
        """Resolve a reference to a SELECT's label, used for ORDER BY /
        GROUP BY expressions that name a labeled column."""
        if self.stack and self.dialect.supports_simple_order_by_label:
            try:
                compile_state = cast(
                    "Union[SelectState, CompoundSelectState]",
                    self.stack[-1]["compile_state"],
                )
            except KeyError as ke:
                raise exc.CompileError(
                    "Can't resolve label reference for ORDER BY / "
                    "GROUP BY / DISTINCT etc."
                ) from ke

            (
                with_cols,
                only_froms,
                only_cols,
            ) = compile_state._label_resolve_dict
            if within_columns_clause:
                resolve_dict = only_froms
            else:
                resolve_dict = only_cols

            # this can be None in the case that a _label_reference()
            # were subject to a replacement operation, in which case
            # the replacement of the Label element may have changed
            # to something else like a ColumnClause expression.
            order_by_elem = element.element._order_by_label_element

            if (
                order_by_elem is not None
                and order_by_elem.name in resolve_dict
                and order_by_elem.shares_lineage(
                    resolve_dict[order_by_elem.name]
                )
            ):
                # tell visit_label to render only the label name
                kwargs["render_label_as_label"] = (
                    element.element._order_by_label_element
                )
        return self.process(
            element.element,
            within_columns_clause=within_columns_clause,
            **kwargs,
        )
    def visit_textual_label_reference(
        self, element, within_columns_clause=False, **kwargs
    ):
        """Resolve a string-named label reference (e.g. ``order_by("name")``)
        against the enclosing SELECT's label-resolution dictionaries."""
        if not self.stack:
            # compiling the element outside of the context of a SELECT
            return self.process(element._text_clause)

        try:
            compile_state = cast(
                "Union[SelectState, CompoundSelectState]",
                self.stack[-1]["compile_state"],
            )
        except KeyError as ke:
            coercions._no_text_coercion(
                element.element,
                extra=(
                    "Can't resolve label reference for ORDER BY / "
                    "GROUP BY / DISTINCT etc."
                ),
                exc_cls=exc.CompileError,
                err=ke,
            )

        with_cols, only_froms, only_cols = compile_state._label_resolve_dict
        try:
            if within_columns_clause:
                col = only_froms[element.element]
            else:
                col = with_cols[element.element]
        except KeyError as err:
            # unknown name: raise via the no-text-coercion error path
            coercions._no_text_coercion(
                element.element,
                extra=(
                    "Can't resolve label reference for ORDER BY / "
                    "GROUP BY / DISTINCT etc."
                ),
                exc_cls=exc.CompileError,
                err=err,
            )
        else:
            # render the resolved column as its label name
            kwargs["render_label_as_label"] = col
            return self.process(
                col, within_columns_clause=within_columns_clause, **kwargs
            )
    def visit_label(
        self,
        label,
        add_to_result_map=None,
        within_label_clause=False,
        within_columns_clause=False,
        render_label_as_label=None,
        result_map_targets=(),
        **kw,
    ):
        """Render a Label construct: either ``<expr> AS <name>``, the bare
        label name, or just the inner expression, depending on context."""
        # only render labels within the columns clause
        # or ORDER BY clause of a select.  dialect-specific compilers
        # can modify this behavior.
        render_label_with_as = (
            within_columns_clause and not within_label_clause
        )
        render_label_only = render_label_as_label is label

        if render_label_only or render_label_with_as:
            # truncate auto-generated names to the dialect's max length
            if isinstance(label.name, elements._truncated_label):
                labelname = self._truncated_identifier("colident", label.name)
            else:
                labelname = label.name

        if render_label_with_as:
            if add_to_result_map is not None:
                add_to_result_map(
                    labelname,
                    label.name,
                    (label, labelname) + label._alt_names + result_map_targets,
                    label.type,
                )
            return (
                label.element._compiler_dispatch(
                    self,
                    within_columns_clause=True,
                    within_label_clause=True,
                    **kw,
                )
                + OPERATORS[operators.as_]
                + self.preparer.format_label(label, labelname)
            )
        elif render_label_only:
            # e.g. ORDER BY referencing the label by name only
            return self.preparer.format_label(label, labelname)
        else:
            return label.element._compiler_dispatch(
                self, within_columns_clause=False, **kw
            )
def _fallback_column_name(self, column):
raise exc.CompileError(
"Cannot compile Column object until its 'name' is assigned."
)
def visit_lambda_element(self, element, **kw):
sql_element = element._resolved
return self.process(sql_element, **kw)
    def visit_column(
        self,
        column: ColumnClause[Any],
        add_to_result_map: Optional[_ResultMapAppender] = None,
        include_table: bool = True,
        result_map_targets: Tuple[Any, ...] = (),
        ambiguous_table_name_map: Optional[_AmbiguousTableNameMap] = None,
        **kwargs: Any,
    ) -> str:
        """Render a column expression, optionally qualified with its
        (possibly schema-prefixed) table name."""
        name = orig_name = column.name
        if name is None:
            name = self._fallback_column_name(column)

        is_literal = column.is_literal
        if not is_literal and isinstance(name, elements._truncated_label):
            # auto-generated name: truncate to dialect limits
            name = self._truncated_identifier("colident", name)

        if add_to_result_map is not None:
            targets = (column, name, column.key) + result_map_targets
            if column._tq_label:
                targets += (column._tq_label,)

            add_to_result_map(name, orig_name, targets, column.type)

        if is_literal:
            # note we are not currently accommodating for
            # literal_column(quoted_name('ident', True)) here
            name = self.escape_literal_column(name)
        else:
            name = self.preparer.quote(name)
        table = column.table
        if table is None or not include_table or not table.named_with_column:
            # unqualified rendering
            return name
        else:
            effective_schema = self.preparer.schema_for_object(table)

            if effective_schema:
                schema_prefix = (
                    self.preparer.quote_schema(effective_schema) + "."
                )
            else:
                schema_prefix = ""

            if TYPE_CHECKING:
                assert isinstance(table, NamedFromClause)
            tablename = table.name

            # disambiguate same-named tables from different schemas
            if (
                not effective_schema
                and ambiguous_table_name_map
                and tablename in ambiguous_table_name_map
            ):
                tablename = ambiguous_table_name_map[tablename]

            if isinstance(tablename, elements._truncated_label):
                tablename = self._truncated_identifier("alias", tablename)

            return schema_prefix + self.preparer.quote(tablename) + "." + name
def visit_collation(self, element, **kw):
return self.preparer.format_collation(element.collation)
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kw):
kw["type_expression"] = typeclause
kw["identifier_preparer"] = self.preparer
return self.dialect.type_compiler_instance.process(
typeclause.type, **kw
)
def post_process_text(self, text):
if self.preparer._double_percents:
text = text.replace("%", "%%")
return text
def escape_literal_column(self, text):
if self.preparer._double_percents:
text = text.replace("%", "%%")
return text
    def visit_textclause(self, textclause, add_to_result_map=None, **kw):
        """Render a text() construct, substituting its ``:param`` tokens
        with rendered bind parameters."""
        if self._collect_params:
            self._add_to_params(textclause)

        def do_bindparam(m):
            # replace a ":name" token with either the explicitly bound
            # parameter or an anonymous bind of that name
            name = m.group(1)
            if name in textclause._bindparams:
                return self.process(textclause._bindparams[name], **kw)
            else:
                return self.bindparam_string(name, **kw)

        if not self.stack:
            self.isplaintext = True

        if add_to_result_map:
            # text() object is present in the columns clause of a
            # select().   Add a no-name entry to the result map so that
            # row[text()] produces a result
            add_to_result_map(None, None, (textclause,), sqltypes.NULLTYPE)

        # un-escape any \:params
        return BIND_PARAMS_ESC.sub(
            lambda m: m.group(1),
            BIND_PARAMS.sub(
                do_bindparam, self.post_process_text(textclause.text)
            ),
        )
    def visit_textual_select(
        self, taf, compound_index=None, asfrom=False, **kw
    ):
        """Render a TextualSelect (text() with column information),
        populating the result map from its declared columns when needed."""
        if self._collect_params:
            self._add_to_params(taf)

        toplevel = not self.stack
        entry = self._default_stack_entry if toplevel else self.stack[-1]

        new_entry: _CompilerStackEntry = {
            "correlate_froms": set(),
            "asfrom_froms": set(),
            "selectable": taf,
        }
        self.stack.append(new_entry)

        if taf._independent_ctes:
            self._dispatch_independent_ctes(taf, kw)

        populate_result_map = (
            toplevel
            or (
                compound_index == 0
                and entry.get("need_result_map_for_compound", False)
            )
            or entry.get("need_result_map_for_nested", False)
        )

        if populate_result_map:
            self._ordered_columns = self._textual_ordered_columns = (
                taf.positional
            )

            # enable looser result column matching when the SQL text links to
            # Column objects by name only
            self._loose_column_name_matching = not taf.positional and bool(
                taf.column_args
            )

            for c in taf.column_args:
                self.process(
                    c,
                    within_columns_clause=True,
                    add_to_result_map=self._add_to_result_map,
                )

        text = self.process(taf.element, **kw)
        if self.ctes:
            nesting_level = len(self.stack) if not toplevel else None
            text = self._render_cte_clause(nesting_level=nesting_level) + text

        self.stack.pop(-1)

        return text
def visit_null(self, expr: Null, **kw: Any) -> str:
return "NULL"
def visit_true(self, expr: True_, **kw: Any) -> str:
if self.dialect.supports_native_boolean:
return "true"
else:
return "1"
def visit_false(self, expr: False_, **kw: Any) -> str:
if self.dialect.supports_native_boolean:
return "false"
else:
return "0"
def _generate_delimited_list(self, elements, separator, **kw):
return separator.join(
s
for s in (c._compiler_dispatch(self, **kw) for c in elements)
if s
)
    def _generate_delimited_and_list(self, clauses, **kw):
        # flatten / simplify the clause list for AND semantics (dropping
        # redundant True_/False_ singletons) before rendering
        lcc, clauses = elements.BooleanClauseList._process_clauses_for_boolean(
            operators.and_,
            elements.True_._singleton,
            elements.False_._singleton,
            clauses,
        )
        if lcc == 1:
            # simplified down to a single clause; render it bare
            return clauses[0]._compiler_dispatch(self, **kw)
        else:
            separator = OPERATORS[operators.and_]
            return separator.join(
                s
                for s in (c._compiler_dispatch(self, **kw) for c in clauses)
                if s
            )
def visit_tuple(self, clauselist, **kw):
return "(%s)" % self.visit_clauselist(clauselist, **kw)
def visit_element_list(self, element, **kw):
return self._generate_delimited_list(element.clauses, " ", **kw)
def visit_order_by_list(self, element, **kw):
return self._generate_delimited_list(element.clauses, ", ", **kw)
def visit_clauselist(self, clauselist, **kw):
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
return self._generate_delimited_list(clauselist.clauses, sep, **kw)
def visit_expression_clauselist(self, clauselist, **kw):
operator_ = clauselist.operator
disp = self._get_operator_dispatch(
operator_, "expression_clauselist", None
)
if disp:
return disp(clauselist, operator_, **kw)
try:
opstring = OPERATORS[operator_]
except KeyError as err:
raise exc.UnsupportedCompilationError(self, operator_) from err
else:
kw["_in_operator_expression"] = True
return self._generate_delimited_list(
clauselist.clauses, opstring, **kw
)
def visit_case(self, clause, **kwargs):
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += (
"WHEN "
+ cond._compiler_dispatch(self, **kwargs)
+ " THEN "
+ result._compiler_dispatch(self, **kwargs)
+ " "
)
if clause.else_ is not None:
x += (
"ELSE " + clause.else_._compiler_dispatch(self, **kwargs) + " "
)
x += "END"
return x
def visit_type_coerce(self, type_coerce, **kw):
return type_coerce.typed_expression._compiler_dispatch(self, **kw)
def visit_cast(self, cast, **kwargs):
type_clause = cast.typeclause._compiler_dispatch(self, **kwargs)
match = re.match("(.*)( COLLATE .*)", type_clause)
return "CAST(%s AS %s)%s" % (
cast.clause._compiler_dispatch(self, **kwargs),
match.group(1) if match else type_clause,
match.group(2) if match else "",
)
    def visit_frame_clause(self, frameclause, **kw):
        """Render the "<lower> AND <upper>" bounds portion of a window
        frame (RANGE / ROWS / GROUPS BETWEEN ...)."""
        # lower bound
        if frameclause.lower_type is elements.FrameClauseType.UNBOUNDED:
            left = "UNBOUNDED PRECEDING"
        elif frameclause.lower_type is elements.FrameClauseType.CURRENT:
            left = "CURRENT ROW"
        else:
            # offset bound: render the bound expression
            val = self.process(frameclause.lower_bind, **kw)
            if frameclause.lower_type is elements.FrameClauseType.PRECEDING:
                left = f"{val} PRECEDING"
            else:
                left = f"{val} FOLLOWING"

        # upper bound
        if frameclause.upper_type is elements.FrameClauseType.UNBOUNDED:
            right = "UNBOUNDED FOLLOWING"
        elif frameclause.upper_type is elements.FrameClauseType.CURRENT:
            right = "CURRENT ROW"
        else:
            val = self.process(frameclause.upper_bind, **kw)
            if frameclause.upper_type is elements.FrameClauseType.PRECEDING:
                right = f"{val} PRECEDING"
            else:
                right = f"{val} FOLLOWING"

        return f"{left} AND {right}"
def visit_over(self, over, **kwargs):
text = over.element._compiler_dispatch(self, **kwargs)
if over.range_ is not None:
range_ = f"RANGE BETWEEN {self.process(over.range_, **kwargs)}"
elif over.rows is not None:
range_ = f"ROWS BETWEEN {self.process(over.rows, **kwargs)}"
elif over.groups is not None:
range_ = f"GROUPS BETWEEN {self.process(over.groups, **kwargs)}"
else:
range_ = None
return "%s OVER (%s)" % (
text,
" ".join(
[
"%s BY %s"
% (word, clause._compiler_dispatch(self, **kwargs))
for word, clause in (
("PARTITION", over.partition_by),
("ORDER", over.order_by),
)
if clause is not None and len(clause)
]
+ ([range_] if range_ else [])
),
)
def visit_withingroup(self, withingroup, **kwargs):
return "%s WITHIN GROUP (ORDER BY %s)" % (
withingroup.element._compiler_dispatch(self, **kwargs),
withingroup.order_by._compiler_dispatch(self, **kwargs),
)
def visit_funcfilter(self, funcfilter, **kwargs):
return "%s FILTER (WHERE %s)" % (
funcfilter.func._compiler_dispatch(self, **kwargs),
funcfilter.criterion._compiler_dispatch(self, **kwargs),
)
    def visit_aggregateorderby(self, aggregateorderby, **kwargs):
        """Render aggregate_order_by() per the dialect's declared style:
        not supported, inline ORDER BY, or WITHIN GROUP."""
        if self.dialect.aggregate_order_by_style is AggregateOrderByStyle.NONE:
            raise exc.CompileError(
                "this dialect does not support "
                "ORDER BY within an aggregate function"
            )
        elif (
            self.dialect.aggregate_order_by_style
            is AggregateOrderByStyle.INLINE
        ):
            # clone the function and place the ORDER BY inside its
            # argument list, then compile the clone
            new_fn = aggregateorderby.element._clone()
            new_fn.clause_expr = elements.Grouping(
                aggregate_orderby_inline(
                    new_fn.clause_expr.element, aggregateorderby.order_by
                )
            )

            return new_fn._compiler_dispatch(self, **kwargs)
        else:
            # fall back to WITHIN GROUP (ORDER BY ...) rendering
            return self.visit_withingroup(aggregateorderby, **kwargs)
def visit_aggregate_orderby_inline(self, element, **kw):
return "%s ORDER BY %s" % (
self.process(element.element, **kw),
self.process(element.aggregate_order_by, **kw),
)
    def visit_aggregate_strings_func(self, fn, *, use_function_name, **kw):
        """Render an aggregate string-concatenation function under the
        dialect-supplied name, with the delimiter argument rendered as a
        literal and an optional inline ORDER BY."""
        # aggregate_order_by attribute is present if visit_function
        # gave us a Function with aggregate_orderby_inline() as the inner
        # contents
        order_by = getattr(fn.clauses, "aggregate_order_by", None)

        literal_exec = dict(kw)
        literal_exec["literal_execute"] = True

        # break up the function into its components so we can apply
        # literal_execute to the second argument (the delimiter)
        cl = list(fn.clauses)
        expr, delimeter = cl[0:2]

        if (
            order_by is not None
            and self.dialect.aggregate_order_by_style
            is AggregateOrderByStyle.INLINE
        ):
            return (
                f"{use_function_name}({expr._compiler_dispatch(self, **kw)}, "
                f"{delimeter._compiler_dispatch(self, **literal_exec)} "
                f"ORDER BY {order_by._compiler_dispatch(self, **kw)})"
            )
        else:
            return (
                f"{use_function_name}({expr._compiler_dispatch(self, **kw)}, "
                f"{delimeter._compiler_dispatch(self, **literal_exec)})"
            )
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (
field,
extract.expr._compiler_dispatch(self, **kwargs),
)
def visit_scalar_function_column(self, element, **kw):
compiled_fn = self.visit_function(element.fn, **kw)
compiled_col = self.visit_column(element, **kw)
return "(%s).%s" % (compiled_fn, compiled_col)
    def visit_function(
        self,
        func: Function[Any],
        add_to_result_map: Optional[_ResultMapAppender] = None,
        **kwargs: Any,
    ) -> str:
        """Render a SQL function call, preferring a per-function
        ``visit_<name>_func`` method on this compiler when one exists."""
        if self._collect_params:
            self._add_to_params(func)
        if add_to_result_map is not None:
            add_to_result_map(func.name, func.name, (func.name,), func.type)

        disp = getattr(self, "visit_%s_func" % func.name.lower(), None)

        text: str

        if disp:
            text = disp(func, **kwargs)
        else:
            # known generic function classes render via the FUNCTIONS map
            name = FUNCTIONS.get(func._deannotate().__class__, None)

            if name:
                if func._has_args:
                    name += "%(expr)s"
            else:
                name = func.name
                # quote the bare function name itself when required
                name = (
                    self.preparer.quote(name)
                    if self.preparer._requires_quotes_illegal_chars(name)
                    or isinstance(name, elements.quoted_name)
                    else name
                )
                name = name + "%(expr)s"
            # prepend any package names (e.g. "schema.fn"), quoting each
            # token as needed, then substitute the argument list
            text = ".".join(
                [
                    (
                        self.preparer.quote(tok)
                        if self.preparer._requires_quotes_illegal_chars(tok)
                        or isinstance(name, elements.quoted_name)
                        else tok
                    )
                    for tok in func.packagenames
                ]
                + [name]
            ) % {"expr": self.function_argspec(func, **kwargs)}

        if func._with_ordinality:
            text += " WITH ORDINALITY"
        return text
def visit_next_value_func(self, next_value, **kw):
return self.visit_sequence(next_value.sequence)
def visit_sequence(self, sequence, **kw):
raise NotImplementedError(
"Dialect '%s' does not support sequence increments."
% self.dialect.name
)
def function_argspec(self, func: Function[Any], **kwargs: Any) -> str:
return func.clause_expr._compiler_dispatch(self, **kwargs)
    def visit_compound_select(
        self, cs, asfrom=False, compound_index=None, **kwargs
    ):
        """Render a compound SELECT (UNION / INTERSECT / EXCEPT etc.),
        joining the member selects with the dialect's compound keyword."""
        if self._collect_params:
            self._add_to_params(cs)
        toplevel = not self.stack

        compile_state = cs._compile_state_factory(cs, self, **kwargs)

        if toplevel and not self.compile_state:
            self.compile_state = compile_state

        compound_stmt = compile_state.statement

        entry = self._default_stack_entry if toplevel else self.stack[-1]
        need_result_map = toplevel or (
            not compound_index
            and entry.get("need_result_map_for_compound", False)
        )

        # indicates there is already a CompoundSelect in play
        if compound_index == 0:
            entry["select_0"] = cs

        self.stack.append(
            {
                "correlate_froms": entry["correlate_froms"],
                "asfrom_froms": entry["asfrom_froms"],
                "selectable": cs,
                "compile_state": compile_state,
                "need_result_map_for_compound": need_result_map,
            }
        )

        if compound_stmt._independent_ctes:
            self._dispatch_independent_ctes(compound_stmt, kwargs)

        keyword = self.compound_keywords[cs.keyword]

        # render each member select, joined by e.g. " UNION "
        text = (" " + keyword + " ").join(
            (
                c._compiler_dispatch(
                    self, asfrom=asfrom, compound_index=i, **kwargs
                )
                for i, c in enumerate(cs.selects)
            )
        )

        kwargs["include_table"] = False
        text += self.group_by_clause(cs, **dict(asfrom=asfrom, **kwargs))
        text += self.order_by_clause(cs, **kwargs)
        if cs._has_row_limiting_clause:
            text += self._row_limit_clause(cs, **kwargs)

        if self.ctes:
            nesting_level = len(self.stack) if not toplevel else None
            text = (
                self._render_cte_clause(
                    nesting_level=nesting_level,
                    include_following_stack=True,
                )
                + text
            )

        self.stack.pop(-1)
        return text
def _row_limit_clause(self, cs, **kwargs):
if cs._fetch_clause is not None:
return self.fetch_clause(cs, **kwargs)
else:
return self.limit_clause(cs, **kwargs)
def _get_operator_dispatch(self, operator_, qualifier1, qualifier2):
attrname = "visit_%s_%s%s" % (
operator_.__name__,
qualifier1,
"_" + qualifier2 if qualifier2 else "",
)
return getattr(self, attrname, None)
def _get_custom_operator_dispatch(self, operator_, qualifier1):
attrname = "visit_%s_op_%s" % (operator_.visit_name, qualifier1)
return getattr(self, attrname, None)
    def visit_unary(
        self, unary, add_to_result_map=None, result_map_targets=(), **kw
    ):
        """Render a unary expression via either its prefix operator or
        its suffix modifier, honoring per-operator dispatch methods."""
        if add_to_result_map is not None:
            result_map_targets += (unary,)
            kw["add_to_result_map"] = add_to_result_map
            kw["result_map_targets"] = result_map_targets

        if unary.operator:
            if unary.modifier:
                raise exc.CompileError(
                    "Unary expression does not support operator "
                    "and modifier simultaneously"
                )
            disp = self._get_operator_dispatch(
                unary.operator, "unary", "operator"
            )
            if disp:
                return disp(unary, unary.operator, **kw)
            else:
                # prefix form, e.g. "NOT x"
                return self._generate_generic_unary_operator(
                    unary, OPERATORS[unary.operator], **kw
                )
        elif unary.modifier:
            disp = self._get_operator_dispatch(
                unary.modifier, "unary", "modifier"
            )
            if disp:
                return disp(unary, unary.modifier, **kw)
            else:
                # suffix form, e.g. "x DESC"
                return self._generate_generic_unary_modifier(
                    unary, OPERATORS[unary.modifier], **kw
                )
        else:
            raise exc.CompileError(
                "Unary expression has no operator or modifier"
            )
def visit_truediv_binary(self, binary, operator, **kw):
if self.dialect.div_is_floordiv:
return (
self.process(binary.left, **kw)
+ " / "
# TODO: would need a fast cast again here,
# unless we want to use an implicit cast like "+ 0.0"
+ self.process(
elements.Cast(
binary.right,
(
binary.right.type
if binary.right.type._type_affinity
in (sqltypes.Numeric, sqltypes.Float)
else sqltypes.Numeric()
),
),
**kw,
)
)
else:
return (
self.process(binary.left, **kw)
+ " / "
+ self.process(binary.right, **kw)
)
def visit_floordiv_binary(self, binary, operator, **kw):
if (
self.dialect.div_is_floordiv
and binary.right.type._type_affinity is sqltypes.Integer
):
return (
self.process(binary.left, **kw)
+ " / "
+ self.process(binary.right, **kw)
)
else:
return "FLOOR(%s)" % (
self.process(binary.left, **kw)
+ " / "
+ self.process(binary.right, **kw)
)
def visit_is_true_unary_operator(self, element, operator, **kw):
if (
element._is_implicitly_boolean
or self.dialect.supports_native_boolean
):
return self.process(element.element, **kw)
else:
return "%s = 1" % self.process(element.element, **kw)
def visit_is_false_unary_operator(self, element, operator, **kw):
if (
element._is_implicitly_boolean
or self.dialect.supports_native_boolean
):
return "NOT %s" % self.process(element.element, **kw)
else:
return "%s = 0" % self.process(element.element, **kw)
def visit_not_match_op_binary(self, binary, operator, **kw):
return "NOT %s" % self.visit_binary(
binary, override_operator=operators.match_op
)
def visit_not_in_op_binary(self, binary, operator, **kw):
# The brackets are required in the NOT IN operation because the empty
# case is handled using the form "(col NOT IN (null) OR 1 = 1)".
# The presence of the OR makes the brackets required.
return "(%s)" % self._generate_generic_binary(
binary, OPERATORS[operator], **kw
)
    def visit_empty_set_op_expr(self, type_, expand_op, **kw):
        """Render the interior of an IN / NOT IN expression whose value
        list is empty.

        The returned fragments are spliced inside the parenthesis already
        emitted for the IN expression, producing forms such as
        ``col NOT IN (NULL) OR (1 = 1)`` and ``col IN (NULL) AND (1 != 1)``.
        """
        if expand_op is operators.not_in_op:
            if len(type_) > 1:
                # tuple form: one NULL per element type
                return "(%s)) OR (1 = 1" % (
                    ", ".join("NULL" for element in type_)
                )
            else:
                return "NULL) OR (1 = 1"
        elif expand_op is operators.in_op:
            if len(type_) > 1:
                return "(%s)) AND (1 != 1" % (
                    ", ".join("NULL" for element in type_)
                )
            else:
                return "NULL) AND (1 != 1"
        else:
            # other expanding operators defer to the dialect
            return self.visit_empty_set_expr(type_)
def visit_empty_set_expr(self, element_types, **kw):
raise NotImplementedError(
"Dialect '%s' does not support empty set expression."
% self.dialect.name
)
    def _literal_execute_expanding_parameter_literal_binds(
        self, parameter, values, bind_expression_template=None
    ):
        """Render an expanding (IN) parameter's values as inline literals,
        returning ``((), replacement_expression)``; the empty tuple means
        no new bound parameters are produced."""
        typ_dialect_impl = parameter.type._unwrapped_dialect_impl(self.dialect)

        if not values:
            # empty IN expression.  note we don't need to use
            # bind_expression_template here because there are no
            # expressions to render.

            if typ_dialect_impl._is_tuple_type:
                replacement_expression = (
                    "VALUES " if self.dialect.tuple_in_values else ""
                ) + self.visit_empty_set_op_expr(
                    parameter.type.types, parameter.expand_op
                )

            else:
                replacement_expression = self.visit_empty_set_op_expr(
                    [parameter.type], parameter.expand_op
                )

        elif typ_dialect_impl._is_tuple_type or (
            typ_dialect_impl._isnull
            and isinstance(values[0], collections_abc.Sequence)
            and not isinstance(values[0], (str, bytes))
        ):
            # tuple IN: each value is itself a sequence of element values
            if typ_dialect_impl._has_bind_expression:
                raise NotImplementedError(
                    "bind_expression() on TupleType not supported with "
                    "literal_binds"
                )

            replacement_expression = (
                "VALUES " if self.dialect.tuple_in_values else ""
            ) + ", ".join(
                "(%s)"
                % (
                    ", ".join(
                        self.render_literal_value(value, param_type)
                        for value, param_type in zip(
                            tuple_element, parameter.type.types
                        )
                    )
                )
                for i, tuple_element in enumerate(values)
            )
        else:
            if bind_expression_template:
                # the POSTCOMPILE token carries a "~~left~~...~~right~~"
                # wrapper from bind_expression(); apply it per element
                post_compile_pattern = self._post_compile_pattern
                m = post_compile_pattern.search(bind_expression_template)
                assert m and m.group(
                    2
                ), "unexpected format for expanding parameter"

                tok = m.group(2).split("~~")
                be_left, be_right = tok[1], tok[3]
                replacement_expression = ", ".join(
                    "%s%s%s"
                    % (
                        be_left,
                        self.render_literal_value(value, parameter.type),
                        be_right,
                    )
                    for value in values
                )
            else:
                replacement_expression = ", ".join(
                    self.render_literal_value(value, parameter.type)
                    for value in values
                )

        return (), replacement_expression
    def _literal_execute_expanding_parameter(self, name, parameter, values):
        """Expand an IN parameter into per-value bind parameters.

        Returns ``(to_update, replacement_expression)`` where ``to_update``
        is a list of ``(new_param_key, value)`` pairs and the expression is
        the rendered comma-delimited bind list.
        """
        if parameter.literal_execute:
            # literal_execute binds render inline instead of as parameters
            return self._literal_execute_expanding_parameter_literal_binds(
                parameter, values
            )

        dialect = self.dialect
        typ_dialect_impl = parameter.type._unwrapped_dialect_impl(dialect)

        if self._numeric_binds:
            bind_template = self.compilation_bindtemplate
        else:
            bind_template = self.bindtemplate

        if (
            self.dialect._bind_typing_render_casts
            and typ_dialect_impl.render_bind_cast
        ):

            def _render_bindtemplate(name):
                # wrap each rendered bind in the dialect's bind cast
                return self.render_bind_cast(
                    parameter.type,
                    typ_dialect_impl,
                    bind_template % {"name": name},
                )

        else:

            def _render_bindtemplate(name):
                return bind_template % {"name": name}

        if not values:
            to_update = []
            if typ_dialect_impl._is_tuple_type:
                replacement_expression = self.visit_empty_set_op_expr(
                    parameter.type.types, parameter.expand_op
                )
            else:
                replacement_expression = self.visit_empty_set_op_expr(
                    [parameter.type], parameter.expand_op
                )

        elif typ_dialect_impl._is_tuple_type or (
            typ_dialect_impl._isnull
            and isinstance(values[0], collections_abc.Sequence)
            and not isinstance(values[0], (str, bytes))
        ):
            # tuple IN: generate name_i_j keys, one per tuple element
            assert not typ_dialect_impl._is_array
            to_update = [
                ("%s_%s_%s" % (name, i, j), value)
                for i, tuple_element in enumerate(values, 1)
                for j, value in enumerate(tuple_element, 1)
            ]

            replacement_expression = (
                "VALUES " if dialect.tuple_in_values else ""
            ) + ", ".join(
                "(%s)"
                % (
                    ", ".join(
                        _render_bindtemplate(
                            to_update[i * len(tuple_element) + j][0]
                        )
                        for j, value in enumerate(tuple_element)
                    )
                )
                for i, tuple_element in enumerate(values)
            )
        else:
            # scalar IN: generate name_1, name_2, ... keys
            to_update = [
                ("%s_%s" % (name, i), value)
                for i, value in enumerate(values, 1)
            ]
            replacement_expression = ", ".join(
                _render_bindtemplate(key) for key, value in to_update
            )

        return to_update, replacement_expression
    def visit_binary(
        self,
        binary,
        override_operator=None,
        eager_grouping=False,
        from_linter=None,
        lateral_from_linter=None,
        **kw,
    ):
        """Render a binary expression, recording FROM-linter edges for
        comparison operators and honoring per-operator dispatch methods."""
        if from_linter and operators.is_comparison(binary.operator):
            if lateral_from_linter is not None:
                # within a lateral subquery: link both sides through the
                # enclosing lateral element as well
                enclosing_lateral = kw["enclosing_lateral"]
                lateral_from_linter.edges.update(
                    itertools.product(
                        _de_clone(
                            binary.left._from_objects + [enclosing_lateral]
                        ),
                        _de_clone(
                            binary.right._from_objects + [enclosing_lateral]
                        ),
                    )
                )
            else:
                from_linter.edges.update(
                    itertools.product(
                        _de_clone(binary.left._from_objects),
                        _de_clone(binary.right._from_objects),
                    )
                )

        # don't allow "? = ?" to render
        if (
            self.ansi_bind_rules
            and isinstance(binary.left, elements.BindParameter)
            and isinstance(binary.right, elements.BindParameter)
        ):
            kw["literal_execute"] = True

        operator_ = override_operator or binary.operator
        disp = self._get_operator_dispatch(operator_, "binary", None)
        if disp:
            return disp(binary, operator_, **kw)
        else:
            try:
                opstring = OPERATORS[operator_]
            except KeyError as err:
                raise exc.UnsupportedCompilationError(self, operator_) from err
            else:
                return self._generate_generic_binary(
                    binary,
                    opstring,
                    from_linter=from_linter,
                    lateral_from_linter=lateral_from_linter,
                    **kw,
                )
def visit_function_as_comparison_op_binary(self, element, operator, **kw):
return self.process(element.sql_function, **kw)
def visit_mod_binary(self, binary, operator, **kw):
if self.preparer._double_percents:
return (
self.process(binary.left, **kw)
+ " %% "
+ self.process(binary.right, **kw)
)
else:
return (
self.process(binary.left, **kw)
+ " % "
+ self.process(binary.right, **kw)
)
def visit_custom_op_binary(self, element, operator, **kw):
if operator.visit_name:
disp = self._get_custom_operator_dispatch(operator, "binary")
if disp:
return disp(element, operator, **kw)
kw["eager_grouping"] = operator.eager_grouping
return self._generate_generic_binary(
element,
" " + self.escape_literal_column(operator.opstring) + " ",
**kw,
)
def visit_custom_op_unary_operator(self, element, operator, **kw):
if operator.visit_name:
disp = self._get_custom_operator_dispatch(operator, "unary")
if disp:
return disp(element, operator, **kw)
return self._generate_generic_unary_operator(
element, self.escape_literal_column(operator.opstring) + " ", **kw
)
def visit_custom_op_unary_modifier(self, element, operator, **kw):
if operator.visit_name:
disp = self._get_custom_operator_dispatch(operator, "unary")
if disp:
return disp(element, operator, **kw)
return self._generate_generic_unary_modifier(
element, " " + self.escape_literal_column(operator.opstring), **kw
)
def _generate_generic_binary(
self,
binary: BinaryExpression[Any],
opstring: str,
eager_grouping: bool = False,
**kw: Any,
) -> str:
_in_operator_expression = kw.get("_in_operator_expression", False)
kw["_in_operator_expression"] = True
kw["_binary_op"] = binary.operator
text = (
binary.left._compiler_dispatch(
self, eager_grouping=eager_grouping, **kw
)
+ opstring
+ binary.right._compiler_dispatch(
self, eager_grouping=eager_grouping, **kw
)
)
if _in_operator_expression and eager_grouping:
text = "(%s)" % text
return text
def _generate_generic_unary_operator(self, unary, opstring, **kw):
return opstring + unary.element._compiler_dispatch(self, **kw)
def _generate_generic_unary_modifier(self, unary, opstring, **kw):
return unary.element._compiler_dispatch(self, **kw) + opstring
    @util.memoized_property
    def _like_percent_literal(self):
        # reusable literal '%' column used to assemble LIKE patterns for
        # the contains/startswith/endswith operator family
        return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE)
def visit_ilike_case_insensitive_operand(self, element, **kw):
return f"lower({element.element._compiler_dispatch(self, **kw)})"
def visit_contains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.concat(binary.right).concat(percent)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_not_contains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.concat(binary.right).concat(percent)
return self.visit_not_like_op_binary(binary, operator, **kw)
def visit_icontains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.left = ilike_case_insensitive(binary.left)
binary.right = percent.concat(
ilike_case_insensitive(binary.right)
).concat(percent)
return self.visit_ilike_op_binary(binary, operator, **kw)
def visit_not_icontains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.left = ilike_case_insensitive(binary.left)
binary.right = percent.concat(
ilike_case_insensitive(binary.right)
).concat(percent)
return self.visit_not_ilike_op_binary(binary, operator, **kw)
def visit_startswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent._rconcat(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_not_startswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent._rconcat(binary.right)
return self.visit_not_like_op_binary(binary, operator, **kw)
def visit_istartswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.left = ilike_case_insensitive(binary.left)
binary.right = percent._rconcat(ilike_case_insensitive(binary.right))
return self.visit_ilike_op_binary(binary, operator, **kw)
def visit_not_istartswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.left = ilike_case_insensitive(binary.left)
binary.right = percent._rconcat(ilike_case_insensitive(binary.right))
return self.visit_not_ilike_op_binary(binary, operator, **kw)
def visit_endswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.concat(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_not_endswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.concat(binary.right)
return self.visit_not_like_op_binary(binary, operator, **kw)
def visit_iendswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.left = ilike_case_insensitive(binary.left)
binary.right = percent.concat(ilike_case_insensitive(binary.right))
return self.visit_ilike_op_binary(binary, operator, **kw)
def visit_not_iendswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.left = ilike_case_insensitive(binary.left)
binary.right = percent.concat(ilike_case_insensitive(binary.right))
return self.visit_not_ilike_op_binary(binary, operator, **kw)
def visit_like_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "%s LIKE %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape is not None
else ""
)
def visit_not_like_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "%s NOT LIKE %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape is not None
else ""
)
def visit_ilike_op_binary(self, binary, operator, **kw):
if operator is operators.ilike_op:
binary = binary._clone()
binary.left = ilike_case_insensitive(binary.left)
binary.right = ilike_case_insensitive(binary.right)
# else we assume ilower() has been applied
return self.visit_like_op_binary(binary, operator, **kw)
def visit_not_ilike_op_binary(self, binary, operator, **kw):
if operator is operators.not_ilike_op:
binary = binary._clone()
binary.left = ilike_case_insensitive(binary.left)
binary.right = ilike_case_insensitive(binary.right)
# else we assume ilower() has been applied
return self.visit_not_like_op_binary(binary, operator, **kw)
def visit_between_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " BETWEEN SYMMETRIC " if symmetric else " BETWEEN ", **kw
)
def visit_not_between_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary,
" NOT BETWEEN SYMMETRIC " if symmetric else " NOT BETWEEN ",
**kw,
)
    def visit_regexp_match_op_binary(
        self, binary: BinaryExpression[Any], operator: Any, **kw: Any
    ) -> str:
        # the base compiler has no portable regex support; dialects that
        # provide a native regex-match operator override this hook
        raise exc.CompileError(
            "%s dialect does not support regular expressions"
            % self.dialect.name
        )
    def visit_not_regexp_match_op_binary(
        self, binary: BinaryExpression[Any], operator: Any, **kw: Any
    ) -> str:
        # the base compiler has no portable regex support; dialects that
        # provide a native negated regex-match operator override this hook
        raise exc.CompileError(
            "%s dialect does not support regular expressions"
            % self.dialect.name
        )
    def visit_regexp_replace_op_binary(
        self, binary: BinaryExpression[Any], operator: Any, **kw: Any
    ) -> str:
        # the base compiler has no portable regex-replace support; dialects
        # that provide a native function override this hook
        raise exc.CompileError(
            "%s dialect does not support regular expression replacements"
            % self.dialect.name
        )
def visit_dmltargetcopy(self, element, *, bindmarkers=None, **kw):
if bindmarkers is None:
raise exc.CompileError(
"DML target objects may only be used with "
"compiled INSERT or UPDATE statements"
)
bindmarkers[element.column.key] = element
return f"__BINDMARKER_~~{element.column.key}~~"
    def visit_bindparam(
        self,
        bindparam,
        within_columns_clause=False,
        literal_binds=False,
        skip_bind_expression=False,
        literal_execute=False,
        render_postcompile=False,
        **kwargs,
    ):
        """Render a BindParameter.

        Handles type-level bind expressions, inline literal rendering,
        post-compile ("expanding" / literal-execute) parameters,
        bind-name conflict detection, and registration of the parameter
        into ``self.binds`` and related collections.
        """
        if not skip_bind_expression:
            impl = bindparam.type.dialect_impl(self.dialect)
            if impl._has_bind_expression:
                # the type wraps the parameter in a SQL expression;
                # compile that expression with the bind rendered inside it
                bind_expression = impl.bind_expression(bindparam)
                wrapped = self.process(
                    bind_expression,
                    skip_bind_expression=True,
                    within_columns_clause=within_columns_clause,
                    literal_binds=literal_binds and not bindparam.expanding,
                    literal_execute=literal_execute,
                    render_postcompile=render_postcompile,
                    **kwargs,
                )
                if bindparam.expanding:
                    # for postcompile w/ expanding, move the "wrapped" part
                    # of this into the inside
                    m = re.match(
                        r"^(.*)\(__\[POSTCOMPILE_(\S+?)\]\)(.*)$", wrapped
                    )
                    assert m, "unexpected format for expanding parameter"
                    wrapped = "(__[POSTCOMPILE_%s~~%s~~REPL~~%s~~])" % (
                        m.group(2),
                        m.group(1),
                        m.group(3),
                    )
                    if literal_binds:
                        ret = self.render_literal_bindparam(
                            bindparam,
                            within_columns_clause=True,
                            bind_expression_template=wrapped,
                            **kwargs,
                        )
                        return f"({ret})"
                return wrapped
        if not literal_binds:
            # literal-execute applies when requested explicitly, declared
            # on the parameter, or forced by ANSI bind rules in a columns
            # clause
            literal_execute = (
                literal_execute
                or bindparam.literal_execute
                or (within_columns_clause and self.ansi_bind_rules)
            )
            post_compile = literal_execute or bindparam.expanding
        else:
            post_compile = False
        if literal_binds:
            ret = self.render_literal_bindparam(
                bindparam, within_columns_clause=True, **kwargs
            )
            if bindparam.expanding:
                ret = f"({ret})"
            return ret
        name = self._truncate_bindparam(bindparam)
        if name in self.binds:
            # another parameter already rendered under this name; verify
            # the reuse is legal
            existing = self.binds[name]
            if existing is not bindparam:
                if (
                    (existing.unique or bindparam.unique)
                    and not existing.proxy_set.intersection(
                        bindparam.proxy_set
                    )
                    and not existing._cloned_set.intersection(
                        bindparam._cloned_set
                    )
                ):
                    raise exc.CompileError(
                        "Bind parameter '%s' conflicts with "
                        "unique bind parameter of the same name" % name
                    )
                elif existing.expanding != bindparam.expanding:
                    raise exc.CompileError(
                        "Can't reuse bound parameter name '%s' in both "
                        "'expanding' (e.g. within an IN expression) and "
                        "non-expanding contexts. If this parameter is to "
                        "receive a list/array value, set 'expanding=True' on "
                        "it for expressions that aren't IN, otherwise use "
                        "a different parameter name." % (name,)
                    )
                elif existing._is_crud or bindparam._is_crud:
                    if existing._is_crud and bindparam._is_crud:
                        # TODO: this condition is not well understood.
                        # see tests in test/sql/test_update.py
                        raise exc.CompileError(
                            "Encountered unsupported case when compiling an "
                            "INSERT or UPDATE statement. If this is a "
                            "multi-table "
                            "UPDATE statement, please provide string-named "
                            "arguments to the "
                            "values() method with distinct names; support for "
                            "multi-table UPDATE statements that "
                            "target multiple tables for UPDATE is very "
                            "limited",
                        )
                    else:
                        raise exc.CompileError(
                            f"bindparam() name '{bindparam.key}' is reserved "
                            "for automatic usage in the VALUES or SET "
                            "clause of this "
                            "insert/update statement. Please use a "
                            "name other than column name when using "
                            "bindparam() "
                            "with insert() or update() (for example, "
                            f"'b_{bindparam.key}')."
                        )
        # register under both the original key and the (possibly
        # truncated) render name
        self.binds[bindparam.key] = self.binds[name] = bindparam
        # if we are given a cache key that we're going to match against,
        # relate the bindparam here to one that is most likely present
        # in the "extracted params" portion of the cache key. this is used
        # to set up a positional mapping that is used to determine the
        # correct parameters for a subsequent use of this compiled with
        # a different set of parameter values. here, we accommodate for
        # parameters that may have been cloned both before and after the cache
        # key was been generated.
        ckbm_tuple = self._cache_key_bind_match
        if ckbm_tuple:
            ckbm, cksm = ckbm_tuple
            for bp in bindparam._cloned_set:
                if bp.key in cksm:
                    cb = cksm[bp.key]
                    ckbm[cb].append(bindparam)
        if bindparam.isoutparam:
            self.has_out_parameters = True
        if post_compile:
            if render_postcompile:
                self._render_postcompile = True
            if literal_execute:
                self.literal_execute_params |= {bindparam}
            else:
                self.post_compile_params |= {bindparam}
        ret = self.bindparam_string(
            name,
            post_compile=post_compile,
            expanding=bindparam.expanding,
            bindparam_type=bindparam.type,
            **kwargs,
        )
        if bindparam.expanding:
            ret = f"({ret})"
        return ret
    def render_bind_cast(self, type_, dbapi_type, sqltext):
        # hook for dialects that must wrap bound parameters in an explicit
        # CAST; the base compiler provides no implementation
        raise NotImplementedError()
    def render_literal_bindparam(
        self,
        bindparam,
        render_literal_value=NO_ARG,
        bind_expression_template=None,
        **kw,
    ):
        """Render a bound parameter inline as a literal value, warning
        when a NULL is compared with an operator other than IS / IS NOT."""
        if render_literal_value is not NO_ARG:
            # caller supplied the value to render directly
            value = render_literal_value
        else:
            if bindparam.value is None and bindparam.callable is None:
                op = kw.get("_binary_op", None)
                if op and op not in (operators.is_, operators.is_not):
                    util.warn_limited(
                        "Bound parameter '%s' rendering literal NULL in a SQL "
                        "expression; comparisons to NULL should not use "
                        "operators outside of 'is' or 'is not'",
                        (bindparam.key,),
                    )
                return self.process(sqltypes.NULLTYPE, **kw)
            value = bindparam.effective_value
        if bindparam.expanding:
            # expanding parameter: each element of the sequence renders
            # individually via the expanding-literal helper
            leep = self._literal_execute_expanding_parameter_literal_binds
            to_update, replacement_expr = leep(
                bindparam,
                value,
                bind_expression_template=bind_expression_template,
            )
            return replacement_expr
        else:
            return self.render_literal_value(value, bindparam.type)
    def render_literal_value(
        self, value: Any, type_: sqltypes.TypeEngine[Any]
    ) -> str:
        """Render the value of a bind parameter as a quoted literal.

        This is used for statement sections that do not accept bind parameters
        on the target driver/database.

        This should be implemented by subclasses using the quoting services
        of the DBAPI.

        :raises exc.CompileError: if the type has no literal processor or
            the processor fails on the given value.
        """
        if value is None and not type_.should_evaluate_none:
            # issue #10535 - handle NULL in the compiler without placing
            # this onto each type, except for "evaluate None" types
            # (e.g. JSON)
            return self.process(elements.Null._instance())
        processor = type_._cached_literal_processor(self.dialect)
        if processor:
            try:
                return processor(value)
            except Exception as e:
                raise exc.CompileError(
                    f"Could not render literal value "
                    f'"{sql_util._repr_single_value(value)}" '
                    f"with datatype "
                    f"{type_}; see parent stack trace for "
                    "more detail."
                ) from e
        else:
            raise exc.CompileError(
                f"No literal value renderer is available for literal value "
                f'"{sql_util._repr_single_value(value)}" '
                f"with datatype {type_}"
            )
def _truncate_bindparam(self, bindparam):
if bindparam in self.bind_names:
return self.bind_names[bindparam]
bind_name = bindparam.key
if isinstance(bind_name, elements._truncated_label):
bind_name = self._truncated_identifier("bindparam", bind_name)
# add to bind_names for translation
self.bind_names[bindparam] = bind_name
return bind_name
def _truncated_identifier(
self, ident_class: str, name: _truncated_label
) -> str:
if (ident_class, name) in self.truncated_names:
return self.truncated_names[(ident_class, name)]
anonname = name.apply_map(self.anon_map)
if len(anonname) > self.label_length - 6:
counter = self._truncated_counters.get(ident_class, 1)
truncname = (
anonname[0 : max(self.label_length - 6, 0)]
+ "_"
+ hex(counter)[2:]
)
self._truncated_counters[ident_class] = counter + 1
else:
truncname = anonname
self.truncated_names[(ident_class, name)] = truncname
return truncname
    def _anonymize(self, name: str) -> str:
        # resolve %(anon)s tokens in the name against the per-compilation
        # anonymous-name map
        return name % self.anon_map
    def bindparam_string(
        self,
        name: str,
        post_compile: bool = False,
        expanding: bool = False,
        escaped_from: Optional[str] = None,
        bindparam_type: Optional[TypeEngine[Any]] = None,
        accumulate_bind_names: Optional[Set[str]] = None,
        visited_bindparam: Optional[List[str]] = None,
        **kw: Any,
    ) -> str:
        """Produce the rendered placeholder text for a bound parameter
        name, applying name escaping, post-compile markers, and dialect
        bind casts as appropriate."""
        # TODO: accumulate_bind_names is passed by crud.py to gather
        # names on a per-value basis, visited_bindparam is passed by
        # visit_insert() to collect all parameters in the statement.
        # see if this gathering can be simplified somehow
        if accumulate_bind_names is not None:
            accumulate_bind_names.add(name)
        if visited_bindparam is not None:
            visited_bindparam.append(name)
        if not escaped_from:
            if self._bind_translate_re.search(name):
                # not quite the translate use case as we want to
                # also get a quick boolean if we even found
                # unusual characters in the name
                new_name = self._bind_translate_re.sub(
                    lambda m: self._bind_translate_chars[m.group(0)],
                    name,
                )
                escaped_from = name
                name = new_name
        if escaped_from:
            # remember the original name so result processing can map the
            # escaped name back
            self.escaped_bind_names = self.escaped_bind_names.union(
                {escaped_from: name}
            )
        if post_compile:
            ret = "__[POSTCOMPILE_%s]" % name
            if expanding:
                # for expanding, bound parameters or literal values will be
                # rendered per item
                return ret
            # otherwise, for non-expanding "literal execute", apply
            # bind casts as determined by the datatype
            if bindparam_type is not None:
                type_impl = bindparam_type._unwrapped_dialect_impl(
                    self.dialect
                )
                if type_impl.render_literal_cast:
                    ret = self.render_bind_cast(bindparam_type, type_impl, ret)
            return ret
        elif self.state is CompilerState.COMPILING:
            # mid-compilation: use the internal template, swapped to the
            # dialect's template in a later pass
            ret = self.compilation_bindtemplate % {"name": name}
        else:
            ret = self.bindtemplate % {"name": name}
        if (
            bindparam_type is not None
            and self.dialect._bind_typing_render_casts
        ):
            type_impl = bindparam_type._unwrapped_dialect_impl(self.dialect)
            if type_impl.render_bind_cast:
                ret = self.render_bind_cast(bindparam_type, type_impl, ret)
        return ret
def _dispatch_independent_ctes(self, stmt, kw):
local_kw = kw.copy()
local_kw.pop("cte_opts", None)
for cte, opt in zip(
stmt._independent_ctes, stmt._independent_ctes_opts
):
cte._compiler_dispatch(self, cte_opts=opt, **local_kw)
    def visit_cte(
        self,
        cte: CTE,
        asfrom: bool = False,
        ashint: bool = False,
        fromhints: Optional[_FromHintsType] = None,
        visiting_cte: Optional[CTE] = None,
        from_linter: Optional[FromLinter] = None,
        cte_opts: selectable._CTEOpts = selectable._CTEOpts(False),
        **kwargs: Any,
    ) -> Optional[str]:
        """Render a common table expression.

        Registers the CTE body text into the compiler's per-level CTE
        collections (deduplicating and detecting name conflicts along the
        way) and, when rendering as a FROM element, returns the CTE's
        name/alias text; otherwise returns ``None``.
        """
        self_ctes = self._init_cte_state()
        assert self_ctes is self.ctes
        kwargs["visiting_cte"] = cte
        cte_name = cte.name
        if isinstance(cte_name, elements._truncated_label):
            cte_name = self._truncated_identifier("alias", cte_name)
        is_new_cte = True
        embedded_in_current_named_cte = False
        _reference_cte = cte._get_reference_cte()
        nesting = cte.nesting or cte_opts.nesting
        # check for CTE already encountered
        if _reference_cte in self.level_name_by_cte:
            cte_level, _, existing_cte_opts = self.level_name_by_cte[
                _reference_cte
            ]
            assert _ == cte_name
            cte_level_name = (cte_level, cte_name)
            existing_cte = self.ctes_by_level_name[cte_level_name]
            # check if we are receiving it here with a specific
            # "nest_here" location; if so, move it to this location
            if cte_opts.nesting:
                if existing_cte_opts.nesting:
                    raise exc.CompileError(
                        "CTE is stated as 'nest_here' in "
                        "more than one location"
                    )
                old_level_name = (cte_level, cte_name)
                cte_level = len(self.stack) if nesting else 1
                cte_level_name = new_level_name = (cte_level, cte_name)
                del self.ctes_by_level_name[old_level_name]
                self.ctes_by_level_name[new_level_name] = existing_cte
                self.level_name_by_cte[_reference_cte] = new_level_name + (
                    cte_opts,
                )
        else:
            # first encounter; nested CTEs live at the current stack depth,
            # non-nested ones at the topmost level
            cte_level = len(self.stack) if nesting else 1
            cte_level_name = (cte_level, cte_name)
            if cte_level_name in self.ctes_by_level_name:
                existing_cte = self.ctes_by_level_name[cte_level_name]
            else:
                existing_cte = None
        if existing_cte is not None:
            embedded_in_current_named_cte = visiting_cte is existing_cte
            # we've generated a same-named CTE that we are enclosed in,
            # or this is the same CTE. just return the name.
            if cte is existing_cte._restates or cte is existing_cte:
                is_new_cte = False
            elif existing_cte is cte._restates:
                # we've generated a same-named CTE that is
                # enclosed in us - we take precedence, so
                # discard the text for the "inner".
                del self_ctes[existing_cte]
                existing_cte_reference_cte = existing_cte._get_reference_cte()
                assert existing_cte_reference_cte is _reference_cte
                assert existing_cte_reference_cte is existing_cte
                del self.level_name_by_cte[existing_cte_reference_cte]
            else:
                if (
                    # if the two CTEs have the same hash, which we expect
                    # here means that one/both is an annotated of the other
                    (hash(cte) == hash(existing_cte))
                    # or...
                    or (
                        (
                            # if they are clones, i.e. they came from the ORM
                            # or some other visit method
                            cte._is_clone_of is not None
                            or existing_cte._is_clone_of is not None
                        )
                        # and are deep-copy identical
                        and cte.compare(existing_cte)
                    )
                ):
                    # then consider these two CTEs the same
                    is_new_cte = False
                else:
                    # otherwise these are two CTEs that either will render
                    # differently, or were indicated separately by the user,
                    # with the same name
                    raise exc.CompileError(
                        "Multiple, unrelated CTEs found with "
                        "the same name: %r" % cte_name
                    )
        if not asfrom and not is_new_cte:
            return None
        if cte._cte_alias is not None:
            pre_alias_cte = cte._cte_alias
            cte_pre_alias_name = cte._cte_alias.name
            if isinstance(cte_pre_alias_name, elements._truncated_label):
                cte_pre_alias_name = self._truncated_identifier(
                    "alias", cte_pre_alias_name
                )
        else:
            pre_alias_cte = cte
            cte_pre_alias_name = None
        if is_new_cte:
            self.ctes_by_level_name[cte_level_name] = cte
            self.level_name_by_cte[_reference_cte] = cte_level_name + (
                cte_opts,
            )
            if pre_alias_cte not in self.ctes:
                self.visit_cte(pre_alias_cte, **kwargs)
            if not cte_pre_alias_name and cte not in self_ctes:
                if cte.recursive:
                    self.ctes_recursive = True
                text = self.preparer.format_alias(cte, cte_name)
                if cte.recursive or cte.element.name_cte_columns:
                    col_source = cte.element
                    # TODO: can we get at the .columns_plus_names collection
                    # that is already (or will be?) generated for the SELECT
                    # rather than calling twice?
                    recur_cols = [
                        # TODO: proxy_name is not technically safe,
                        # see test_cte->
                        # test_with_recursive_no_name_currently_buggy. not
                        # clear what should be done with such a case
                        fallback_label_name or proxy_name
                        for (
                            _,
                            proxy_name,
                            fallback_label_name,
                            c,
                            repeated,
                        ) in (col_source._generate_columns_plus_names(True))
                        if not repeated
                    ]
                    text += "(%s)" % (
                        ", ".join(
                            self.preparer.format_label_name(
                                ident, anon_map=self.anon_map
                            )
                            for ident in recur_cols
                        )
                    )
                assert kwargs.get("subquery", False) is False
                if not self.stack:
                    # toplevel, this is a stringify of the
                    # cte directly. just compile the inner
                    # the way alias() does.
                    return cte.element._compiler_dispatch(
                        self, asfrom=asfrom, **kwargs
                    )
                else:
                    prefixes = self._generate_prefixes(
                        cte, cte._prefixes, **kwargs
                    )
                    inner = cte.element._compiler_dispatch(
                        self, asfrom=True, **kwargs
                    )
                    text += " AS %s\n(%s)" % (prefixes, inner)
                if cte._suffixes:
                    text += " " + self._generate_prefixes(
                        cte, cte._suffixes, **kwargs
                    )
                self_ctes[cte] = text
        if asfrom:
            if from_linter:
                from_linter.froms[cte._de_clone()] = cte_name
            if not is_new_cte and embedded_in_current_named_cte:
                return self.preparer.format_alias(cte, cte_name)
            if cte_pre_alias_name:
                text = self.preparer.format_alias(cte, cte_pre_alias_name)
                if self.preparer._requires_quotes(cte_name):
                    cte_name = self.preparer.quote(cte_name)
                text += self.get_render_as_alias_suffix(cte_name)
                return text  # type: ignore[no-any-return]
            else:
                return self.preparer.format_alias(cte, cte_name)
        return None
def visit_table_valued_alias(self, element, **kw):
if element.joins_implicitly:
kw["from_linter"] = None
if element._is_lateral:
return self.visit_lateral(element, **kw)
else:
return self.visit_alias(element, **kw)
def visit_table_valued_column(self, element, **kw):
return self.visit_column(element, **kw)
    def visit_alias(
        self,
        alias,
        asfrom=False,
        ashint=False,
        iscrud=False,
        fromhints=None,
        subquery=False,
        lateral=False,
        enclosing_alias=None,
        from_linter=None,
        **kwargs,
    ):
        """Render an alias construct.

        In a FROM context this renders ``<inner> AS <name>`` (with
        optional derived-column list and dialect FROM hints); as a hint
        target only the alias name; otherwise the inner element is
        rendered directly.
        """
        if lateral:
            if "enclosing_lateral" not in kwargs:
                # if lateral is set and enclosing_lateral is not
                # present, we assume we are being called directly
                # from visit_lateral() and we need to set enclosing_lateral.
                assert alias._is_lateral
                kwargs["enclosing_lateral"] = alias
            # for lateral objects, we track a second from_linter that is...
            # lateral! to the level above us.
            if (
                from_linter
                and "lateral_from_linter" not in kwargs
                and "enclosing_lateral" in kwargs
            ):
                kwargs["lateral_from_linter"] = from_linter
        if enclosing_alias is not None and enclosing_alias.element is alias:
            # nested alias of the same element; render the inner element
            # directly, parenthesizing only where required
            inner = alias.element._compiler_dispatch(
                self,
                asfrom=asfrom,
                ashint=ashint,
                iscrud=iscrud,
                fromhints=fromhints,
                lateral=lateral,
                enclosing_alias=alias,
                **kwargs,
            )
            if subquery and (asfrom or lateral):
                inner = "(%s)" % (inner,)
            return inner
        else:
            kwargs["enclosing_alias"] = alias
        if asfrom or ashint:
            if isinstance(alias.name, elements._truncated_label):
                alias_name = self._truncated_identifier("alias", alias.name)
            else:
                alias_name = alias.name
        if ashint:
            return self.preparer.format_alias(alias, alias_name)
        elif asfrom:
            if from_linter:
                from_linter.froms[alias._de_clone()] = alias_name
            inner = alias.element._compiler_dispatch(
                self, asfrom=True, lateral=lateral, **kwargs
            )
            if subquery:
                inner = "(%s)" % (inner,)
            ret = inner + self.get_render_as_alias_suffix(
                self.preparer.format_alias(alias, alias_name)
            )
            if alias._supports_derived_columns and alias._render_derived:
                # render the derived column list, e.g. name(col [type], ...)
                ret += "(%s)" % (
                    ", ".join(
                        "%s%s"
                        % (
                            self.preparer.quote(col.name),
                            (
                                " %s"
                                % self.dialect.type_compiler_instance.process(
                                    col.type, **kwargs
                                )
                                if alias._render_derived_w_types
                                else ""
                            ),
                        )
                        for col in alias.c
                    )
                )
            if fromhints and alias in fromhints:
                ret = self.format_from_hint_text(
                    ret, alias, fromhints[alias], iscrud
                )
            return ret
        else:
            # note we cancel the "subquery" flag here as well
            return alias.element._compiler_dispatch(
                self, lateral=lateral, **kwargs
            )
def visit_subquery(self, subquery, **kw):
kw["subquery"] = True
return self.visit_alias(subquery, **kw)
def visit_lateral(self, lateral_, **kw):
kw["lateral"] = True
return "LATERAL %s" % self.visit_alias(lateral_, **kw)
def visit_tablesample(self, tablesample, asfrom=False, **kw):
text = "%s TABLESAMPLE %s" % (
self.visit_alias(tablesample, asfrom=True, **kw),
tablesample._get_method()._compiler_dispatch(self, **kw),
)
if tablesample.seed is not None:
text += " REPEATABLE (%s)" % (
tablesample.seed._compiler_dispatch(self, **kw)
)
return text
def _render_values(self, element, **kw):
kw.setdefault("literal_binds", element.literal_binds)
tuples = ", ".join(
self.process(
elements.Tuple(
types=element._column_types, *elem
).self_group(),
**kw,
)
for chunk in element._data
for elem in chunk
)
return f"VALUES {tuples}"
    def visit_values(
        self, element, asfrom=False, from_linter=None, visiting_cte=None, **kw
    ):
        """Render a VALUES construct, optionally named/aliased with its
        column list when used as a FROM element."""
        if element._independent_ctes:
            self._dispatch_independent_ctes(element, kw)
        v = self._render_values(element, **kw)
        if element._unnamed:
            name = None
        elif isinstance(element.name, elements._truncated_label):
            name = self._truncated_identifier("values", element.name)
        else:
            name = element.name
        if element._is_lateral:
            lateral = "LATERAL "
        else:
            lateral = ""
        if asfrom:
            if from_linter:
                from_linter.froms[element._de_clone()] = (
                    name if name is not None else "(unnamed VALUES element)"
                )
            if visiting_cte is not None and visiting_cte.element is element:
                # rendered as the body of a CTE: no alias wrapping here
                if element._is_lateral:
                    raise exc.CompileError(
                        "Can't use a LATERAL VALUES expression inside of a CTE"
                    )
            elif name:
                # named FROM element: parenthesize and attach the alias
                # name together with the column list
                kw["include_table"] = False
                v = "%s(%s)%s (%s)" % (
                    lateral,
                    v,
                    self.get_render_as_alias_suffix(self.preparer.quote(name)),
                    (
                        ", ".join(
                            c._compiler_dispatch(self, **kw)
                            for c in element.columns
                        )
                    ),
                )
            else:
                v = "%s(%s)" % (lateral, v)
        return v
def visit_scalar_values(self, element, **kw):
return f"({self._render_values(element, **kw)})"
def get_render_as_alias_suffix(self, alias_name_text):
return " AS " + alias_name_text
def _add_to_result_map(
self,
keyname: str,
name: str,
objects: Tuple[Any, ...],
type_: TypeEngine[Any],
) -> None:
# note objects must be non-empty for cursor.py to handle the
# collection properly
assert objects
if keyname is None or keyname == "*":
self._ordered_columns = False
self._ad_hoc_textual = True
if type_._is_tuple_type:
raise exc.CompileError(
"Most backends don't support SELECTing "
"from a tuple() object. If this is an ORM query, "
"consider using the Bundle object."
)
self._result_columns.append(
ResultColumnsEntry(keyname, name, objects, type_)
)
def _label_returning_column(
self, stmt, column, populate_result_map, column_clause_args=None, **kw
):
"""Render a column with necessary labels inside of a RETURNING clause.
This method is provided for individual dialects in place of calling
the _label_select_column method directly, so that the two use cases
of RETURNING vs. SELECT can be disambiguated going forward.
.. versionadded:: 1.4.21
"""
return self._label_select_column(
None,
column,
populate_result_map,
False,
{} if column_clause_args is None else column_clause_args,
**kw,
)
def _label_select_column(
    self,
    select,
    column,
    populate_result_map,
    asfrom,
    column_clause_args,
    name=None,
    proxy_name=None,
    fallback_label_name=None,
    within_columns_clause=True,
    column_is_repeated=False,
    need_column_expressions=False,
    include_table=True,
):
    """produce labeled columns present in a select().

    Decides whether a column expression in the columns clause should be
    wrapped in a ``_CompileLabel`` (rendering ``expr AS name``), and wires
    an ``add_to_result_map`` callable into the column's compilation so
    result-set metadata is collected when ``populate_result_map`` is set.
    Returns the compiled SQL string for the (possibly labeled) column.
    """
    # a dialect type implementation may substitute its own SQL expression
    # for the column (e.g. CAST wrappers); only do this work when needed
    impl = column.type.dialect_impl(self.dialect)

    if impl._has_column_expression and (
        need_column_expressions or populate_result_map
    ):
        col_expr = impl.column_expression(column)
    else:
        col_expr = column

    if populate_result_map:
        # pass an "add_to_result_map" callable into the compilation
        # of embedded columns.  this collects information about the
        # column as it will be fetched in the result and is coordinated
        # with cursor.description when the query is executed.
        add_to_result_map = self._add_to_result_map

        # if the SELECT statement told us this column is a repeat,
        # wrap the callable with one that prevents the addition of the
        # targets
        if column_is_repeated:
            _add_to_result_map = add_to_result_map

            def add_to_result_map(keyname, name, objects, type_):
                _add_to_result_map(keyname, name, (keyname,), type_)

        # if we redefined col_expr for type expressions, wrap the
        # callable with one that adds the original column to the targets
        elif col_expr is not column:
            _add_to_result_map = add_to_result_map

            def add_to_result_map(keyname, name, objects, type_):
                _add_to_result_map(
                    keyname, name, (column,) + objects, type_
                )

    else:
        add_to_result_map = None

    # this method is used by some of the dialects for RETURNING,
    # which has different inputs.  _label_returning_column was added
    # as the better target for this now however for 1.4 we will keep
    # _label_select_column directly compatible with this use case.
    # these assertions right now set up the current expected inputs
    assert within_columns_clause, (
        "_label_select_column is only relevant within "
        "the columns clause of a SELECT or RETURNING"
    )
    if isinstance(column, elements.Label):
        # an explicit label() construct; only re-wrap when a type-level
        # column expression replaced the original element
        if col_expr is not column:
            result_expr = _CompileLabel(
                col_expr, column.name, alt_names=(column.element,)
            )
        else:
            result_expr = col_expr

    elif name:
        # here, _columns_plus_names has determined there's an explicit
        # label name we need to use.  this is the default for
        # tablenames_plus_columnnames as well as when columns are being
        # deduplicated on name

        assert (
            proxy_name is not None
        ), "proxy_name is required if 'name' is passed"

        result_expr = _CompileLabel(
            col_expr,
            name,
            alt_names=(
                proxy_name,
                # this is a hack to allow legacy result column lookups
                # to work as they did before; this goes away in 2.0.
                # TODO: this only seems to be tested indirectly
                # via test/orm/test_deprecations.py.   should be a
                # resultset test for this
                column._tq_label,
            ),
        )
    else:
        # determine here whether this column should be rendered in
        # a labelled context or not, as we were given no required label
        # name from the caller. Here we apply heuristics based on the kind
        # of SQL expression involved.

        if col_expr is not column:
            # type-specific expression wrapping the given column,
            # so we render a label
            render_with_label = True
        elif isinstance(column, elements.ColumnClause):
            # table-bound column, we render its name as a label if we are
            # inside of a subquery only
            render_with_label = (
                asfrom
                and not column.is_literal
                and column.table is not None
            )
        elif isinstance(column, elements.TextClause):
            render_with_label = False
        elif isinstance(column, elements.UnaryExpression):
            # unary expression.   notes added as of #12681
            #
            # By convention, the visit_unary() method
            # itself does not add an entry to the result map, and relies
            # upon either the inner expression creating a result map
            # entry, or if not, by creating a label here that produces
            # the result map entry.  Where that happens is based on whether
            # or not the element immediately inside the unary is a
            # NamedColumn subclass or not.
            #
            # Now, this also impacts how the SELECT is written; if
            # we decide to generate a label here, we get the usual
            # "~(x+y) AS anon_1" thing in the columns clause.  If we
            # don't, we don't get an AS at all, we get like
            # "~table.column".
            #
            # But here is the important thing as of modernish (like 1.4)
            # versions of SQLAlchemy - **whether or not the AS <label>
            # is present in the statement is not actually important**.
            # We target result columns **positionally** for a fully
            # compiled ``Select()`` object; before 1.4 we needed those
            # labels to match in cursor.description etc etc but now it
            # really doesn't matter.
            # So really, we could set render_with_label True in all cases.
            # Or we could just have visit_unary() populate the result map
            # in all cases.
            #
            # What we're doing here is strictly trying to not rock the
            # boat too much with when we do/don't render "AS label";
            # labels being present helps in the edge cases that we
            # "fall back" to named cursor.description matching, labels
            # not being present for columns keeps us from having awkward
            # phrases like "SELECT DISTINCT table.x AS x".
            render_with_label = (
                (
                    # exception case to detect if we render "not boolean"
                    # as "not <col>" for native boolean or "<col> = 1"
                    # for non-native boolean.  this is controlled by
                    # visit_is_<true|false>_unary_operator
                    column.operator
                    in (operators.is_false, operators.is_true)
                    and not self.dialect.supports_native_boolean
                )
                or column._wraps_unnamed_column()
                or asfrom
            )
        elif (
            # general class of expressions that don't have a SQL-column
            # addressible name.  includes scalar selects, bind parameters,
            # SQL functions, others
            not isinstance(column, elements.NamedColumn)
            # deeper check that indicates there's no natural "name" to
            # this element, which accommodates for custom SQL constructs
            # that might have a ".name" attribute (but aren't SQL
            # functions) but are not implementing this more recently added
            # base class.  in theory the "NamedColumn" check should be
            # enough, however here we seek to maintain legacy behaviors
            # as well.
            and column._non_anon_label is None
        ):
            render_with_label = True
        else:
            render_with_label = False

        if render_with_label:
            if not fallback_label_name:
                # used by the RETURNING case right now.  we generate it
                # here as 3rd party dialects may be referring to
                # _label_select_column method directly instead of the
                # just-added _label_returning_column method
                assert not column_is_repeated
                fallback_label_name = column._anon_name_label

            # ensure the label is length-truncated for the dialect
            fallback_label_name = (
                elements._truncated_label(fallback_label_name)
                if not isinstance(
                    fallback_label_name, elements._truncated_label
                )
                else fallback_label_name
            )

            result_expr = _CompileLabel(
                col_expr, fallback_label_name, alt_names=(proxy_name,)
            )
        else:
            result_expr = col_expr

    column_clause_args.update(
        within_columns_clause=within_columns_clause,
        add_to_result_map=add_to_result_map,
        include_table=include_table,
    )
    return result_expr._compiler_dispatch(self, **column_clause_args)
def format_from_hint_text(self, sqltext, table, hint, iscrud):
    """Append the dialect-rendered hint for ``table`` to ``sqltext``.

    Returns ``sqltext`` unchanged when the dialect produces no hint text.
    """
    rendered_hint = self.get_from_hint_text(table, hint)
    if not rendered_hint:
        return sqltext
    return sqltext + " " + rendered_hint
def get_select_hint_text(self, byfroms):
    """Hook for dialects to render statement-level SELECT hint text.

    Default implementation renders nothing.
    """
    return None
def get_from_hint_text(
    self, table: FromClause, text: Optional[str]
) -> Optional[str]:
    """Hook for dialects to render a per-FROM-element hint.

    Default implementation renders nothing.
    """
    return None
def get_crud_hint_text(self, table, text):
    """Hook for dialects to render a hint for an INSERT/UPDATE/DELETE.

    Default implementation renders nothing.
    """
    return None
def get_statement_hint_text(self, hint_texts):
    """Join collected statement-level hint strings with single spaces."""
    joined = " ".join(hint_texts)
    return joined
# stack entry used when compiling at the top level (self.stack empty);
# the runtime value is built only outside of TYPE_CHECKING so that the
# annotation alone is visible to type checkers
_default_stack_entry: _CompilerStackEntry

if not typing.TYPE_CHECKING:
    _default_stack_entry = util.immutabledict(
        [("correlate_froms", frozenset()), ("asfrom_froms", frozenset())]
    )
def _display_froms_for_select(
    self, select_stmt, asfrom, lateral=False, **kw
):
    # utility method to help external dialects
    # get the correct from list for a select.
    # specifically the oracle dialect needs this feature
    # right now.
    is_toplevel = not self.stack
    stack_entry = (
        self._default_stack_entry if is_toplevel else self.stack[-1]
    )

    compile_state = select_stmt._compile_state_factory(select_stmt, self)

    correlate_froms = stack_entry["correlate_froms"]
    asfrom_froms = stack_entry["asfrom_froms"]

    if asfrom and not lateral:
        return compile_state._get_display_froms(
            explicit_correlate_froms=correlate_froms.difference(
                asfrom_froms
            ),
            implicit_correlate_froms=(),
        )
    return compile_state._get_display_froms(
        explicit_correlate_froms=correlate_froms,
        implicit_correlate_froms=asfrom_froms,
    )
translate_select_structure: Any = None
"""if not ``None``, should be a callable which accepts ``(select_stmt,
**kw)`` and returns a select object. this is used for structural changes
mostly to accommodate for LIMIT/OFFSET schemes
"""
def visit_select(
    self,
    select_stmt,
    asfrom=False,
    insert_into=False,
    fromhints=None,
    compound_index=None,
    select_wraps_for=None,
    lateral=False,
    from_linter=None,
    **kwargs,
):
    """Compile a SELECT statement to its SQL string.

    Builds the compile state, optionally restructures the statement via
    ``translate_select_structure``, renders hints/prefixes/columns, then
    delegates the FROM/WHERE/GROUP BY/etc. portion to
    ``_compose_select_body`` and prepends any collected CTEs.
    """
    assert select_wraps_for is None, (
        "SQLAlchemy 1.4 requires use of "
        "the translate_select_structure hook for structural "
        "translations of SELECT objects"
    )
    if self._collect_params:
        self._add_to_params(select_stmt)

    # initial setup of SELECT.  the compile_state_factory may now
    # be creating a totally different SELECT from the one that was
    # passed in.  for ORM use this will convert from an ORM-state
    # SELECT to a regular "Core" SELECT.  other composed operations
    # such as computation of joins will be performed.

    kwargs["within_columns_clause"] = False

    compile_state = select_stmt._compile_state_factory(
        select_stmt, self, **kwargs
    )
    kwargs["ambiguous_table_name_map"] = (
        compile_state._ambiguous_table_name_map
    )

    select_stmt = compile_state.statement

    toplevel = not self.stack

    if toplevel and not self.compile_state:
        self.compile_state = compile_state

    is_embedded_select = compound_index is not None or insert_into

    # translate step for Oracle, SQL Server which often need to
    # restructure the SELECT to allow for LIMIT/OFFSET and possibly
    # other conditions
    if self.translate_select_structure:
        new_select_stmt = self.translate_select_structure(
            select_stmt, asfrom=asfrom, **kwargs
        )

        # if SELECT was restructured, maintain a link to the originals
        # and assemble a new compile state
        if new_select_stmt is not select_stmt:
            compile_state_wraps_for = compile_state
            select_wraps_for = select_stmt
            select_stmt = new_select_stmt

            compile_state = select_stmt._compile_state_factory(
                select_stmt, self, **kwargs
            )
            select_stmt = compile_state.statement

    entry = self._default_stack_entry if toplevel else self.stack[-1]

    populate_result_map = need_column_expressions = (
        toplevel
        or entry.get("need_result_map_for_compound", False)
        or entry.get("need_result_map_for_nested", False)
    )

    # indicates there is a CompoundSelect in play and we are not the
    # first select
    if compound_index:
        populate_result_map = False

    # this was first proposed as part of #3372; however, it is not
    # reached in current tests and could possibly be an assertion
    # instead.
    if not populate_result_map and "add_to_result_map" in kwargs:
        del kwargs["add_to_result_map"]

    froms = self._setup_select_stack(
        select_stmt, compile_state, entry, asfrom, lateral, compound_index
    )

    column_clause_args = kwargs.copy()
    column_clause_args.update(
        {"within_label_clause": False, "within_columns_clause": False}
    )

    text = "SELECT "  # we're off to a good start !

    if select_stmt._post_select_clause is not None:
        psc = self.process(select_stmt._post_select_clause, **kwargs)
        if psc is not None:
            text += psc + " "

    if select_stmt._hints:
        hint_text, byfrom = self._setup_select_hints(select_stmt)
        if hint_text:
            text += hint_text + " "
    else:
        byfrom = None

    if select_stmt._independent_ctes:
        self._dispatch_independent_ctes(select_stmt, kwargs)

    if select_stmt._prefixes:
        text += self._generate_prefixes(
            select_stmt, select_stmt._prefixes, **kwargs
        )

    text += self.get_select_precolumns(select_stmt, **kwargs)

    if select_stmt._pre_columns_clause is not None:
        pcc = self.process(select_stmt._pre_columns_clause, **kwargs)
        if pcc is not None:
            text += pcc + " "

    # the actual list of columns to print in the SELECT column list.
    # None entries (columns that declined to render) are filtered out.
    inner_columns = [
        c
        for c in [
            self._label_select_column(
                select_stmt,
                column,
                populate_result_map,
                asfrom,
                column_clause_args,
                name=name,
                proxy_name=proxy_name,
                fallback_label_name=fallback_label_name,
                column_is_repeated=repeated,
                need_column_expressions=need_column_expressions,
            )
            for (
                name,
                proxy_name,
                fallback_label_name,
                column,
                repeated,
            ) in compile_state.columns_plus_names
        ]
        if c is not None
    ]

    if populate_result_map and select_wraps_for is not None:
        # if this select was generated from translate_select,
        # rewrite the targeted columns in the result map

        translate = dict(
            zip(
                [
                    name
                    for (
                        key,
                        proxy_name,
                        fallback_label_name,
                        name,
                        repeated,
                    ) in compile_state.columns_plus_names
                ],
                [
                    name
                    for (
                        key,
                        proxy_name,
                        fallback_label_name,
                        name,
                        repeated,
                    ) in compile_state_wraps_for.columns_plus_names
                ],
            )
        )

        self._result_columns = [
            ResultColumnsEntry(
                key, name, tuple(translate.get(o, o) for o in obj), type_
            )
            for key, name, obj, type_ in self._result_columns
        ]

    text = self._compose_select_body(
        text,
        select_stmt,
        compile_state,
        inner_columns,
        froms,
        byfrom,
        toplevel,
        kwargs,
    )

    if select_stmt._post_body_clause is not None:
        pbc = self.process(select_stmt._post_body_clause, **kwargs)
        if pbc:
            text += " " + pbc

    if select_stmt._statement_hints:
        per_dialect = [
            ht
            for (dialect_name, ht) in select_stmt._statement_hints
            if dialect_name in ("*", self.dialect.name)
        ]
        if per_dialect:
            text += " " + self.get_statement_hint_text(per_dialect)

    # In compound query, CTEs are shared at the compound level
    if self.ctes and (not is_embedded_select or toplevel):
        nesting_level = len(self.stack) if not toplevel else None
        text = self._render_cte_clause(nesting_level=nesting_level) + text

    if select_stmt._suffixes:
        text += " " + self._generate_prefixes(
            select_stmt, select_stmt._suffixes, **kwargs
        )

    # pop the entry pushed by _setup_select_stack
    self.stack.pop(-1)

    return text
def _setup_select_hints(
    self, select: Select[Unpack[TupleAny]]
) -> Tuple[str, _FromHintsType]:
    """Resolve this dialect's per-FROM hints and statement-level hint text.

    Returns ``(hint_text, byfrom)`` where ``byfrom`` maps each hinted
    FROM element to its interpolated hint string.
    """
    byfrom = {}
    for (from_, dialect), hinttext in select._hints.items():
        if dialect not in ("*", self.dialect.name):
            continue
        byfrom[from_] = hinttext % {
            "name": from_._compiler_dispatch(self, ashint=True)
        }
    hint_text = self.get_select_hint_text(byfrom)
    return hint_text, byfrom
def _setup_select_stack(
    self, select, compile_state, entry, asfrom, lateral, compound_index
):
    """Compute the display FROM list for ``select`` and push a new
    compiler stack entry for its compilation.

    Also validates that all members of a CompoundSelect have the same
    number of columns, raising :class:`.CompileError` otherwise.
    Returns the list of FROM elements to render.
    """
    correlate_froms = entry["correlate_froms"]
    asfrom_froms = entry["asfrom_froms"]

    if compound_index == 0:
        # remember the first SELECT of a compound so later members can
        # be checked against its column count
        entry["select_0"] = select
    elif compound_index:
        select_0 = entry["select_0"]
        numcols = len(select_0._all_selected_columns)

        if len(compile_state.columns_plus_names) != numcols:
            raise exc.CompileError(
                "All selectables passed to "
                "CompoundSelect must have identical numbers of "
                "columns; select #%d has %d columns, select "
                "#%d has %d"
                % (
                    1,
                    numcols,
                    compound_index + 1,
                    len(select._all_selected_columns),
                )
            )

    if asfrom and not lateral:
        # rendered as a subquery: correlate only against enclosing
        # froms that are not themselves "asfrom"
        froms = compile_state._get_display_froms(
            explicit_correlate_froms=correlate_froms.difference(
                asfrom_froms
            ),
            implicit_correlate_froms=(),
        )
    else:
        froms = compile_state._get_display_froms(
            explicit_correlate_froms=correlate_froms,
            implicit_correlate_froms=asfrom_froms,
        )

    new_correlate_froms = set(_from_objects(*froms))
    all_correlate_froms = new_correlate_froms.union(correlate_froms)

    new_entry: _CompilerStackEntry = {
        "asfrom_froms": new_correlate_froms,
        "correlate_froms": all_correlate_froms,
        "selectable": select,
        "compile_state": compile_state,
    }
    self.stack.append(new_entry)

    return froms
def _compose_select_body(
    self,
    text,
    select,
    compile_state,
    inner_columns,
    froms,
    byfrom,
    toplevel,
    kwargs,
):
    """Append the column list, FROM, WHERE, GROUP BY, HAVING, ORDER BY,
    LIMIT/OFFSET and FOR UPDATE portions to ``text`` and return it."""
    text += ", ".join(inner_columns)

    if self.linting & COLLECT_CARTESIAN_PRODUCTS:
        from_linter = FromLinter({}, set())
        warn_linting = self.linting & WARN_LINTING
        if toplevel:
            self.from_linter = from_linter
    else:
        from_linter = None
        warn_linting = False

    # adjust the whitespace for no inner columns, part of #9440,
    # so that a no-col SELECT comes out as "SELECT WHERE..." or
    # "SELECT FROM ...".
    # while it would be better to have built the SELECT starting string
    # without trailing whitespace first, then add whitespace only if inner
    # cols were present, this breaks compatibility with various custom
    # compilation schemes that are currently being tested.
    if not inner_columns:
        text = text.rstrip()

    if froms:
        text += " \nFROM "

        if select._hints:
            text += ", ".join(
                [
                    f._compiler_dispatch(
                        self,
                        asfrom=True,
                        fromhints=byfrom,
                        from_linter=from_linter,
                        **kwargs,
                    )
                    for f in froms
                ]
            )
        else:
            text += ", ".join(
                [
                    f._compiler_dispatch(
                        self,
                        asfrom=True,
                        from_linter=from_linter,
                        **kwargs,
                    )
                    for f in froms
                ]
            )
    else:
        text += self.default_from()

    if select._where_criteria:
        t = self._generate_delimited_and_list(
            select._where_criteria, from_linter=from_linter, **kwargs
        )
        if t:
            text += " \nWHERE " + t

    if warn_linting:
        assert from_linter is not None
        from_linter.warn()

    if select._group_by_clauses:
        text += self.group_by_clause(select, **kwargs)

    if select._having_criteria:
        t = self._generate_delimited_and_list(
            select._having_criteria, **kwargs
        )
        if t:
            text += " \nHAVING " + t

    if select._post_criteria_clause is not None:
        pcc = self.process(select._post_criteria_clause, **kwargs)
        if pcc is not None:
            text += " \n" + pcc

    if select._order_by_clauses:
        text += self.order_by_clause(select, **kwargs)

    if select._has_row_limiting_clause:
        text += self._row_limit_clause(select, **kwargs)

    if select._for_update_arg is not None:
        text += self.for_update_clause(select, **kwargs)

    return text
def _generate_prefixes(self, stmt, prefixes, **kw):
    """Render dialect-applicable prefix elements, space-separated,
    with a trailing space when anything was rendered."""
    rendered = [
        prefix._compiler_dispatch(self, **kw)
        for prefix, dialect_name in prefixes
        if dialect_name in (None, "*") or dialect_name == self.dialect.name
    ]
    clause = " ".join(rendered)
    if not clause:
        return clause
    return clause + " "
def _render_cte_clause(
    self,
    nesting_level=None,
    include_following_stack=False,
):
    """Render the WITH clause for the CTEs collected during compilation.

    :param nesting_level: stack depth of the statement being rendered;
     when greater than 1, only nesting CTEs bound to that level are
     rendered, and those entries are then removed from the compiler's
     CTE collections so they don't render again at an outer level.
    :param include_following_stack:
     Also render the nesting CTEs on the next stack. Useful for
     SQL structures like UNION or INSERT that can wrap SELECT
     statements containing nesting CTEs.

    Returns the ``WITH [RECURSIVE] ...`` prefix text, or an empty
    string when there is nothing to render.
    """
    if not self.ctes:
        return ""

    ctes: MutableMapping[CTE, str]

    if nesting_level and nesting_level > 1:
        # select only the nesting CTEs that belong at this level
        ctes = util.OrderedDict()
        for cte in list(self.ctes.keys()):
            cte_level, cte_name, cte_opts = self.level_name_by_cte[
                cte._get_reference_cte()
            ]
            nesting = cte.nesting or cte_opts.nesting
            is_rendered_level = cte_level == nesting_level or (
                include_following_stack and cte_level == nesting_level + 1
            )
            if not (nesting and is_rendered_level):
                continue

            ctes[cte] = self.ctes[cte]

    else:
        ctes = self.ctes

    if not ctes:
        return ""

    # any() consumes the iterable lazily; no need to build a list first
    ctes_recursive = any(cte.recursive for cte in ctes)

    cte_text = self.get_cte_preamble(ctes_recursive) + " "
    cte_text += ", \n".join(ctes.values())
    cte_text += "\n "

    if nesting_level and nesting_level > 1:
        # the rendered nesting CTEs are consumed; drop them from the
        # compiler collections so outer levels don't render them again
        for cte in list(ctes.keys()):
            cte_level, cte_name, cte_opts = self.level_name_by_cte[
                cte._get_reference_cte()
            ]
            del self.ctes[cte]
            del self.ctes_by_level_name[(cte_level, cte_name)]
            del self.level_name_by_cte[cte._get_reference_cte()]

    return cte_text
def get_cte_preamble(self, recursive):
    """Return the keyword that opens a WITH clause, adding RECURSIVE
    when any rendered CTE is recursive."""
    return "WITH RECURSIVE" if recursive else "WITH"
def get_select_precolumns(self, select: Select[Any], **kw: Any) -> str:
    """Called when building a ``SELECT`` statement, position is just
    before column list.
    """
    if select._distinct_on:
        util.warn_deprecated(
            "DISTINCT ON is currently supported only by the PostgreSQL "
            "dialect. Use of DISTINCT ON for other backends is currently "
            "silently ignored, however this usage is deprecated, and will "
            "raise CompileError in a future release for all backends "
            "that do not support this syntax.",
            version="1.4",
        )
    if select._distinct:
        return "DISTINCT "
    return ""
def group_by_clause(self, select, **kw):
    """allow dialects to customize how GROUP BY is rendered."""
    rendered = self._generate_delimited_list(
        select._group_by_clauses, OPERATORS[operators.comma_op], **kw
    )
    if not rendered:
        return ""
    return " GROUP BY " + rendered
def order_by_clause(self, select, **kw):
    """allow dialects to customize how ORDER BY is rendered."""
    rendered = self._generate_delimited_list(
        select._order_by_clauses, OPERATORS[operators.comma_op], **kw
    )
    if not rendered:
        return ""
    return " ORDER BY " + rendered
def for_update_clause(self, select, **kw):
    """Render the plain ``FOR UPDATE`` suffix; dialects may override
    for backend-specific variants."""
    return " FOR UPDATE"
def returning_clause(
    self,
    stmt: UpdateBase,
    returning_cols: Sequence[_ColumnsClauseElement],
    *,
    populate_result_map: bool,
    **kw: Any,
) -> str:
    """Render a RETURNING clause for the given DML statement,
    labeling each column as needed."""
    rendered_columns = []
    for (
        name,
        proxy_name,
        fallback_label_name,
        column,
        repeated,
    ) in stmt._generate_columns_plus_names(
        True, cols=base._select_iterables(returning_cols)
    ):
        rendered_columns.append(
            self._label_returning_column(
                stmt,
                column,
                populate_result_map,
                fallback_label_name=fallback_label_name,
                column_is_repeated=repeated,
                name=name,
                proxy_name=proxy_name,
                **kw,
            )
        )

    return "RETURNING " + ", ".join(rendered_columns)
def limit_clause(self, select, **kw):
    """Render LIMIT/OFFSET, emitting ``LIMIT -1`` when only an OFFSET
    is present."""
    parts = []
    if select._limit_clause is not None:
        parts.append("\n LIMIT " + self.process(select._limit_clause, **kw))
    if select._offset_clause is not None:
        if select._limit_clause is None:
            parts.append("\n LIMIT -1")
        parts.append(" OFFSET " + self.process(select._offset_clause, **kw))
    return "".join(parts)
def fetch_clause(
    self,
    select,
    fetch_clause=None,
    require_offset=False,
    use_literal_execute_for_simple_int=False,
    **kw,
):
    """Render ``OFFSET ... ROWS`` / ``FETCH FIRST ... ROWS`` clauses.

    :param fetch_clause: override for the statement's own fetch clause;
     when ``None`` the statement's ``_fetch_clause`` and its options
     are used.
    :param require_offset: emit ``OFFSET 0 ROWS`` even when no offset
     is present (some backends require OFFSET alongside FETCH).
    :param use_literal_execute_for_simple_int: render simple integer
     limit/offset binds as literal-execute parameters.
    """
    if fetch_clause is None:
        fetch_clause = select._fetch_clause
        fetch_clause_options = select._fetch_clause_options
    else:
        fetch_clause_options = {"percent": False, "with_ties": False}

    text = ""

    if select._offset_clause is not None:
        offset_clause = select._offset_clause
        if (
            use_literal_execute_for_simple_int
            and select._simple_int_clause(offset_clause)
        ):
            offset_clause = offset_clause.render_literal_execute()
        offset_str = self.process(offset_clause, **kw)
        text += "\n OFFSET %s ROWS" % offset_str
    elif require_offset:
        text += "\n OFFSET 0 ROWS"

    if fetch_clause is not None:
        if (
            use_literal_execute_for_simple_int
            and select._simple_int_clause(fetch_clause)
        ):
            fetch_clause = fetch_clause.render_literal_execute()
        text += "\n FETCH FIRST %s%s ROWS %s" % (
            self.process(fetch_clause, **kw),
            " PERCENT" if fetch_clause_options["percent"] else "",
            "WITH TIES" if fetch_clause_options["with_ties"] else "ONLY",
        )
    return text
def visit_table(
    self,
    table,
    asfrom=False,
    iscrud=False,
    ashint=False,
    fromhints=None,
    use_schema=True,
    from_linter=None,
    ambiguous_table_name_map=None,
    enclosing_alias=None,
    **kwargs,
):
    """Render a Table in a FROM (or hint) context.

    Returns the quoted, optionally schema-qualified table name, with an
    anonymizing alias applied when the bare name would be ambiguous, and
    per-table hint text appended when provided.  Outside of a FROM/hint
    context, renders nothing (empty string).
    """
    if from_linter:
        from_linter.froms[table] = table.fullname

    if asfrom or ashint:
        effective_schema = self.preparer.schema_for_object(table)

        if use_schema and effective_schema:
            ret = (
                self.preparer.quote_schema(effective_schema)
                + "."
                + self.preparer.quote(table.name)
            )
        else:
            ret = self.preparer.quote(table.name)

        if (
            (
                enclosing_alias is None
                or enclosing_alias.element is not table
            )
            and not effective_schema
            and ambiguous_table_name_map
            and table.name in ambiguous_table_name_map
        ):
            # the plain name collides with another table in this
            # statement; render with a generated anonymous alias
            anon_name = self._truncated_identifier(
                "alias", ambiguous_table_name_map[table.name]
            )

            ret = ret + self.get_render_as_alias_suffix(
                self.preparer.format_alias(None, anon_name)
            )

        if fromhints and table in fromhints:
            ret = self.format_from_hint_text(
                ret, table, fromhints[table], iscrud
            )
        return ret
    else:
        # a table referenced in a non-FROM position renders nothing here
        return ""
def visit_join(self, join, asfrom=False, from_linter=None, **kwargs):
    """Render ``left <JOIN-kind> right ON onclause`` for a Join."""
    if from_linter:
        from_linter.edges.update(
            itertools.product(
                _de_clone(join.left._from_objects),
                _de_clone(join.right._from_objects),
            )
        )

    if join.full:
        join_keyword = " FULL OUTER JOIN "
    elif join.isouter:
        join_keyword = " LEFT OUTER JOIN "
    else:
        join_keyword = " JOIN "

    left_text = join.left._compiler_dispatch(
        self, asfrom=True, from_linter=from_linter, **kwargs
    )
    right_text = join.right._compiler_dispatch(
        self, asfrom=True, from_linter=from_linter, **kwargs
    )
    # TODO: likely need asfrom=True here?
    on_text = join.onclause._compiler_dispatch(
        self, from_linter=from_linter, **kwargs
    )
    return left_text + join_keyword + right_text + " ON " + on_text
def _setup_crud_hints(self, stmt, table_text):
    """Collect this dialect's hints for a DML statement and apply any
    hint targeting the statement's own table to ``table_text``.

    Returns ``(dialect_hints, table_text)``.
    """
    dialect_hints = {}
    for (table, dialect), hint_text in stmt._hints.items():
        if dialect in ("*", self.dialect.name):
            dialect_hints[table] = hint_text

    if stmt.table in dialect_hints:
        table_text = self.format_from_hint_text(
            table_text, stmt.table, dialect_hints[stmt.table], True
        )
    return dialect_hints, table_text
# within the realm of "insertmanyvalues sentinel columns",
# these lookups match different kinds of Column() configurations
# to specific backend capabilities.  they are broken into two
# lookups, one for autoincrement columns and the other for non
# autoincrement columns
_sentinel_col_non_autoinc_lookup = util.immutabledict(
    {
        _SentinelDefaultCharacterization.CLIENTSIDE: (
            InsertmanyvaluesSentinelOpts._SUPPORTED_OR_NOT
        ),
        _SentinelDefaultCharacterization.SENTINEL_DEFAULT: (
            InsertmanyvaluesSentinelOpts._SUPPORTED_OR_NOT
        ),
        _SentinelDefaultCharacterization.NONE: (
            InsertmanyvaluesSentinelOpts._SUPPORTED_OR_NOT
        ),
        _SentinelDefaultCharacterization.IDENTITY: (
            InsertmanyvaluesSentinelOpts.IDENTITY
        ),
        _SentinelDefaultCharacterization.SEQUENCE: (
            InsertmanyvaluesSentinelOpts.SEQUENCE
        ),
    }
)

# autoincrement lookup: same as the non-autoinc lookup, except a column
# with no default characterization maps to AUTOINCREMENT support
_sentinel_col_autoinc_lookup = _sentinel_col_non_autoinc_lookup.union(
    {
        _SentinelDefaultCharacterization.NONE: (
            InsertmanyvaluesSentinelOpts.AUTOINCREMENT
        ),
    }
)
def _get_sentinel_column_for_table(
    self, table: Table
) -> Optional[Sequence[Column[Any]]]:
    """given a :class:`.Table`, return a usable sentinel column or
    columns for this dialect if any.

    Return None if no sentinel columns could be identified, or raise an
    error if a column was marked as a sentinel explicitly but isn't
    compatible with this dialect.

    Compatibility is decided by intersecting the dialect's
    ``insertmanyvalues_implicit_sentinel`` bitflags with the lookup
    tables above, keyed on the column's default characterization.
    """

    sentinel_opts = self.dialect.insertmanyvalues_implicit_sentinel
    sentinel_characteristics = table._sentinel_column_characteristics

    sent_cols = sentinel_characteristics.columns

    if sent_cols is None:
        return None

    if sentinel_characteristics.is_autoinc:
        bitmask = self._sentinel_col_autoinc_lookup.get(
            sentinel_characteristics.default_characterization, 0
        )
    else:
        bitmask = self._sentinel_col_non_autoinc_lookup.get(
            sentinel_characteristics.default_characterization, 0
        )

    if sentinel_opts & bitmask:
        return sent_cols

    if sentinel_characteristics.is_explicit:
        # a column was explicitly marked as insert_sentinel=True,
        # however it is not compatible with this dialect.   they should
        # not indicate this column as a sentinel if they need to include
        # this dialect.
        # TODO: do we want non-primary key explicit sentinel cols
        # that can gracefully degrade for some backends?
        # insert_sentinel="degrade" perhaps.  not for the initial release.
        # I am hoping people are generally not dealing with this sentinel
        # business at all.

        # if is_explicit is True, there will be only one sentinel column.

        raise exc.InvalidRequestError(
            f"Column {sent_cols[0]} can't be explicitly "
            "marked as a sentinel column when using the "
            f"{self.dialect.name} dialect, as the "
            "particular type of default generation on this column is "
            "not currently compatible with this dialect's specific "
            f"INSERT..RETURNING syntax which can receive the "
            "server-generated value in "
            "a deterministic way.  To remove this error, remove "
            "insert_sentinel=True from primary key autoincrement "
            "columns; these columns are automatically used as "
            "sentinels for supported dialects in any case."
        )

    return None
def _deliver_insertmanyvalues_batches(
    self,
    statement: str,
    parameters: _DBAPIMultiExecuteParams,
    compiled_parameters: List[_MutableCoreSingleExecuteParams],
    generic_setinputsizes: Optional[_GenericSetInputSizesType],
    batch_size: int,
    sort_by_parameter_order: bool,
    schema_translate_map: Optional[SchemaTranslateMapType],
) -> Iterator[_InsertManyValuesBatch]:
    """Yield :class:`._InsertManyValuesBatch` objects that expand a
    single-row "insertmanyvalues" INSERT into multi-row VALUES batches.

    The compiled single-row VALUES expression in ``statement`` is
    replaced with a ``__EXECMANY_TOKEN__`` placeholder, which each batch
    then fills with ``current_batch_size`` copies of the VALUES tuple,
    renumbering bound parameters per row.  Falls back to yielding one
    statement per parameter set when the dialect can't use a multi-row
    VALUES for this statement.
    """
    imv = self._insertmanyvalues
    assert imv is not None

    if not imv.sentinel_param_keys:
        _sentinel_from_params = None
    else:
        # extracts the sentinel values from a compiled parameter dict
        _sentinel_from_params = operator.itemgetter(
            *imv.sentinel_param_keys
        )

    lenparams = len(parameters)
    if imv.is_default_expr and not self.dialect.supports_default_metavalue:
        # backend doesn't support
        # INSERT INTO table (pk_col) VALUES (DEFAULT), (DEFAULT), ...
        # at the moment this is basically SQL Server due to
        # not being able to use DEFAULT for identity column
        # just yield out that many single statements!  still
        # faster than a whole connection.execute() call ;)
        #
        # note we still are taking advantage of the fact that we know
        # we are using RETURNING.   The generalized approach of fetching
        # cursor.lastrowid etc. still goes through the more heavyweight
        # "ExecutionContext per statement" system as it isn't usable
        # as a generic "RETURNING" approach
        use_row_at_a_time = True
        downgraded = False
    elif not self.dialect.supports_multivalues_insert or (
        sort_by_parameter_order
        and self._result_columns
        and (imv.sentinel_columns is None or imv.includes_upsert_behaviors)
    ):
        # deterministic order was requested and the compiler could
        # not organize sentinel columns for this dialect/statement.
        # use row at a time
        use_row_at_a_time = True
        downgraded = True
    else:
        use_row_at_a_time = False
        downgraded = False

    if use_row_at_a_time:
        # one batch per parameter set; statement text is unchanged
        for batchnum, (param, compiled_param) in enumerate(
            cast(
                "Sequence[Tuple[_DBAPISingleExecuteParams, _MutableCoreSingleExecuteParams]]",  # noqa: E501
                zip(parameters, compiled_parameters),
            ),
            1,
        ):
            yield _InsertManyValuesBatch(
                statement,
                param,
                generic_setinputsizes,
                [param],
                (
                    [_sentinel_from_params(compiled_param)]
                    if _sentinel_from_params
                    else []
                ),
                1,
                batchnum,
                lenparams,
                sort_by_parameter_order,
                downgraded,
            )
        return

    if schema_translate_map:
        # re-render schema translation tokens in the VALUES expression
        rst = functools.partial(
            self.preparer._render_schema_translates,
            schema_translate_map=schema_translate_map,
        )
    else:
        rst = None

    imv_single_values_expr = imv.single_values_expr
    if rst:
        imv_single_values_expr = rst(imv_single_values_expr)

    executemany_values = f"({imv_single_values_expr})"
    statement = statement.replace(executemany_values, "__EXECMANY_TOKEN__")

    # Use optional insertmanyvalues_max_parameters
    # to further shrink the batch size so that there are no more than
    # insertmanyvalues_max_parameters params.
    # Currently used by SQL Server, which limits statements to 2100 bound
    # parameters (actually 2099).
    max_params = self.dialect.insertmanyvalues_max_parameters
    if max_params:
        total_num_of_params = len(self.bind_names)
        num_params_per_batch = len(imv.insert_crud_params)
        num_params_outside_of_batch = (
            total_num_of_params - num_params_per_batch
        )
        batch_size = min(
            batch_size,
            (
                (max_params - num_params_outside_of_batch)
                // num_params_per_batch
            ),
        )

    batches = cast("List[Sequence[Any]]", list(parameters))
    compiled_batches = cast(
        "List[Sequence[Any]]", list(compiled_parameters)
    )

    processed_setinputsizes: Optional[_GenericSetInputSizesType] = None
    batchnum = 1
    total_batches = lenparams // batch_size + (
        1 if lenparams % batch_size else 0
    )

    insert_crud_params = imv.insert_crud_params
    assert insert_crud_params is not None

    if rst:
        insert_crud_params = [
            (col, key, rst(expr), st)
            for col, key, expr, st in insert_crud_params
        ]

    escaped_bind_names: Mapping[str, str]
    expand_pos_lower_index = expand_pos_upper_index = 0

    if not self.positional:
        # named-parameter style: rewrite each per-row bind name to a
        # "<key>__<rownum>" form via an __EXECMANY_INDEX__ placeholder
        if self.escaped_bind_names:
            escaped_bind_names = self.escaped_bind_names
        else:
            escaped_bind_names = {}

        all_keys = set(parameters[0])

        def apply_placeholders(keys, formatted):
            # rewrite each bind in ``formatted`` to its indexed form
            for key in keys:
                key = escaped_bind_names.get(key, key)
                formatted = formatted.replace(
                    self.bindtemplate % {"name": key},
                    self.bindtemplate
                    % {"name": f"{key}__EXECMANY_INDEX__"},
                )
            return formatted

        if imv.embed_values_counter:
            imv_values_counter = ", _IMV_VALUES_COUNTER"
        else:
            imv_values_counter = ""
        formatted_values_clause = f"""({', '.join(
            apply_placeholders(bind_keys, formatted)
            for _, _, formatted, bind_keys in insert_crud_params
        )}{imv_values_counter})"""

        keys_to_replace = all_keys.intersection(
            escaped_bind_names.get(key, key)
            for _, _, _, bind_keys in insert_crud_params
            for key in bind_keys
        )
        base_parameters = {
            key: parameters[0][key]
            for key in all_keys.difference(keys_to_replace)
        }
        executemany_values_w_comma = ""
    else:
        # positional-parameter style: repeat the VALUES tuple textually
        # and expand the positional parameter sequence
        formatted_values_clause = ""
        keys_to_replace = set()
        base_parameters = {}

        if imv.embed_values_counter:
            executemany_values_w_comma = (
                f"({imv_single_values_expr}, _IMV_VALUES_COUNTER), "
            )
        else:
            executemany_values_w_comma = f"({imv_single_values_expr}), "

        all_names_we_will_expand: Set[str] = set()
        for elem in imv.insert_crud_params:
            all_names_we_will_expand.update(elem[3])

        # get the start and end position in a particular list
        # of parameters where we will be doing the "expanding".
        # statements can have params on either side or both sides,
        # given RETURNING and CTEs
        if all_names_we_will_expand:
            positiontup = self.positiontup
            assert positiontup is not None

            all_expand_positions = {
                idx
                for idx, name in enumerate(positiontup)
                if name in all_names_we_will_expand
            }
            expand_pos_lower_index = min(all_expand_positions)
            expand_pos_upper_index = max(all_expand_positions) + 1
            # expanded positions must be contiguous
            assert (
                len(all_expand_positions)
                == expand_pos_upper_index - expand_pos_lower_index
            )

        if self._numeric_binds:
            escaped = re.escape(self._numeric_binds_identifier_char)
            executemany_values_w_comma = re.sub(
                rf"{escaped}\d+", "%s", executemany_values_w_comma
            )

    while batches:
        batch = batches[0:batch_size]
        compiled_batch = compiled_batches[0:batch_size]

        batches[0:batch_size] = []
        compiled_batches[0:batch_size] = []

        if batches:
            current_batch_size = batch_size
        else:
            current_batch_size = len(batch)

        if generic_setinputsizes:
            # if setinputsizes is present, expand this collection to
            # suit the batch length as well
            # currently this will be mssql+pyodbc for internal dialects
            processed_setinputsizes = [
                (new_key, len_, typ)
                for new_key, len_, typ in (
                    (f"{key}_{index}", len_, typ)
                    for index in range(current_batch_size)
                    for key, len_, typ in generic_setinputsizes
                )
            ]

        replaced_parameters: Any
        if self.positional:
            num_ins_params = imv.num_positional_params_counted

            batch_iterator: Iterable[Sequence[Any]]
            extra_params_left: Sequence[Any]
            extra_params_right: Sequence[Any]

            if num_ins_params == len(batch[0]):
                extra_params_left = extra_params_right = ()
                batch_iterator = batch
            else:
                extra_params_left = batch[0][:expand_pos_lower_index]
                extra_params_right = batch[0][expand_pos_upper_index:]
                batch_iterator = (
                    b[expand_pos_lower_index:expand_pos_upper_index]
                    for b in batch
                )

            if imv.embed_values_counter:
                expanded_values_string = (
                    "".join(
                        executemany_values_w_comma.replace(
                            "_IMV_VALUES_COUNTER", str(i)
                        )
                        for i, _ in enumerate(batch)
                    )
                )[:-2]
            else:
                # trailing ", " is stripped with [:-2]
                expanded_values_string = (
                    (executemany_values_w_comma * current_batch_size)
                )[:-2]

            if self._numeric_binds and num_ins_params > 0:
                # numeric will always number the parameters inside of
                # VALUES (and thus order self.positiontup) to be higher
                # than non-VALUES parameters, no matter where in the
                # statement those non-VALUES parameters appear (this is
                # ensured in _process_numeric by numbering first all
                # params that are not in _values_bindparam)
                # therefore all extra params are always
                # on the left side and numbered lower than the VALUES
                # parameters
                assert not extra_params_right

                start = expand_pos_lower_index + 1
                end = num_ins_params * (current_batch_size) + start

                # need to format here, since statement may contain
                # unescaped %, while values_string contains just (%s, %s)
                positions = tuple(
                    f"{self._numeric_binds_identifier_char}{i}"
                    for i in range(start, end)
                )
                expanded_values_string = expanded_values_string % positions

            replaced_statement = statement.replace(
                "__EXECMANY_TOKEN__", expanded_values_string
            )

            replaced_parameters = tuple(
                itertools.chain.from_iterable(batch_iterator)
            )

            replaced_parameters = (
                extra_params_left
                + replaced_parameters
                + extra_params_right
            )
        else:
            replaced_values_clauses = []
            replaced_parameters = base_parameters.copy()

            for i, param in enumerate(batch):
                fmv = formatted_values_clause.replace(
                    "EXECMANY_INDEX__", str(i)
                )
                if imv.embed_values_counter:
                    fmv = fmv.replace("_IMV_VALUES_COUNTER", str(i))

                replaced_values_clauses.append(fmv)
                replaced_parameters.update(
                    {f"{key}__{i}": param[key] for key in keys_to_replace}
                )

            replaced_statement = statement.replace(
                "__EXECMANY_TOKEN__",
                ", ".join(replaced_values_clauses),
            )

        yield _InsertManyValuesBatch(
            replaced_statement,
            replaced_parameters,
            processed_setinputsizes,
            batch,
            (
                [_sentinel_from_params(cb) for cb in compiled_batch]
                if _sentinel_from_params
                else []
            ),
            current_batch_size,
            batchnum,
            total_batches,
            sort_by_parameter_order,
            False,
        )
        batchnum += 1
def visit_insert(
self, insert_stmt, visited_bindparam=None, visiting_cte=None, **kw
):
compile_state = insert_stmt._compile_state_factory(
insert_stmt, self, **kw
)
insert_stmt = compile_state.statement
if visiting_cte is not None:
kw["visiting_cte"] = visiting_cte
toplevel = False
else:
toplevel = not self.stack
if toplevel:
self.isinsert = True
if not self.dml_compile_state:
self.dml_compile_state = compile_state
if not self.compile_state:
self.compile_state = compile_state
self.stack.append(
{
"correlate_froms": set(),
"asfrom_froms": set(),
"selectable": insert_stmt,
}
)
counted_bindparam = 0
# reset any incoming "visited_bindparam" collection
visited_bindparam = None
# for positional, insertmanyvalues needs to know how many
# bound parameters are in the VALUES sequence; there's no simple
# rule because default expressions etc. can have zero or more
# params inside them. After multiple attempts to figure this out,
# this very simplistic "count after" works and is
# likely the least amount of callcounts, though looks clumsy
if self.positional and visiting_cte is None:
# if we are inside a CTE, don't count parameters
# here since they wont be for insertmanyvalues. keep
# visited_bindparam at None so no counting happens.
# see #9173
visited_bindparam = []
crud_params_struct = crud._get_crud_params(
self,
insert_stmt,
compile_state,
toplevel,
visited_bindparam=visited_bindparam,
**kw,
)
if self.positional and visited_bindparam is not None:
counted_bindparam = len(visited_bindparam)
if self._numeric_binds:
if self._values_bindparam is not None:
self._values_bindparam += visited_bindparam
else:
self._values_bindparam = visited_bindparam
crud_params_single = crud_params_struct.single_params
if (
not crud_params_single
and not self.dialect.supports_default_values
and not self.dialect.supports_default_metavalue
and not self.dialect.supports_empty_insert
):
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support empty "
"inserts." % self.dialect.name
)
if compile_state._has_multi_parameters:
if not self.dialect.supports_multivalues_insert:
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support "
"in-place multirow inserts." % self.dialect.name
)
elif (
self.implicit_returning or insert_stmt._returning
) and insert_stmt._sort_by_parameter_order:
raise exc.CompileError(
"RETURNING cannot be determinstically sorted when "
"using an INSERT which includes multi-row values()."
)
crud_params_single = crud_params_struct.single_params
else:
crud_params_single = crud_params_struct.single_params
preparer = self.preparer
supports_default_values = self.dialect.supports_default_values
text = "INSERT "
if insert_stmt._prefixes:
text += self._generate_prefixes(
insert_stmt, insert_stmt._prefixes, **kw
)
text += "INTO "
table_text = preparer.format_table(insert_stmt.table)
if insert_stmt._hints:
_, table_text = self._setup_crud_hints(insert_stmt, table_text)
if insert_stmt._independent_ctes:
self._dispatch_independent_ctes(insert_stmt, kw)
text += table_text
if crud_params_single or not supports_default_values:
text += " (%s)" % ", ".join(
[expr for _, expr, _, _ in crud_params_single]
)
# look for insertmanyvalues attributes that would have been configured
# by crud.py as it scanned through the columns to be part of the
# INSERT
use_insertmanyvalues = crud_params_struct.use_insertmanyvalues
named_sentinel_params: Optional[Sequence[str]] = None
add_sentinel_cols = None
implicit_sentinel = False
returning_cols = self.implicit_returning or insert_stmt._returning
if returning_cols:
add_sentinel_cols = crud_params_struct.use_sentinel_columns
if add_sentinel_cols is not None:
assert use_insertmanyvalues
# search for the sentinel column explicitly present
# in the INSERT columns list, and additionally check that
# this column has a bound parameter name set up that's in the
# parameter list. If both of these cases are present, it means
# we will have a client side value for the sentinel in each
# parameter set.
_params_by_col = {
col: param_names
for col, _, _, param_names in crud_params_single
}
named_sentinel_params = []
for _add_sentinel_col in add_sentinel_cols:
if _add_sentinel_col not in _params_by_col:
named_sentinel_params = None
break
param_name = self._within_exec_param_key_getter(
_add_sentinel_col
)
if param_name not in _params_by_col[_add_sentinel_col]:
named_sentinel_params = None
break
named_sentinel_params.append(param_name)
if named_sentinel_params is None:
# if we are not going to have a client side value for
# the sentinel in the parameter set, that means it's
# an autoincrement, an IDENTITY, or a server-side SQL
# expression like nextval('seqname'). So this is
# an "implicit" sentinel; we will look for it in
# RETURNING
# only, and then sort on it. For this case on PG,
# SQL Server we have to use a special INSERT form
# that guarantees the server side function lines up with
# the entries in the VALUES.
if (
self.dialect.insertmanyvalues_implicit_sentinel
& InsertmanyvaluesSentinelOpts.ANY_AUTOINCREMENT
):
implicit_sentinel = True
else:
# here, we are not using a sentinel at all
# and we are likely the SQLite dialect.
# The first add_sentinel_col that we have should not
# be marked as "insert_sentinel=True". if it was,
# an error should have been raised in
# _get_sentinel_column_for_table.
assert not add_sentinel_cols[0]._insert_sentinel, (
"sentinel selection rules should have prevented "
"us from getting here for this dialect"
)
# always put the sentinel columns last. even if they are
# in the returning list already, they will be there twice
# then.
returning_cols = list(returning_cols) + list(add_sentinel_cols)
returning_clause = self.returning_clause(
insert_stmt,
returning_cols,
populate_result_map=toplevel,
)
if self.returning_precedes_values:
text += " " + returning_clause
else:
returning_clause = None
if insert_stmt.select is not None:
# placed here by crud.py
select_text = self.process(
self.stack[-1]["insert_from_select"], insert_into=True, **kw
)
if self.ctes and self.dialect.cte_follows_insert:
nesting_level = len(self.stack) if not toplevel else None
text += " %s%s" % (
self._render_cte_clause(
nesting_level=nesting_level,
include_following_stack=True,
),
select_text,
)
else:
text += " %s" % select_text
elif not crud_params_single and supports_default_values:
text += " DEFAULT VALUES"
if use_insertmanyvalues:
self._insertmanyvalues = _InsertManyValues(
True,
self.dialect.default_metavalue_token,
cast(
"List[crud._CrudParamElementStr]", crud_params_single
),
counted_bindparam,
sort_by_parameter_order=(
insert_stmt._sort_by_parameter_order
),
includes_upsert_behaviors=(
insert_stmt._post_values_clause is not None
),
sentinel_columns=add_sentinel_cols,
num_sentinel_columns=(
len(add_sentinel_cols) if add_sentinel_cols else 0
),
implicit_sentinel=implicit_sentinel,
)
elif compile_state._has_multi_parameters:
text += " VALUES %s" % (
", ".join(
"(%s)"
% (", ".join(value for _, _, value, _ in crud_param_set))
for crud_param_set in crud_params_struct.all_multi_params
),
)
else:
insert_single_values_expr = ", ".join(
[
value
for _, _, value, _ in cast(
"List[crud._CrudParamElementStr]",
crud_params_single,
)
]
)
if use_insertmanyvalues:
if (
implicit_sentinel
and (
self.dialect.insertmanyvalues_implicit_sentinel
& InsertmanyvaluesSentinelOpts.USE_INSERT_FROM_SELECT
)
# this is checking if we have
# INSERT INTO table (id) VALUES (DEFAULT).
and not (crud_params_struct.is_default_metavalue_only)
):
# if we have a sentinel column that is server generated,
# then for selected backends render the VALUES list as a
# subquery. This is the orderable form supported by
# PostgreSQL and SQL Server.
embed_sentinel_value = True
render_bind_casts = (
self.dialect.insertmanyvalues_implicit_sentinel
& InsertmanyvaluesSentinelOpts.RENDER_SELECT_COL_CASTS
)
colnames = ", ".join(
f"p{i}" for i, _ in enumerate(crud_params_single)
)
if render_bind_casts:
# render casts for the SELECT list. For PG, we are
# already rendering bind casts in the parameter list,
# selectively for the more "tricky" types like ARRAY.
# however, even for the "easy" types, if the parameter
# is NULL for every entry, PG gives up and says
# "it must be TEXT", which fails for other easy types
# like ints. So we cast on this side too.
colnames_w_cast = ", ".join(
self.render_bind_cast(
col.type,
col.type._unwrapped_dialect_impl(self.dialect),
f"p{i}",
)
for i, (col, *_) in enumerate(crud_params_single)
)
else:
colnames_w_cast = colnames
text += (
f" SELECT {colnames_w_cast} FROM "
f"(VALUES ({insert_single_values_expr})) "
f"AS imp_sen({colnames}, sen_counter) "
"ORDER BY sen_counter"
)
else:
# otherwise, if no sentinel or backend doesn't support
# orderable subquery form, use a plain VALUES list
embed_sentinel_value = False
text += f" VALUES ({insert_single_values_expr})"
self._insertmanyvalues = _InsertManyValues(
is_default_expr=False,
single_values_expr=insert_single_values_expr,
insert_crud_params=cast(
"List[crud._CrudParamElementStr]",
crud_params_single,
),
num_positional_params_counted=counted_bindparam,
sort_by_parameter_order=(
insert_stmt._sort_by_parameter_order
),
includes_upsert_behaviors=(
insert_stmt._post_values_clause is not None
),
sentinel_columns=add_sentinel_cols,
num_sentinel_columns=(
len(add_sentinel_cols) if add_sentinel_cols else 0
),
sentinel_param_keys=named_sentinel_params,
implicit_sentinel=implicit_sentinel,
embed_values_counter=embed_sentinel_value,
)
else:
text += f" VALUES ({insert_single_values_expr})"
if insert_stmt._post_values_clause is not None:
post_values_clause = self.process(
insert_stmt._post_values_clause, **kw
)
if post_values_clause:
text += " " + post_values_clause
if returning_clause and not self.returning_precedes_values:
text += " " + returning_clause
if self.ctes and not self.dialect.cte_follows_insert:
nesting_level = len(self.stack) if not toplevel else None
text = (
self._render_cte_clause(
nesting_level=nesting_level,
include_following_stack=True,
)
+ text
)
self.stack.pop(-1)
return text
def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
"""Provide a hook to override the initial table clause
in an UPDATE statement.
MySQL overrides this.
"""
kw["asfrom"] = True
return from_table._compiler_dispatch(self, iscrud=True, **kw)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
"""Provide a hook to override the generation of an
UPDATE..FROM clause.
MySQL and MSSQL override this.
"""
raise NotImplementedError(
"This backend does not support multiple-table "
"criteria within UPDATE"
)
def update_post_criteria_clause(
self, update_stmt: Update, **kw: Any
) -> Optional[str]:
"""provide a hook to override generation after the WHERE criteria
in an UPDATE statement
.. versionadded:: 2.1
"""
if update_stmt._post_criteria_clause is not None:
return self.process(
update_stmt._post_criteria_clause,
**kw,
)
else:
return None
def delete_post_criteria_clause(
self, delete_stmt: Delete, **kw: Any
) -> Optional[str]:
"""provide a hook to override generation after the WHERE criteria
in a DELETE statement
.. versionadded:: 2.1
"""
if delete_stmt._post_criteria_clause is not None:
return self.process(
delete_stmt._post_criteria_clause,
**kw,
)
else:
return None
def visit_update(
self,
update_stmt: Update,
visiting_cte: Optional[CTE] = None,
**kw: Any,
) -> str:
compile_state = update_stmt._compile_state_factory(
update_stmt, self, **kw
)
if TYPE_CHECKING:
assert isinstance(compile_state, UpdateDMLState)
update_stmt = compile_state.statement # type: ignore[assignment]
if visiting_cte is not None:
kw["visiting_cte"] = visiting_cte
toplevel = False
else:
toplevel = not self.stack
if toplevel:
self.isupdate = True
if not self.dml_compile_state:
self.dml_compile_state = compile_state
if not self.compile_state:
self.compile_state = compile_state
if self.linting & COLLECT_CARTESIAN_PRODUCTS:
from_linter = FromLinter({}, set())
warn_linting = self.linting & WARN_LINTING
if toplevel:
self.from_linter = from_linter
else:
from_linter = None
warn_linting = False
extra_froms = compile_state._extra_froms
is_multitable = bool(extra_froms)
if is_multitable:
# main table might be a JOIN
main_froms = set(_from_objects(update_stmt.table))
render_extra_froms = [
f for f in extra_froms if f not in main_froms
]
correlate_froms = main_froms.union(extra_froms)
else:
render_extra_froms = []
correlate_froms = {update_stmt.table}
self.stack.append(
{
"correlate_froms": correlate_froms,
"asfrom_froms": correlate_froms,
"selectable": update_stmt,
}
)
text = "UPDATE "
if update_stmt._prefixes:
text += self._generate_prefixes(
update_stmt, update_stmt._prefixes, **kw
)
table_text = self.update_tables_clause(
update_stmt,
update_stmt.table,
render_extra_froms,
from_linter=from_linter,
**kw,
)
crud_params_struct = crud._get_crud_params(
self, update_stmt, compile_state, toplevel, **kw
)
crud_params = crud_params_struct.single_params
if update_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
update_stmt, table_text
)
else:
dialect_hints = None
if update_stmt._independent_ctes:
self._dispatch_independent_ctes(update_stmt, kw)
text += table_text
text += " SET "
text += ", ".join(
expr + "=" + value
for _, expr, value, _ in cast(
"List[Tuple[Any, str, str, Any]]", crud_params
)
)
if self.implicit_returning or update_stmt._returning:
if self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt,
self.implicit_returning or update_stmt._returning,
populate_result_map=toplevel,
)
if extra_froms:
extra_from_text = self.update_from_clause(
update_stmt,
update_stmt.table,
render_extra_froms,
dialect_hints,
from_linter=from_linter,
**kw,
)
if extra_from_text:
text += " " + extra_from_text
if update_stmt._where_criteria:
t = self._generate_delimited_and_list(
update_stmt._where_criteria, from_linter=from_linter, **kw
)
if t:
text += " WHERE " + t
ulc = self.update_post_criteria_clause(
update_stmt, from_linter=from_linter, **kw
)
if ulc:
text += " " + ulc
if (
self.implicit_returning or update_stmt._returning
) and not self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt,
self.implicit_returning or update_stmt._returning,
populate_result_map=toplevel,
)
if self.ctes:
nesting_level = len(self.stack) if not toplevel else None
text = self._render_cte_clause(nesting_level=nesting_level) + text
if warn_linting:
assert from_linter is not None
from_linter.warn(stmt_type="UPDATE")
self.stack.pop(-1)
return text # type: ignore[no-any-return]
def delete_extra_from_clause(
self, delete_stmt, from_table, extra_froms, from_hints, **kw
):
"""Provide a hook to override the generation of an
DELETE..FROM clause.
This can be used to implement DELETE..USING for example.
MySQL and MSSQL override this.
"""
raise NotImplementedError(
"This backend does not support multiple-table "
"criteria within DELETE"
)
def delete_table_clause(self, delete_stmt, from_table, extra_froms, **kw):
return from_table._compiler_dispatch(
self, asfrom=True, iscrud=True, **kw
)
def visit_delete(self, delete_stmt, visiting_cte=None, **kw):
compile_state = delete_stmt._compile_state_factory(
delete_stmt, self, **kw
)
delete_stmt = compile_state.statement
if visiting_cte is not None:
kw["visiting_cte"] = visiting_cte
toplevel = False
else:
toplevel = not self.stack
if toplevel:
self.isdelete = True
if not self.dml_compile_state:
self.dml_compile_state = compile_state
if not self.compile_state:
self.compile_state = compile_state
if self.linting & COLLECT_CARTESIAN_PRODUCTS:
from_linter = FromLinter({}, set())
warn_linting = self.linting & WARN_LINTING
if toplevel:
self.from_linter = from_linter
else:
from_linter = None
warn_linting = False
extra_froms = compile_state._extra_froms
correlate_froms = {delete_stmt.table}.union(extra_froms)
self.stack.append(
{
"correlate_froms": correlate_froms,
"asfrom_froms": correlate_froms,
"selectable": delete_stmt,
}
)
text = "DELETE "
if delete_stmt._prefixes:
text += self._generate_prefixes(
delete_stmt, delete_stmt._prefixes, **kw
)
text += "FROM "
try:
table_text = self.delete_table_clause(
delete_stmt,
delete_stmt.table,
extra_froms,
from_linter=from_linter,
)
except TypeError:
# anticipate 3rd party dialects that don't include **kw
# TODO: remove in 2.1
table_text = self.delete_table_clause(
delete_stmt, delete_stmt.table, extra_froms
)
if from_linter:
_ = self.process(delete_stmt.table, from_linter=from_linter)
crud._get_crud_params(self, delete_stmt, compile_state, toplevel, **kw)
if delete_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
delete_stmt, table_text
)
else:
dialect_hints = None
if delete_stmt._independent_ctes:
self._dispatch_independent_ctes(delete_stmt, kw)
text += table_text
if (
self.implicit_returning or delete_stmt._returning
) and self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt,
self.implicit_returning or delete_stmt._returning,
populate_result_map=toplevel,
)
if extra_froms:
extra_from_text = self.delete_extra_from_clause(
delete_stmt,
delete_stmt.table,
extra_froms,
dialect_hints,
from_linter=from_linter,
**kw,
)
if extra_from_text:
text += " " + extra_from_text
if delete_stmt._where_criteria:
t = self._generate_delimited_and_list(
delete_stmt._where_criteria, from_linter=from_linter, **kw
)
if t:
text += " WHERE " + t
dlc = self.delete_post_criteria_clause(
delete_stmt, from_linter=from_linter, **kw
)
if dlc:
text += " " + dlc
if (
self.implicit_returning or delete_stmt._returning
) and not self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt,
self.implicit_returning or delete_stmt._returning,
populate_result_map=toplevel,
)
if self.ctes:
nesting_level = len(self.stack) if not toplevel else None
text = self._render_cte_clause(nesting_level=nesting_level) + text
if warn_linting:
assert from_linter is not None
from_linter.warn(stmt_type="DELETE")
self.stack.pop(-1)
return text
def visit_savepoint(self, savepoint_stmt, **kw):
return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt, **kw):
return "ROLLBACK TO SAVEPOINT %s" % self.preparer.format_savepoint(
savepoint_stmt
)
def visit_release_savepoint(self, savepoint_stmt, **kw):
return "RELEASE SAVEPOINT %s" % self.preparer.format_savepoint(
savepoint_stmt
)
| SQLCompiler |
python | PrefectHQ__prefect | tests/cli/test_work_queues.py | {
"start": 19509,
"end": 20773
} | class ____:
def test_ls(self, work_queue):
invoke_and_assert(
command="work-queue ls",
expected_code=0,
)
def test_ls_with_pool(
self,
work_queue_1,
):
cmd = f"work-queue ls -p {work_queue_1.work_pool.name}"
invoke_and_assert(
command=cmd,
expected_code=0,
)
def test_ls_with_zero_concurrency_limit(
self,
work_queue_1,
):
invoke_and_assert(
command=f"work-queue set-concurrency-limit {work_queue_1.name} 0",
expected_code=0,
)
invoke_and_assert(
command="work-queue set-concurrency-limit default 0",
expected_code=0,
)
invoke_and_assert(
command=f"work-queue ls -p {work_queue_1.work_pool.name}",
expected_code=0,
expected_output_does_not_contain="None",
)
def test_ls_with_bad_pool(
self,
work_queue_1,
):
cmd = f"work-queue ls -p {work_queue_1.work_pool.name}-bad"
res = invoke_and_assert(
command=cmd,
expected_code=1,
)
assert f"No work pool found: '{work_queue_1.work_pool.name}-bad'" in res.output
| TestLS |
python | realpython__materials | python-unittest/test_stack.py | {
"start": 43,
"end": 1041
} | class ____(unittest.TestCase):
def setUp(self):
self.stack = Stack()
def tearDown(self):
del self.stack
def test_push(self):
self.stack.push(1)
self.assertEqual(self.stack.items, [1])
def test_pop(self):
self.stack.push(2)
item = self.stack.pop()
self.assertEqual(item, 2)
def test_len(self):
self.stack.push(3)
self.stack.push(4)
self.assertEqual(len(self.stack), 2)
def test_iter(self):
items = [5, 6, 7]
for item in items:
self.stack.push(item)
for stack_item, test_item in zip(self.stack, items, strict=False):
self.assertEqual(stack_item, test_item)
def test_reversed(self):
items = [5, 6, 7]
for item in items:
self.stack.push(item)
reversed_stack = reversed(self.stack)
self.assertEqual(list(reversed_stack), [7, 6, 5])
if __name__ == "__main__":
unittest.main(verbosity=2)
| TestStack |
python | coleifer__peewee | tests/regressions.py | {
"start": 6411,
"end": 7283
} | class ____(ModelTestCase):
@requires_mysql
@requires_models(User)
def test_count_union(self):
with self.database.atomic():
for i in range(5):
User.create(username='user-%d' % i)
lhs = User.select()
rhs = User.select()
query = (lhs | rhs)
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" '
'UNION '
'SELECT "t2"."id", "t2"."username" FROM "users" AS "t2"'), [])
self.assertEqual(query.count(), 5)
query = query.limit(3)
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" '
'UNION '
'SELECT "t2"."id", "t2"."username" FROM "users" AS "t2" '
'LIMIT ?'), [3])
self.assertEqual(query.count(), 3)
| TestCountUnionRegression |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/prefect_dbt/cli/configs/postgres.py | {
"start": 386,
"end": 3076
} | class ____(BaseTargetConfigs):
"""
Target configs contain credentials and
settings, specific to Postgres.
To find valid keys, head to the [Postgres Profile](
https://docs.getdbt.com/reference/warehouse-profiles/postgres-profile)
page.
Attributes:
credentials: The credentials to use to authenticate; if there are
duplicate keys between credentials and TargetConfigs,
e.g. schema, an error will be raised.
"""
_block_type_name = "dbt CLI Postgres Target Configs"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
_description = "dbt CLI target configs containing credentials and settings specific to Postgres." # noqa
_documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
type: Literal["postgres"] = Field(
default="postgres", description="The type of the target."
)
credentials: SqlAlchemyConnector = Field(
default=...,
description=(
"The credentials to use to authenticate; if there are duplicate keys "
"between credentials and TargetConfigs, e.g. schema, "
"an error will be raised."
),
) # noqa
def get_configs(self) -> Dict[str, Any]:
"""
Returns the dbt configs specific to Postgres profile.
Returns:
A configs JSON.
"""
all_configs_json = super().get_configs()
rename_keys = {
# dbt
"type": "type",
"schema": "schema",
"threads": "threads",
# general
"host": "host",
"username": "user",
"password": "password",
"port": "port",
"database": "dbname",
# optional
"keepalives_idle": "keepalives_idle",
"connect_timeout": "connect_timeout",
"retries": "retries",
"search_path": "search_path",
"role": "role",
"sslmode": "sslmode",
}
configs_json = {}
extras = self.extras or {}
for key in all_configs_json.keys():
if key not in rename_keys and key not in extras:
# skip invalid keys, like fetch_size + poll_frequency_s
continue
# rename key to something dbt profile expects
dbt_key = rename_keys.get(key) or key
configs_json[dbt_key] = all_configs_json[key]
port = configs_json.get("port")
if port is not None:
configs_json["port"] = int(port)
return configs_json
| PostgresTargetConfigs |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B027.py | {
"start": 301,
"end": 1407
} | class ____(ABC):
def empty_1(self): # error
...
def empty_2(self): # error
pass
def empty_3(self): # error
"""docstring"""
...
def empty_4(self): # error
"""multiple ellipsis/pass"""
...
pass
...
pass
@notabstract
def abstract_0(self):
...
@abstractmethod
def abstract_1(self):
...
@abstractmethod
def abstract_2(self):
pass
@abc.abstractmethod
def abstract_3(self):
...
@abc.abstractproperty
def abstract_4(self):
...
@abstractproperty
def abstract_5(self):
...
@notabstract_property
def abstract_6(self):
...
@abstractclassmethod
def abstract_7(self):
pass
@abc.abstractclassmethod
def abstract_8(self):
...
@abstractstaticmethod
def abstract_9(self):
pass
@abc.abstractstaticmethod
def abstract_10(self):
...
def body_1(self):
print("foo")
...
def body_2(self):
self.body_1()
| AbstractClass |
python | tensorflow__tensorflow | tensorflow/python/data/ops/dataset_ops.py | {
"start": 186466,
"end": 186831
} | class ____(DatasetV2):
"""A Dataset wrapper around a `tf.variant`-typed function argument."""
def __init__(self, dataset_variant, element_spec):
self._element_spec = element_spec
super(_VariantDataset, self).__init__(dataset_variant)
def _inputs(self):
return []
@property
def element_spec(self):
return self._element_spec
| _VariantDataset |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 730838,
"end": 731439
} | class ____(sgqlc.types.Type, Contribution):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("pull_request", "pull_request_review", "repository")
pull_request = sgqlc.types.Field(
sgqlc.types.non_null("PullRequest"), graphql_name="pullRequest"
)
pull_request_review = sgqlc.types.Field(
sgqlc.types.non_null("PullRequestReview"), graphql_name="pullRequestReview"
)
repository = sgqlc.types.Field(
sgqlc.types.non_null("Repository"), graphql_name="repository"
)
| CreatedPullRequestReviewContribution |
python | doocs__leetcode | solution/1500-1599/1503.Last Moment Before All Ants Fall Out of a Plank/Solution.py | {
"start": 0,
"end": 241
} | class ____:
def getLastMoment(self, n: int, left: List[int], right: List[int]) -> int:
ans = 0
for x in left:
ans = max(ans, x)
for x in right:
ans = max(ans, n - x)
return ans
| Solution |
python | kubernetes-client__python | kubernetes/client/models/v1_config_map_key_selector.py | {
"start": 383,
"end": 5648
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'key': 'str',
'name': 'str',
'optional': 'bool'
}
attribute_map = {
'key': 'key',
'name': 'name',
'optional': 'optional'
}
def __init__(self, key=None, name=None, optional=None, local_vars_configuration=None): # noqa: E501
"""V1ConfigMapKeySelector - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._key = None
self._name = None
self._optional = None
self.discriminator = None
self.key = key
if name is not None:
self.name = name
if optional is not None:
self.optional = optional
@property
def key(self):
"""Gets the key of this V1ConfigMapKeySelector. # noqa: E501
The key to select. # noqa: E501
:return: The key of this V1ConfigMapKeySelector. # noqa: E501
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this V1ConfigMapKeySelector.
The key to select. # noqa: E501
:param key: The key of this V1ConfigMapKeySelector. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and key is None: # noqa: E501
raise ValueError("Invalid value for `key`, must not be `None`") # noqa: E501
self._key = key
@property
def name(self):
"""Gets the name of this V1ConfigMapKeySelector. # noqa: E501
Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:return: The name of this V1ConfigMapKeySelector. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1ConfigMapKeySelector.
Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:param name: The name of this V1ConfigMapKeySelector. # noqa: E501
:type: str
"""
self._name = name
@property
def optional(self):
"""Gets the optional of this V1ConfigMapKeySelector. # noqa: E501
Specify whether the ConfigMap or its key must be defined # noqa: E501
:return: The optional of this V1ConfigMapKeySelector. # noqa: E501
:rtype: bool
"""
return self._optional
@optional.setter
def optional(self, optional):
"""Sets the optional of this V1ConfigMapKeySelector.
Specify whether the ConfigMap or its key must be defined # noqa: E501
:param optional: The optional of this V1ConfigMapKeySelector. # noqa: E501
:type: bool
"""
self._optional = optional
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ConfigMapKeySelector):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ConfigMapKeySelector):
return True
return self.to_dict() != other.to_dict()
| V1ConfigMapKeySelector |
python | skorch-dev__skorch | skorch/tests/test_dataset.py | {
"start": 11124,
"end": 12581
} | class ____:
@pytest.fixture(scope='module')
def module_cls(self):
"""Return a simple module that concatenates all input values
in forward step.
"""
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.dense = nn.Linear(20, 2)
# pylint: disable=arguments-differ
def forward(self, **X):
X = torch.cat(list(X.values()), 1)
X = F.softmax(self.dense(X), dim=-1)
return X
return MyModule
@pytest.fixture(scope='module')
def pd(self):
import pandas as pd
return pd
@pytest.fixture(scope='module')
def data(self, pd):
X, y = make_classification(1000, 20, n_informative=10, random_state=0)
X = X.astype(np.float32)
df = pd.DataFrame(X, columns=map(str, range(X.shape[1])))
return df, y
@pytest.fixture(scope='module')
def net_cls(self):
from skorch import NeuralNetClassifier
return NeuralNetClassifier
@pytest.fixture(scope='module')
def net(self, net_cls, module_cls):
return net_cls(
module_cls,
max_epochs=2,
lr=0.1,
)
def test_fit_predict_proba(self, net, data):
X, y = data
net.fit(X, y)
y_proba = net.predict_proba(X)
assert np.allclose(y_proba.sum(1), 1)
| TestNetWithPandas |
python | getsentry__sentry | src/sentry/apidocs/parameters.py | {
"start": 19537,
"end": 19778
} | class ____(serializers.Serializer):
cursor = serializers.CharField(
help_text="A pointer to the last object fetched and its sort order; used to retrieve the next or previous results.",
required=False,
)
| CursorQueryParam |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-service-now/llama_index/readers/service_now/event.py | {
"start": 3495,
"end": 4077
} | class ____(BaseEvent):
"""Event fired when attachment processing fails."""
page_id: str = Field(description="ID of the parent page")
attachment_id: str = Field(description="ID of the attachment")
attachment_name: str = Field(description="Name of the attachment")
attachment_type: str = Field(description="MIME type of the attachment")
attachment_size: int = Field(description="Size of the attachment in bytes")
attachment_link: str = Field(description="Link to the attachment")
error: str = Field(description="Error message")
| SNOWKBAttachmentFailedEvent |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.