language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | palantir__python-language-server | pyls/lsp.py | {
"start": 1272,
"end": 1354
} | class ____(object):
NONE = 0
FULL = 1
INCREMENTAL = 2
| TextDocumentSyncKind |
python | getsentry__sentry | src/sentry/discover/apps.py | {
"start": 36,
"end": 133
} | class ____(AppConfig):
name = "sentry.discover"
def ready(self) -> None:
pass
| Config |
python | scrapy__scrapy | scrapy/core/downloader/webclient.py | {
"start": 2982,
"end": 8788
} | class ____(ClientFactory):
protocol = ScrapyHTTPPageGetter
waiting = 1
noisy = False
followRedirect = False
afterFoundGet = False
def _build_response(self, body, request):
request.meta["download_latency"] = self.headers_time - self.start_time
status = int(self.status)
headers = Headers(self.response_headers)
respcls = responsetypes.from_args(headers=headers, url=self._url, body=body)
return respcls(
url=self._url,
status=status,
headers=headers,
body=body,
protocol=to_unicode(self.version),
)
def _set_connection_attributes(self, request):
proxy = request.meta.get("proxy")
if proxy:
proxy_parsed = urlparse(to_bytes(proxy, encoding="ascii"))
self.scheme = proxy_parsed.scheme
self.host = proxy_parsed.hostname
self.port = proxy_parsed.port
self.netloc = proxy_parsed.netloc
if self.port is None:
self.port = 443 if proxy_parsed.scheme == b"https" else 80
self.path = self.url
else:
parsed = urlparse_cached(request)
path_str = urlunparse(
("", "", parsed.path or "/", parsed.params, parsed.query, "")
)
self.path = to_bytes(path_str, encoding="ascii")
assert parsed.hostname is not None
self.host = to_bytes(parsed.hostname, encoding="ascii")
self.port = parsed.port
self.scheme = to_bytes(parsed.scheme, encoding="ascii")
self.netloc = to_bytes(parsed.netloc, encoding="ascii")
if self.port is None:
self.port = 443 if self.scheme == b"https" else 80
def __init__(self, request: Request, timeout: float = 180):
warnings.warn(
"ScrapyHTTPClientFactory is deprecated and will be removed in a future Scrapy version.",
category=ScrapyDeprecationWarning,
stacklevel=2,
)
self._url: str = urldefrag(request.url)[0]
# converting to bytes to comply to Twisted interface
self.url: bytes = to_bytes(self._url, encoding="ascii")
self.method: bytes = to_bytes(request.method, encoding="ascii")
self.body: bytes | None = request.body or None
self.headers: Headers = Headers(request.headers)
self.response_headers: Headers | None = None
self.timeout: float = request.meta.get("download_timeout") or timeout
self.start_time: float = time()
self.deferred: defer.Deferred[Response] = defer.Deferred().addCallback(
self._build_response, request
)
# Fixes Twisted 11.1.0+ support as HTTPClientFactory is expected
# to have _disconnectedDeferred. See Twisted r32329.
# As Scrapy implements it's own logic to handle redirects is not
# needed to add the callback _waitForDisconnect.
# Specifically this avoids the AttributeError exception when
# clientConnectionFailed method is called.
self._disconnectedDeferred: defer.Deferred[None] = defer.Deferred()
self._set_connection_attributes(request)
# set Host header based on url
self.headers.setdefault("Host", self.netloc)
# set Content-Length based len of body
if self.body is not None:
self.headers["Content-Length"] = len(self.body)
# just in case a broken http/1.1 decides to keep connection alive
self.headers.setdefault("Connection", "close")
# Content-Length must be specified in POST method even with no body
elif self.method == b"POST":
self.headers["Content-Length"] = 0
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: {self._url}>"
def _cancelTimeout(self, result, timeoutCall):
if timeoutCall.active():
timeoutCall.cancel()
return result
def buildProtocol(self, addr):
p = ClientFactory.buildProtocol(self, addr)
p.followRedirect = self.followRedirect
p.afterFoundGet = self.afterFoundGet
if self.timeout:
from twisted.internet import reactor
timeoutCall = reactor.callLater(self.timeout, p.timeout)
self.deferred.addBoth(self._cancelTimeout, timeoutCall)
return p
def gotHeaders(self, headers):
self.headers_time = time()
self.response_headers = headers
def gotStatus(self, version, status, message):
"""
Set the status of the request on us.
@param version: The HTTP version.
@type version: L{bytes}
@param status: The HTTP status code, an integer represented as a
bytestring.
@type status: L{bytes}
@param message: The HTTP status message.
@type message: L{bytes}
"""
self.version, self.status, self.message = version, status, message
def page(self, page):
if self.waiting:
self.waiting = 0
self.deferred.callback(page)
def noPage(self, reason):
if self.waiting:
self.waiting = 0
self.deferred.errback(reason)
def clientConnectionFailed(self, _, reason):
"""
When a connection attempt fails, the request cannot be issued. If no
result has yet been provided to the result Deferred, provide the
connection failure reason as an error result.
"""
if self.waiting:
self.waiting = 0
# If the connection attempt failed, there is nothing more to
# disconnect, so just fire that Deferred now.
self._disconnectedDeferred.callback(None)
self.deferred.errback(reason)
| ScrapyHTTPClientFactory |
python | readthedocs__readthedocs.org | readthedocs/audit/serializers.py | {
"start": 769,
"end": 952
} | class ____(serializers.ModelSerializer):
organization = OrganizationSerializer()
class Meta:
model = Team
fields = ["id", "slug", "organization"]
| TeamSerializer |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/vae.py | {
"start": 1057,
"end": 1231
} | class ____(nn.Module):
@nn.compact
def __call__(self, z):
z = nn.Dense(500, name='fc1')(z)
z = nn.relu(z)
z = nn.Dense(784, name='fc2')(z)
return z
| Decoder |
python | huggingface__transformers | src/transformers/generation/candidate_generator.py | {
"start": 17462,
"end": 31275
} | class ____(AssistedCandidateGenerator):
"""
`CandidateGenerator` class to be used for Universal Assisted Generation (UAD): assisted generation with different tokenizers
for the assistant and main models. This class generates candidates through the use of a smaller
model.
The main model input tokens are re-encoded into assistant model tokens, then candidate tokens are generated in the assistant encoding, which are
in turn re-encoded into main model candidate tokens. Validation then proceeds as explained above.
The re-encoding steps involve decoding token ids into text and then encoding the text using a different tokenizer.
Since re-encoding the tokens may result in tokenization discrepancies, UAD finds the longest common subsequence between the source and target encodings,
to ensure the new tokens include the correct prompt suffix.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
assistant_model (`PreTrainedModel`):
The model to be used for generating candidates. This model should be smaller than the main model.
target_tokenizer (`PreTrainedTokenizerBase`):
The tokenizer used for the target model.
assistant_tokenizer (`PreTrainedTokenizerBase`):
The tokenizer used for the assistant model.
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call.
logits_processor (`LogitsProcessorList`):
An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
model_kwargs (`Dict`):
The keyword arguments that will be passed to the main model, and are used as base inputs for the assistant
model as well.
inputs_tensor (`torch.Tensor`, *optional*):
The model input tensor. In encoder-decoder models, this is the encoder input.
"""
def __init__(
self,
input_ids: torch.LongTensor,
assistant_model: "PreTrainedModel",
target_tokenizer: "PreTrainedTokenizerBase",
assistant_tokenizer: "PreTrainedTokenizerBase",
generation_config: "GenerationConfig",
model_kwargs: dict,
inputs_tensor: torch.Tensor | None = None,
logits_processor: Optional["LogitsProcessorList"] = None,
):
super().__init__(input_ids, assistant_model, generation_config, model_kwargs, inputs_tensor, logits_processor)
self.target_tokenizer = target_tokenizer
self.assistant_tokenizer = assistant_tokenizer
self.prev_target_ids_len: int | None = None
self.prev_assistant_ids = None
self.target_lookbehind = assistant_model.generation_config.target_lookbehind
self.assistant_lookbehind = assistant_model.generation_config.assistant_lookbehind
@staticmethod
def _get_longest_diag_dict(input_matrix, nonzero_idx):
"""
Calculates the length of the longest diagonal sequence in a given matrix.
Args:
input_matrix (torch.Tensor): The input matrix.
nonzero_idx (torch.Tensor): The indices of the non-zero elements in the matrix.
Returns:
dict: A dictionary where the keys are the indices of the non-zero elements and the values are the lengths of the longest diagonal sequences starting from those indices.
"""
visited = set()
diags = {}
for idx in nonzero_idx:
start_idx = torch.clone(idx)
tuple_start_idx = tuple(start_idx.tolist())
if tuple_start_idx in visited:
continue
visited.add(tuple_start_idx)
cur_diag_len = 1
start_idx += 1
while start_idx[0] < input_matrix.shape[0] and start_idx[1] < input_matrix.shape[1]:
tuple_start_idx = tuple(start_idx.tolist())
visited.add(tuple_start_idx)
if input_matrix[start_idx[0], start_idx[1]] == 1:
cur_diag_len += 1
start_idx += 1
else:
break
diags[idx] = cur_diag_len
return diags
@staticmethod
def _get_longest_diag_index(input_matrix):
"""
Returns the start index and length of the longest diagonal in the given input.
Args:
input_matrix (numpy.ndarray): The input matrix.
Returns:
tuple: A tuple containing the start index and length of the longest diagonal.
"""
diags = AssistedCandidateGeneratorDifferentTokenizers._get_longest_diag_dict(
input_matrix, input_matrix.nonzero()
)
diags_values = list(diags.values())
diags_keys = list(diags.keys())
best_diag = np.argmax(diags_values)
diag_start_index = diags_keys[best_diag]
diag_start_length = diags_values[best_diag]
return diag_start_index, diag_start_length
@staticmethod
def _get_tokens_diag(prompt, prompt_plus_new_tokens):
"""
Input:
prompt: 2D array of shape (batch_size, prompt_length), represents the original prompt tokens
prompt_plus_new_tokens: 2D array of shape (batch_size, prompt_length), represents the suffix of the original prompt, with additional new tokens.
Output:
discrepancy_length: int, represents the number of tokens that need to be replaced from prompt
new_tokens_only: 2D array of shape (batch_size, new_token_length), represents the new tokens that are not in prompt
discrepancy_only: 2D array of shape (batch_size, discrepancy_length), represents the new tokens that are in prompt but not in prompt_plus_new_tokens
"""
compare_mat = prompt_plus_new_tokens.T == prompt
if not torch.is_tensor(compare_mat):
compare_mat = torch.tensor(compare_mat)
compare_mat_int = compare_mat.to(int)
if not compare_mat_int.any().item():
# empty intersection between prompt and prompt_plus_new_tokens
return None, None, None
longest_location, longest_diag_length = AssistedCandidateGeneratorDifferentTokenizers._get_longest_diag_index(
compare_mat_int
)
new_token_start_index = longest_location[0] + longest_diag_length
discrepancy_with_old = longest_location[1] + longest_diag_length
discrepancy_length = (prompt.shape[1] - discrepancy_with_old).item()
new_tokens_only = prompt_plus_new_tokens[:, new_token_start_index + discrepancy_length :]
discrepancy_only = prompt_plus_new_tokens[
:, new_token_start_index : new_token_start_index + discrepancy_length
]
return discrepancy_length, new_tokens_only, discrepancy_only
def convert_source_tokens_to_target_tokens(
self,
input_ids,
source_tokenizer,
destination_tokenizer,
):
"""
Convert token IDs from one tokenizer to another.
Args:
input_ids: The input token IDs.
source_tokenizer: The source tokenizer.
destination_tokenizer: The destination tokenizer.
Returns:
The converted token IDs.
"""
text = source_tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
dest_ids = destination_tokenizer(text, add_special_tokens=True, return_tensors="pt")["input_ids"]
return dest_ids.to(input_ids.device)
def get_candidates(self, input_ids: torch.LongTensor) -> tuple[torch.LongTensor, torch.FloatTensor | None]:
"""
Fetches the candidates to be tried for the current input.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
Return:
`torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be
assessed by the model and a `torch.FloatTensor` of shape `(batch_size, candidate_length,
vocabulary_size)` containing the logits associated to each candidate.
"""
max_new_tokens = int(self.num_assistant_tokens)
if max_new_tokens == 0:
return input_ids, None
input_ids = input_ids.to(self.assistant_model.device)
remove_from_pkv = 0
assistant_input_ids, remove_from_pkv = self._prepare_assistant_input_ids(input_ids)
self.prev_assistant_ids = assistant_input_ids
min_new_tokens = max(min(max_new_tokens, self.main_model_min_length - assistant_input_ids.shape[-1]), 0)
self._update_past_and_masks(assistant_input_ids, remove_from_pkv)
generation_args = self._prepare_generation_args(assistant_input_ids, min_new_tokens, max_new_tokens)
self.assistant_kwargs.pop("attention_mask", None)
assistant_output = self.assistant_model.generate(**generation_args, **self.assistant_kwargs)
new_target_ids = self._process_assistant_outputs(input_ids, assistant_output.sequences)
# Update state
self.prev_target_ids_len = input_ids.shape[1]
self.assistant_kwargs["past_key_values"] = assistant_output.past_key_values
self.prev_assistant_ids = assistant_output.sequences
if self.prev_target_ids_len >= new_target_ids.shape[1]:
return input_ids, None
return new_target_ids, None
def _prepare_assistant_input_ids(self, input_ids: torch.LongTensor) -> tuple[torch.LongTensor, int]:
"""Converts target input IDs to assistant input IDs, handling discrepancies."""
convert_kwargs = {
"source_tokenizer": self.target_tokenizer,
"destination_tokenizer": self.assistant_tokenizer,
}
remove_from_pkv = 0
if self.prev_assistant_ids is not None and self.prev_target_ids_len > self.target_lookbehind:
# input_ids contains all target prompt input ids and some new target input ids
start_index_in_target_window = self.prev_target_ids_len - self.target_lookbehind
new_assistant_ids = self.convert_source_tokens_to_target_tokens(
input_ids[:, start_index_in_target_window:], **convert_kwargs
)
prompt_use_length = new_assistant_ids.shape[1]
prompt_use = self.prev_assistant_ids[:, -prompt_use_length:]
discrepancy_length, new_tokens_only, discrepancy_only = self._get_tokens_diag(
prompt_use, new_assistant_ids
)
assistant_input_ids = self.prev_assistant_ids
if new_tokens_only is not None:
if discrepancy_length > 0 and discrepancy_only.shape[1] > 0:
if discrepancy_length == discrepancy_only.shape[1]:
assistant_input_ids[:, -discrepancy_length:] = discrepancy_only
elif discrepancy_length > discrepancy_only.shape[1]:
discrepancy_length_diff = discrepancy_length - discrepancy_only.shape[1]
assistant_input_ids = assistant_input_ids[:, :-discrepancy_length_diff]
assistant_input_ids[:, -discrepancy_only.shape[1] :] = discrepancy_only
remove_from_pkv = discrepancy_length
if new_tokens_only.shape[1] > 0:
assistant_input_ids = torch.cat([assistant_input_ids, new_tokens_only], dim=-1)
else:
# edge case: in case of no intersection between prompt and new_assistant_ids
assistant_input_ids = torch.cat([assistant_input_ids, new_assistant_ids], dim=-1)
else:
assistant_input_ids = self.convert_source_tokens_to_target_tokens(input_ids, **convert_kwargs)
self.prev_target_ids_len = input_ids.shape[1]
return assistant_input_ids, remove_from_pkv
def _process_assistant_outputs(
self, input_ids: torch.LongTensor, assistant_sequences: torch.LongTensor
) -> torch.LongTensor:
"""Processes assistant outputs to obtain target input IDs."""
num_prev_assistant = self.prev_assistant_ids.shape[1]
start_assistant_look_index = num_prev_assistant - self.assistant_lookbehind
new_target_ids_from_window = self.convert_source_tokens_to_target_tokens(
assistant_sequences[:, start_assistant_look_index:],
source_tokenizer=self.assistant_tokenizer,
destination_tokenizer=self.target_tokenizer,
)
target_prompt_use_length = new_target_ids_from_window.shape[1]
target_prompt_use = input_ids[:, -target_prompt_use_length:]
_, target_new_tokens_only, _ = self._get_tokens_diag(target_prompt_use, new_target_ids_from_window)
new_target_ids = input_ids
if target_new_tokens_only is not None:
if target_new_tokens_only.shape[1] > 0:
new_target_ids = torch.cat([new_target_ids, target_new_tokens_only], dim=-1)
else:
# edge case: in case of no intersection between prompt and new_target_ids
new_target_ids = torch.cat([new_target_ids, new_target_ids_from_window], dim=-1)
if hasattr(self.generation_config, "max_length"):
new_target_ids = new_target_ids[:, : self.generation_config.max_length]
return new_target_ids
| AssistedCandidateGeneratorDifferentTokenizers |
python | numba__numba | numba/tests/matmul_usecase.py | {
"start": 136,
"end": 552
} | class ____(object):
def __init__(self, value):
self.value = value
def __matmul__(self, other):
if isinstance(other, DumbMatrix):
return DumbMatrix(self.value * other.value)
return NotImplemented
def __imatmul__(self, other):
if isinstance(other, DumbMatrix):
self.value *= other.value
return self
return NotImplemented
| DumbMatrix |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_cond_format20.py | {
"start": 315,
"end": 1365
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("cond_format20.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with conditionalFormatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write("A1", 10)
worksheet.write("A2", 20)
worksheet.write("A3", 30)
worksheet.write("A4", 40)
worksheet.conditional_format(
"A1:A4",
{
"type": "icon_set",
"icon_style": "3_arrows",
"icons": [
{"criteria": ">", "type": "percent", "value": 0},
{"criteria": "<", "type": "percent", "value": 0},
{"criteria": ">=", "type": "percent", "value": 0},
],
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | google__pytype | pytype/utils_test.py | {
"start": 71,
"end": 2784
} | class ____(unittest.TestCase):
"""Test generic utilities."""
def test_pretty_dnf(self):
dnf = [["a", "b"], "c", ["d", "e", "f"]]
self.assertEqual(utils.pretty_dnf(dnf), "(a & b) | c | (d & e & f)")
def test_list_strip_prefix(self):
self.assertEqual([1, 2, 3], utils.list_strip_prefix([1, 2, 3], []))
self.assertEqual([2, 3], utils.list_strip_prefix([1, 2, 3], [1]))
self.assertEqual([3], utils.list_strip_prefix([1, 2, 3], [1, 2]))
self.assertEqual([], utils.list_strip_prefix([1, 2, 3], [1, 2, 3]))
self.assertEqual(
[1, 2, 3], utils.list_strip_prefix([1, 2, 3], [0, 1, 2, 3])
)
self.assertEqual([], utils.list_strip_prefix([], [1, 2, 3]))
self.assertEqual(
list("wellington"),
utils.list_strip_prefix(list("newwellington"), list("new")),
)
self.assertEqual(
"a.somewhat.long.path.src2.d3.shrdlu".split("."),
utils.list_strip_prefix(
"top.a.somewhat.long.path.src2.d3.shrdlu".split("."),
"top".split("."),
),
)
def test_list_starts_with(self):
self.assertTrue(utils.list_startswith([1, 2, 3], []))
self.assertTrue(utils.list_startswith([1, 2, 3], [1]))
self.assertTrue(utils.list_startswith([1, 2, 3], [1, 2]))
self.assertTrue(utils.list_startswith([1, 2, 3], [1, 2, 3]))
self.assertFalse(utils.list_startswith([1, 2, 3], [2]))
self.assertTrue(utils.list_startswith([], []))
self.assertFalse(utils.list_startswith([], [1]))
def test_invert_dict(self):
a = {"p": ["q", "r"], "x": ["q", "z"]}
b = utils.invert_dict(a)
self.assertCountEqual(b["q"], ["p", "x"])
self.assertEqual(b["r"], ["p"])
self.assertEqual(b["z"], ["x"])
def test_dynamic_var(self):
var = utils.DynamicVar()
self.assertIsNone(var.get())
with var.bind(123):
self.assertEqual(123, var.get())
with var.bind(456):
self.assertEqual(456, var.get())
self.assertEqual(123, var.get())
self.assertIsNone(var.get())
def test_version_from_string(self):
self.assertEqual(utils.version_from_string("3.7"), (3, 7))
def test_validate_version(self):
old = utils._VALIDATE_PYTHON_VERSION_UPPER_BOUND
utils._VALIDATE_PYTHON_VERSION_UPPER_BOUND = True
self._validate_version_helper((1, 1))
self._validate_version_helper((2, 1))
self._validate_version_helper((2, 8))
self._validate_version_helper((3, 1))
self._validate_version_helper((3, 42))
utils._VALIDATE_PYTHON_VERSION_UPPER_BOUND = old
def _validate_version_helper(self, python_version):
with self.assertRaises(utils.UsageError):
utils.validate_version(python_version)
def _make_tuple(x):
return tuple(range(x))
| UtilsTest |
python | huggingface__transformers | src/transformers/models/siglip2/modular_siglip2.py | {
"start": 5080,
"end": 9379
} | class ____(nn.Module):
def __init__(self, config: Siglip2VisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.patch_size = config.patch_size
self.patch_embedding = nn.Linear(
in_features=config.num_channels * self.patch_size * self.patch_size,
out_features=self.embed_dim,
)
self.num_patches = config.num_patches
self.position_embedding_size = int(self.num_patches**0.5)
self.position_embedding = nn.Embedding(self.num_patches, self.embed_dim)
@staticmethod
def resize_positional_embeddings(
positional_embeddings: torch.Tensor,
spatial_shapes: torch.LongTensor,
max_length: int,
) -> torch.Tensor:
"""
Resize positional embeddings to image-specific size and pad to a fixed size.
Args:
positional_embeddings (`torch.Tensor`):
Position embeddings of shape (height, width, embed_dim)
spatial_shapes (`torch.LongTensor`):
Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to
max_length (`int`):
Maximum length of the positional embeddings to pad resized positional embeddings to
Returns:
`torch.Tensor`: Embeddings of shape (batch_size, max_length, embed_dim)
"""
batch_size = spatial_shapes.shape[0]
embed_dim = positional_embeddings.shape[-1]
source_dtype = positional_embeddings.dtype
resulted_positional_embeddings = torch.empty(
(batch_size, max_length, embed_dim),
device=positional_embeddings.device,
dtype=source_dtype,
)
# (height, width, embed_dim) -> (1, embed_dim, height, width) for interpolation
positional_embeddings = positional_embeddings.permute(2, 0, 1).unsqueeze(0)
# Upcast to float32 on CPU because antialias is not supported for bfloat16/float16 on CPU
if positional_embeddings.device.type == "cpu":
positional_embeddings = positional_embeddings.to(torch.float32)
for i in range(batch_size):
# (1, dim, height, width) -> (1, dim, target_height, target_width)
height, width = spatial_shapes[i]
resized_embeddings = F.interpolate(
positional_embeddings,
size=(height, width),
mode="bilinear",
align_corners=False,
antialias=True,
)
# (1, dim, target_height, target_width) -> (target_height * target_width, dim)
resized_embeddings = resized_embeddings.reshape(embed_dim, height * width).transpose(0, 1)
# Cast to original dtype
resized_embeddings = resized_embeddings.to(source_dtype)
resulted_positional_embeddings[i, : height * width] = resized_embeddings
resulted_positional_embeddings[i, height * width :] = resized_embeddings[0]
return resulted_positional_embeddings
def forward(self, pixel_values: torch.FloatTensor, spatial_shapes: torch.LongTensor) -> torch.Tensor:
"""
Args:
pixel_values (`torch.FloatTensor`):
Pixel values of shape (batch_size, max_num_patches, num_channels * patch_size * patch_size)
spatial_shapes (`list[tuple[int, int]]`):
Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to
"""
# Apply patch embeddings to already patchified pixel values
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
# Get positional resized and padded positional embeddings
positional_embeddings = self.position_embedding.weight.reshape(
self.position_embedding_size, self.position_embedding_size, -1
)
resized_positional_embeddings = self.resize_positional_embeddings(
positional_embeddings, spatial_shapes, max_length=pixel_values.shape[1]
)
# Add positional embeddings to patch embeddings
embeddings = patch_embeds + resized_positional_embeddings
return embeddings
| Siglip2VisionEmbeddings |
python | donnemartin__interactive-coding-challenges | recursion_dynamic/hanoi/test_hanoi.py | {
"start": 18,
"end": 1002
} | class ____(unittest.TestCase):
def test_hanoi(self):
hanoi = Hanoi()
num_disks = 3
src = Stack()
buff = Stack()
dest = Stack()
print('Test: None towers')
self.assertRaises(TypeError, hanoi.move_disks, num_disks, None, None, None)
print('Test: 0 disks')
hanoi.move_disks(num_disks, src, dest, buff)
self.assertEqual(dest.pop(), None)
print('Test: 1 disk')
src.push(5)
hanoi.move_disks(num_disks, src, dest, buff)
self.assertEqual(dest.pop(), 5)
print('Test: 2 or more disks')
for disk_index in range(num_disks, -1, -1):
src.push(disk_index)
hanoi.move_disks(num_disks, src, dest, buff)
for disk_index in range(0, num_disks):
self.assertEqual(dest.pop(), disk_index)
print('Success: test_hanoi')
def main():
test = TestHanoi()
test.test_hanoi()
if __name__ == '__main__':
main()
| TestHanoi |
python | nryoung__algorithms | tests/test_math.py | {
"start": 3609,
"end": 4252
} | class ____(unittest.TestCase):
def test_eratosthenes(self):
rv1 = eratosthenes(-10)
rv2 = eratosthenes(10)
rv3 = eratosthenes(100, 5)
rv4 = eratosthenes(100, -10)
self.assertEqual(rv1, [])
self.assertEqual(rv2, [2, 3, 5, 7])
self.assertEqual(
rv3,
[5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
67, 71, 73, 79, 83, 89, 97]
)
self.assertEqual(
rv4,
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
61, 67, 71, 73, 79, 83, 89, 97]
)
| TestSieveOfEratosthenes |
python | getsentry__sentry | src/sentry/ingest/transaction_clusterer/normalization.py | {
"start": 2335,
"end": 4161
} | class ____:
ty: str
rule_id: str
range: tuple[int, int]
def serialize(self) -> list:
return [self.rule_id, self.ty, self.range[0], self.range[1]]
# Ported from Relay:
# https://github.com/getsentry/relay/blob/aad4b6099d12422e88dd5df49abae11247efdd99/relay-event-normalization/src/transactions/processor.rs#L350
def _scrub_identifiers(segment_span: CompatibleSpan, segment_name: str):
matches = TRANSACTION_NAME_NORMALIZER_REGEX.finditer(segment_name)
remarks = []
for m in matches:
remarks.extend(
[
Remark(ty="s", rule_id=group_name, range=(m.start(group_name), m.end(group_name)))
for group_name in m.groupdict().keys()
if m.start(group_name) > -1
]
)
if len(remarks) == 0:
return
remarks.sort(key=lambda remark: remark.range[1])
str_parts: list[str] = []
last_end = 0
for remark in remarks:
start, end = remark.range
str_parts.append(segment_name[last_end:start])
str_parts.append("*")
last_end = end
str_parts.append(segment_name[last_end:])
normalized_segment_name = "".join(str_parts)
segment_span["name"] = normalized_segment_name
attributes = segment_span.get("attributes") or {}
attributes[ATTRIBUTE_NAMES.SENTRY_SEGMENT_NAME] = {
"type": "string",
"value": normalized_segment_name,
}
attributes[ATTRIBUTE_NAMES.SENTRY_SPAN_SOURCE] = {
"type": "string",
"value": TRANSACTION_SOURCE_SANITIZED,
}
attributes[f"sentry._meta.fields.attributes.{ATTRIBUTE_NAMES.SENTRY_SEGMENT_NAME}"] = {
"type": "string",
"value": orjson.dumps({"meta": {"": {"rem": [r.serialize() for r in remarks]}}}).decode(),
}
segment_span["attributes"] = attributes
| Remark |
python | Textualize__rich | rich/markdown.py | {
"start": 592,
"end": 2236
} | class ____:
new_line: ClassVar[bool] = True
@classmethod
def create(cls, markdown: Markdown, token: Token) -> MarkdownElement:
"""Factory to create markdown element,
Args:
markdown (Markdown): The parent Markdown object.
token (Token): A node from markdown-it.
Returns:
MarkdownElement: A new markdown element
"""
return cls()
def on_enter(self, context: MarkdownContext) -> None:
"""Called when the node is entered.
Args:
context (MarkdownContext): The markdown context.
"""
def on_text(self, context: MarkdownContext, text: TextType) -> None:
"""Called when text is parsed.
Args:
context (MarkdownContext): The markdown context.
"""
def on_leave(self, context: MarkdownContext) -> None:
"""Called when the parser leaves the element.
Args:
context (MarkdownContext): [description]
"""
def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
"""Called when a child element is closed.
This method allows a parent element to take over rendering of its children.
Args:
context (MarkdownContext): The markdown context.
child (MarkdownElement): The child markdown element.
Returns:
bool: Return True to render the element, or False to not render the element.
"""
return True
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
return ()
| MarkdownElement |
python | getsentry__sentry | src/sentry/seer/endpoints/project_seer_preferences.py | {
"start": 1175,
"end": 2008
} | class ____(CamelSnakeSerializer):
organization_id = serializers.IntegerField(required=True)
integration_id = serializers.CharField(required=True)
provider = serializers.CharField(required=True)
owner = serializers.CharField(required=True)
name = serializers.CharField(required=True)
external_id = serializers.CharField(required=True)
branch_name = serializers.CharField(required=False, allow_null=True, allow_blank=True)
branch_overrides = BranchOverrideSerializer(
many=True,
required=False,
allow_null=True,
)
instructions = serializers.CharField(required=False, allow_null=True, allow_blank=True)
base_commit_sha = serializers.CharField(required=False, allow_null=True)
provider_raw = serializers.CharField(required=False, allow_null=True)
| RepositorySerializer |
python | gabrielfalcao__HTTPretty | tests/bugfixes/pytest/test_426_mypy_segfault.py | {
"start": 2484,
"end": 2775
} | class ____(unittest.TestCase, metaclass=GenerateTests):
__generate_count__ = 10000
def __generate_method__(test_name):
@httpretty.httprettified(allow_net_connect=False)
def test_func(self):
pass
return test_func
| TestBug426MypySegfaultWithEmptyMethod |
python | ray-project__ray | python/ray/train/tests/test_tensorflow_checkpoint.py | {
"start": 461,
"end": 1173
} | class ____(Preprocessor):
def __init__(self, multiplier):
self.multiplier = multiplier
def transform_batch(self, df):
return df * self.multiplier
def get_model():
return tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=()),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10),
tf.keras.layers.Dense(1),
]
)
def compare_weights(w1: List[ndarray], w2: List[ndarray]) -> bool:
if not len(w1) == len(w2):
return False
size = len(w1)
for i in range(size):
comparison = w1[i] == w2[i]
if not comparison.all():
return False
return True
| DummyPreprocessor |
python | redis__redis-py | redis/event.py | {
"start": 6809,
"end": 7450
} | class ____:
"""
Event that will be fired after async cluster instance was created.
Async cluster doesn't use connection pools,
instead ClusterNode object manages connections.
"""
def __init__(
self,
nodes: dict,
credential_provider: Optional[CredentialProvider] = None,
):
self._nodes = nodes
self._credential_provider = credential_provider
@property
def nodes(self) -> dict:
return self._nodes
@property
def credential_provider(self) -> Union[CredentialProvider, None]:
return self._credential_provider
| AfterAsyncClusterInstantiationEvent |
python | django__django | tests/settings_tests/tests.py | {
"start": 12023,
"end": 12810
} | class ____(SimpleTestCase):
def setUp(self):
self.old_warn_override_settings = signals.COMPLEX_OVERRIDE_SETTINGS.copy()
signals.COMPLEX_OVERRIDE_SETTINGS.add("TEST_WARN")
def tearDown(self):
signals.COMPLEX_OVERRIDE_SETTINGS = self.old_warn_override_settings
self.assertNotIn("TEST_WARN", signals.COMPLEX_OVERRIDE_SETTINGS)
def test_complex_override_warning(self):
"""Regression test for #19031"""
msg = "Overriding setting TEST_WARN can lead to unexpected behavior."
with self.assertWarnsMessage(UserWarning, msg) as cm:
with override_settings(TEST_WARN="override"):
self.assertEqual(settings.TEST_WARN, "override")
self.assertEqual(cm.filename, __file__)
| TestComplexSettingOverride |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 9249,
"end": 9698
} | class ____(LocalizableStreamlitException):
"""Exception raised when an invalid text_alignment value is provided."""
def __init__(self, text_alignment: Any) -> None:
super().__init__(
'Invalid text_alignment value: "{text_alignment}". '
'Valid values are: `"left"`, `"center"`, `"right"`, or `"justify"`.',
text_alignment=text_alignment,
)
# st.multiselect
| StreamlitInvalidTextAlignmentError |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_gcs.py | {
"start": 4207,
"end": 8904
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.gcs.GCSHook")
def test_delete_objects(self, mock_hook):
operator = GCSDeleteObjectsOperator(task_id=TASK_ID, bucket_name=TEST_BUCKET, objects=MOCK_FILES[0:2])
operator.execute(None)
mock_hook.return_value.list.assert_not_called()
mock_hook.return_value.delete.assert_has_calls(
calls=[
mock.call(bucket_name=TEST_BUCKET, object_name=MOCK_FILES[0]),
mock.call(bucket_name=TEST_BUCKET, object_name=MOCK_FILES[1]),
],
any_order=True,
)
@mock.patch("airflow.providers.google.cloud.operators.gcs.GCSHook")
def test_delete_empty_list_of_objects(self, mock_hook):
operator = GCSDeleteObjectsOperator(task_id=TASK_ID, bucket_name=TEST_BUCKET, objects=[])
operator.execute(None)
mock_hook.return_value.list.assert_not_called()
mock_hook.return_value.delete.assert_not_called()
@mock.patch("airflow.providers.google.cloud.operators.gcs.GCSHook")
def test_delete_prefix(self, mock_hook):
mock_hook.return_value.list.return_value = MOCK_FILES[1:4]
operator = GCSDeleteObjectsOperator(task_id=TASK_ID, bucket_name=TEST_BUCKET, prefix=PREFIX)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(bucket_name=TEST_BUCKET, prefix=PREFIX)
mock_hook.return_value.delete.assert_has_calls(
calls=[
mock.call(bucket_name=TEST_BUCKET, object_name=MOCK_FILES[1]),
mock.call(bucket_name=TEST_BUCKET, object_name=MOCK_FILES[2]),
],
any_order=True,
)
@mock.patch("airflow.providers.google.cloud.operators.gcs.GCSHook")
def test_delete_prefix_as_empty_string(self, mock_hook):
mock_hook.return_value.list.return_value = MOCK_FILES[0:4]
operator = GCSDeleteObjectsOperator(task_id=TASK_ID, bucket_name=TEST_BUCKET, prefix="")
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(bucket_name=TEST_BUCKET, prefix="")
mock_hook.return_value.delete.assert_has_calls(
calls=[
mock.call(bucket_name=TEST_BUCKET, object_name=MOCK_FILES[0]),
mock.call(bucket_name=TEST_BUCKET, object_name=MOCK_FILES[1]),
mock.call(bucket_name=TEST_BUCKET, object_name=MOCK_FILES[2]),
mock.call(bucket_name=TEST_BUCKET, object_name=MOCK_FILES[3]),
],
any_order=True,
)
@pytest.mark.parametrize(
("objects", "prefix", "inputs"),
(
(["folder/a.txt", "b.json"], None, ["folder/a.txt", "b.json"]),
(["folder/a.txt", "folder/b.json"], None, ["folder/a.txt", "folder/b.json"]),
(None, ["folder/a.txt", "b.json"], ["folder/a.txt", "b.json"]),
(None, "dir/pre", ["dir"]),
(None, ["dir/"], ["dir"]),
(None, "", ["/"]),
(None, "/", ["/"]),
(None, "pre", ["/"]),
(None, "dir/pre*", ["dir"]),
(None, "*", ["/"]),
),
ids=(
"objects",
"multiple objects in the same dir",
"objects as prefixes",
"directory with prefix",
"directory",
"empty prefix",
"slash as prefix",
"prefix with no ending slash",
"directory with prefix with wildcard",
"just wildcard",
),
)
def test_get_openlineage_facets_on_start(self, objects, prefix, inputs):
bucket_url = f"gs://{TEST_BUCKET}"
expected_inputs = [
Dataset(
namespace=bucket_url,
name=name,
facets={
"lifecycleStateChange": LifecycleStateChangeDatasetFacet(
lifecycleStateChange=LifecycleStateChange.DROP.value,
previousIdentifier=PreviousIdentifier(
namespace=bucket_url,
name=name,
),
)
},
)
for name in inputs
]
operator = GCSDeleteObjectsOperator(
task_id=TASK_ID, bucket_name=TEST_BUCKET, objects=objects, prefix=prefix
)
lineage = operator.get_openlineage_facets_on_start()
assert len(lineage.inputs) == len(inputs)
assert len(lineage.outputs) == 0
assert all(element in lineage.inputs for element in expected_inputs)
assert all(element in expected_inputs for element in lineage.inputs)
| TestGCSDeleteObjectsOperator |
python | scipy__scipy | scipy/io/_harwell_boeing/_fortran_format_parser.py | {
"start": 6215,
"end": 9003
} | class ____:
"""Parser for Fortran format strings. The parse method returns a *Format
instance.
Notes
-----
Only ExpFormat (exponential format for floating values) and IntFormat
(integer format) for now.
"""
def __init__(self):
self.tokenizer = threading.local()
def parse(self, s):
if not hasattr(self.tokenizer, 't'):
self.tokenizer.t = Tokenizer()
self.tokenizer.t.input(s)
tokens = []
try:
while True:
t = self.tokenizer.t.next_token()
if t is None:
break
else:
tokens.append(t)
return self._parse_format(tokens)
except SyntaxError as e:
raise BadFortranFormat(str(e)) from e
def _get_min(self, tokens):
next = tokens.pop(0)
if not next.type == "DOT":
raise SyntaxError()
next = tokens.pop(0)
return next.value
def _expect(self, token, tp):
if not token.type == tp:
raise SyntaxError()
def _parse_format(self, tokens):
if not tokens[0].type == "LPAR":
raise SyntaxError(
f"Expected left parenthesis at position {0} (got '{tokens[0].value}')"
)
elif not tokens[-1].type == "RPAR":
raise SyntaxError("Expected right parenthesis at position "
f"{len(tokens)} (got '{tokens[-1].value}')")
tokens = tokens[1:-1]
types = [t.type for t in tokens]
if types[0] == "INT":
repeat = int(tokens.pop(0).value)
else:
repeat = None
next = tokens.pop(0)
if next.type == "INT_ID":
next = self._next(tokens, "INT")
width = int(next.value)
if tokens:
min = int(self._get_min(tokens))
else:
min = None
return IntFormat(width, min, repeat)
elif next.type == "EXP_ID":
next = self._next(tokens, "INT")
width = int(next.value)
next = self._next(tokens, "DOT")
next = self._next(tokens, "INT")
significand = int(next.value)
if tokens:
next = self._next(tokens, "EXP_ID")
next = self._next(tokens, "INT")
min = int(next.value)
else:
min = None
return ExpFormat(width, significand, min, repeat)
else:
raise SyntaxError(f"Invalid formatter type {next.value}")
def _next(self, tokens, tp):
if not len(tokens) > 0:
raise SyntaxError()
next = tokens.pop(0)
self._expect(next, tp)
return next
| FortranFormatParser |
python | getsentry__sentry | tests/sentry/users/api/bases/test_user.py | {
"start": 4842,
"end": 4943
} | class ____(BaseUserEndpointTest):
endpoint = UserEndpoint()
@control_silo_test
| MonolithUserEndpoint |
python | doocs__leetcode | solution/2600-2699/2641.Cousins in Binary Tree II/Solution.py | {
"start": 192,
"end": 1102
} | class ____:
def replaceValueInTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
def dfs1(root: Optional[TreeNode], depth: int):
if root is None:
return
if len(s) <= depth:
s.append(0)
s[depth] += root.val
dfs1(root.left, depth + 1)
dfs1(root.right, depth + 1)
def dfs2(root: Optional[TreeNode], depth: int):
sub = (root.left.val if root.left else 0) + (
root.right.val if root.right else 0
)
depth += 1
if root.left:
root.left.val = s[depth] - sub
dfs2(root.left, depth)
if root.right:
root.right.val = s[depth] - sub
dfs2(root.right, depth)
s = []
dfs1(root, 0)
root.val = 0
dfs2(root, 0)
return root
| Solution |
python | huggingface__transformers | src/transformers/models/blip/image_processing_blip_fast.py | {
"start": 871,
"end": 1312
} | class ____(BaseImageProcessorFast):
# To be checked against the slow image processor
# None values left after checking can be removed
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"height": 384, "width": 384}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
__all__ = ["BlipImageProcessorFast"]
| BlipImageProcessorFast |
python | scrapy__scrapy | tests/test_feedexport.py | {
"start": 99726,
"end": 103268
} | class ____(ABC):
spider_name = "uri_params_spider"
deprecated_options = False
@abstractmethod
def build_settings(self, uri="file:///tmp/foobar", uri_params=None):
raise NotImplementedError
def _crawler_feed_exporter(self, settings):
if self.deprecated_options:
with pytest.warns(
ScrapyDeprecationWarning,
match="The `FEED_URI` and `FEED_FORMAT` settings have been deprecated",
):
crawler = get_crawler(settings_dict=settings)
else:
crawler = get_crawler(settings_dict=settings)
feed_exporter = crawler.get_extension(FeedExporter)
return crawler, feed_exporter
def test_default(self):
settings = self.build_settings(
uri="file:///tmp/%(name)s",
)
crawler, feed_exporter = self._crawler_feed_exporter(settings)
spider = scrapy.Spider(self.spider_name)
spider.crawler = crawler
with warnings.catch_warnings():
warnings.simplefilter("error", ScrapyDeprecationWarning)
feed_exporter.open_spider(spider)
assert feed_exporter.slots[0].uri == f"file:///tmp/{self.spider_name}"
def test_none(self):
def uri_params(params, spider):
pass
settings = self.build_settings(
uri="file:///tmp/%(name)s",
uri_params=uri_params,
)
crawler, feed_exporter = self._crawler_feed_exporter(settings)
spider = scrapy.Spider(self.spider_name)
spider.crawler = crawler
feed_exporter.open_spider(spider)
assert feed_exporter.slots[0].uri == f"file:///tmp/{self.spider_name}"
def test_empty_dict(self):
def uri_params(params, spider):
return {}
settings = self.build_settings(
uri="file:///tmp/%(name)s",
uri_params=uri_params,
)
crawler, feed_exporter = self._crawler_feed_exporter(settings)
spider = scrapy.Spider(self.spider_name)
spider.crawler = crawler
with warnings.catch_warnings():
warnings.simplefilter("error", ScrapyDeprecationWarning)
with pytest.raises(KeyError):
feed_exporter.open_spider(spider)
def test_params_as_is(self):
def uri_params(params, spider):
return params
settings = self.build_settings(
uri="file:///tmp/%(name)s",
uri_params=uri_params,
)
crawler, feed_exporter = self._crawler_feed_exporter(settings)
spider = scrapy.Spider(self.spider_name)
spider.crawler = crawler
with warnings.catch_warnings():
warnings.simplefilter("error", ScrapyDeprecationWarning)
feed_exporter.open_spider(spider)
assert feed_exporter.slots[0].uri == f"file:///tmp/{self.spider_name}"
def test_custom_param(self):
def uri_params(params, spider):
return {**params, "foo": self.spider_name}
settings = self.build_settings(
uri="file:///tmp/%(foo)s",
uri_params=uri_params,
)
crawler, feed_exporter = self._crawler_feed_exporter(settings)
spider = scrapy.Spider(self.spider_name)
spider.crawler = crawler
with warnings.catch_warnings():
warnings.simplefilter("error", ScrapyDeprecationWarning)
feed_exporter.open_spider(spider)
assert feed_exporter.slots[0].uri == f"file:///tmp/{self.spider_name}"
| TestURIParams |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/compute_log_manager.py | {
"start": 936,
"end": 1225
} | class ____(BaseModel):
bucket: StringSource
localDir: Optional[StringSource] = None
prefix: Optional[StringSource] = None
jsonCredentialsEnvvar: Optional[StringSource] = None
uploadInterval: Optional[int] = None
showUrlOnly: Optional[bool] = None
| GCSComputeLogManager |
python | pennersr__django-allauth | allauth/socialaccount/providers/questrade/views.py | {
"start": 181,
"end": 1166
} | class ____(OAuth2Adapter):
provider_id = "questrade"
access_token_url = "https://login.questrade.com/oauth2/token" # nosec
authorize_url = "https://login.questrade.com/oauth2/authorize"
supports_state = False
def complete_login(self, request, app, token, **kwargs):
api_server = kwargs.get("response", {}).get(
"api_server", "https://api01.iq.questrade.com/"
)
resp = (
get_adapter()
.get_requests_session()
.get(
"{}v1/accounts".format(api_server),
headers={"Authorization": "Bearer {}".format(token.token)},
)
)
resp.raise_for_status()
data = resp.json()
data.update(kwargs)
return self.get_provider().sociallogin_from_response(request, data)
oauth2_login = OAuth2LoginView.adapter_view(QuestradeOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(QuestradeOAuth2Adapter)
| QuestradeOAuth2Adapter |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 129598,
"end": 132138
} | class ____(fixtures.TestBase):
"""Test postgresql-specific UUID cases.
See also generic UUID tests in testing/suite/test_types
"""
__only_on__ = "postgresql >= 8.3"
__backend__ = True
@testing.combinations(
(
"not_as_uuid",
postgresql.UUID(as_uuid=False),
str(uuid.uuid4()),
str(uuid.uuid4()),
),
("as_uuid", postgresql.UUID(as_uuid=True), uuid.uuid4(), uuid.uuid4()),
id_="iaaa",
argnames="datatype, value1, value2",
)
def test_round_trip(self, datatype, value1, value2, connection):
utable = Table("utable", MetaData(), Column("data", datatype))
utable.create(connection)
connection.execute(utable.insert(), {"data": value1})
connection.execute(utable.insert(), {"data": value2})
r = connection.execute(
select(utable.c.data).where(utable.c.data != value1)
)
eq_(r.fetchone()[0], value2)
eq_(r.fetchone(), None)
@testing.combinations(
(
"as_uuid",
postgresql.ARRAY(postgresql.UUID(as_uuid=True)),
[uuid.uuid4(), uuid.uuid4()],
[uuid.uuid4(), uuid.uuid4()],
),
(
"not_as_uuid",
postgresql.ARRAY(postgresql.UUID(as_uuid=False)),
[str(uuid.uuid4()), str(uuid.uuid4())],
[str(uuid.uuid4()), str(uuid.uuid4())],
),
id_="iaaa",
argnames="datatype, value1, value2",
)
# passes pg8000 as of 1.19.1
def test_uuid_array(self, datatype, value1, value2, connection):
self.test_round_trip(datatype, value1, value2, connection)
@testing.combinations(
(
"not_as_uuid",
postgresql.UUID(as_uuid=False),
str(uuid.uuid4()),
),
(
"as_uuid",
postgresql.UUID(as_uuid=True),
uuid.uuid4(),
),
id_="iaa",
argnames="datatype, value1",
)
def test_uuid_literal(self, datatype, value1, connection):
v1 = connection.execute(
select(
bindparam(
"key",
value=value1,
literal_execute=True,
type_=datatype,
)
),
)
eq_(v1.fetchone()[0], value1)
def test_python_type(self):
eq_(postgresql.UUID(as_uuid=True).python_type, uuid.UUID)
eq_(postgresql.UUID(as_uuid=False).python_type, str)
| UUIDTest |
python | neetcode-gh__leetcode | python/2017-grid-game.py | {
"start": 26,
"end": 310
} | class ____(object):
def gridGame(self, grid):
result = float("inf")
left, right = 0, sum(grid[0])
for a, b in zip(grid[0], grid[1]):
right -= a
result = min(result, max(left, right))
left += b
return result
| Solution |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 61882,
"end": 62136
} | class ____:
xlFileValidationPivotDefault = 0 # from enum XlFileValidationPivotMode
xlFileValidationPivotRun = 1 # from enum XlFileValidationPivotMode
xlFileValidationPivotSkip = 2 # from enum XlFileValidationPivotMode
| FileValidationPivotMode |
python | pennersr__django-allauth | allauth/headless/account/views.py | {
"start": 1848,
"end": 2284
} | class ____(APIView):
input_class = RequestLoginCodeInput
def post(self, request, *args, **kwargs):
flows.login_by_code.LoginCodeVerificationProcess.initiate(
request=self.request,
user=self.input._user,
email=self.input.cleaned_data.get("email"),
phone=self.input.cleaned_data.get("phone"),
)
return AuthenticationResponse(self.request)
| RequestLoginCodeView |
python | getsentry__sentry | src/sentry/auth/providers/saml2/jumpcloud/apps.py | {
"start": 36,
"end": 279
} | class ____(AppConfig):
name = "sentry.auth.providers.saml2.jumpcloud"
def ready(self) -> None:
from sentry.auth import register
from .provider import JumpcloudSAML2Provider
register(JumpcloudSAML2Provider)
| Config |
python | apache__airflow | airflow-core/tests/unit/plugins/test_plugin.py | {
"start": 3650,
"end": 4622
} | class ____(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
return await call_next(request)
middleware_with_metadata = {
"middleware": DummyMiddleware,
"args": [],
"kwargs": {},
"name": "Name of the Middleware",
}
external_view_with_metadata = {
"name": "Test IFrame Airflow Docs",
"href": "https://airflow.apache.org/",
"icon": "https://raw.githubusercontent.com/lucide-icons/lucide/refs/heads/main/icons/plug.svg",
"url_route": "test_iframe_plugin",
"destination": "nav",
"category": "browse",
}
react_app_with_metadata = {
"name": "Test React App",
"bundle_url": "https://example.com/test-plugin-bundle.js",
"icon": "https://raw.githubusercontent.com/lucide-icons/lucide/refs/heads/main/icons/plug.svg",
"url_route": "test_react_app",
"destination": "nav",
"category": "browse",
}
# Extend an existing class to avoid the need to implement the full interface
| DummyMiddleware |
python | realpython__materials | python-sequences/shape.py | {
"start": 0,
"end": 662
} | class ____:
"""
A ShapePoints object represents a collection of points
Attributes:
- points: sequence of points, where each point is a
tuple (x, y)
"""
def __init__(self, points):
self.points = list(points)
if points and self.points[0] != self.points[-1]:
self.points.append(self.points[0])
def __repr__(self):
return f"ShapePoints({self.points})"
def __getitem__(self, index):
return self.points[index]
def __len__(self):
if self.points:
return len(self.points) - 1
return 0
def __iter__(self):
return iter(self.points)
| ShapePoints |
python | ipython__ipython | IPython/core/ultratb.py | {
"start": 3726,
"end": 16211
} | class ____(TBTools):
"""Print traceback information from a traceback list, with optional color.
Calling requires 3 arguments: (etype, evalue, elist)
as would be obtained by::
etype, evalue, tb = sys.exc_info()
if tb:
elist = traceback.extract_tb(tb)
else:
elist = None
It can thus be used by programs which need to process the traceback before
printing (such as console replacements based on the code module from the
standard library).
Because they are meant to be called without a full traceback (only a
list), instances of this class can't call the interactive pdb debugger."""
def __call__(
self,
etype: type[BaseException],
evalue: BaseException | None,
etb: TracebackType | None,
) -> None:
self.ostream.flush()
self.ostream.write(self.text(etype, evalue, etb))
self.ostream.write("\n")
def _extract_tb(self, tb: TracebackType | None) -> traceback.StackSummary | None:
if tb:
return traceback.extract_tb(tb)
else:
return None
def structured_traceback(
self,
etype: type,
evalue: Optional[BaseException],
etb: Optional[TracebackType] = None,
tb_offset: Optional[int] = None,
context: int = 5,
) -> list[str]:
"""Return a color formatted string with the traceback info.
Parameters
----------
etype : exception type
Type of the exception raised.
evalue : object
Data stored in the exception
etb : list | TracebackType | None
If list: List of frames, see class docstring for details.
If Traceback: Traceback of the exception.
tb_offset : int, optional
Number of frames in the traceback to skip. If not given, the
instance evalue is used (set in constructor).
context : int, optional
Number of lines of context information to print.
Returns
-------
String with formatted exception.
"""
# This is a workaround to get chained_exc_ids in recursive calls
# etb should not be a tuple if structured_traceback is not recursive
if isinstance(etb, tuple):
etb, chained_exc_ids = etb
else:
chained_exc_ids = set()
elist: list[Any]
if isinstance(etb, list):
elist = etb
elif etb is not None:
elist = self._extract_tb(etb) # type: ignore[assignment]
else:
elist = []
tb_offset = self.tb_offset if tb_offset is None else tb_offset
assert isinstance(tb_offset, int)
out_list: list[str] = []
if elist:
if tb_offset and len(elist) > tb_offset:
elist = elist[tb_offset:]
out_list.append(
theme_table[self._theme_name].format(
[
(Token, "Traceback"),
(Token, " "),
(Token.NormalEm, "(most recent call last)"),
(Token, ":"),
(Token, "\n"),
]
),
)
out_list.extend(self._format_list(elist))
# The exception info should be a single entry in the list.
lines = "".join(self._format_exception_only(etype, evalue))
out_list.append(lines)
# Find chained exceptions if we have a traceback (not for exception-only mode)
if etb is not None:
exception = self.get_parts_of_chained_exception(evalue)
if exception and (id(exception[1]) not in chained_exc_ids):
chained_exception_message: list[str] = (
self.prepare_chained_exception_message(evalue.__cause__)[0]
if evalue is not None
else [""]
)
etype, evalue, etb = exception
# Trace exception to avoid infinite 'cause' loop
chained_exc_ids.add(id(exception[1]))
chained_exceptions_tb_offset = 0
ol1 = self.structured_traceback(
etype,
evalue,
(etb, chained_exc_ids), # type: ignore[arg-type]
chained_exceptions_tb_offset,
context,
)
ol2 = chained_exception_message
out_list = ol1 + ol2 + out_list
return out_list
def _format_list(self, extracted_list: list[Any]) -> list[str]:
"""Format a list of traceback entry tuples for printing.
Given a list of tuples as returned by extract_tb() or
extract_stack(), return a list of strings ready for printing.
Each string in the resulting list corresponds to the item with the
same index in the argument list. Each string ends in a newline;
the strings may contain internal newlines as well, for those items
whose source text line is not None.
Lifted almost verbatim from traceback.py
"""
output_list = []
for ind, (filename, lineno, name, line) in enumerate(extracted_list):
# Will emphasize the last entry
em = True if ind == len(extracted_list) - 1 else False
item = theme_table[self._theme_name].format(
[(Token.NormalEm if em else Token.Normal, " ")]
+ _tokens_filename(em, filename, lineno=lineno)
)
# This seem to be only in xmode plain (%run sinpleer), investigate why not share with verbose.
# look at _tokens_filename in forma_record.
if name != "<module>":
item += theme_table[self._theme_name].format(
[
(Token.NormalEm if em else Token.Normal, " in "),
(Token.TB.NameEm if em else Token.TB.Name, name),
]
)
item += theme_table[self._theme_name].format(
[(Token.NormalEm if em else Token, "\n")]
)
if line:
item += theme_table[self._theme_name].format(
[
(Token.Line if em else Token, " "),
(Token.Line if em else Token, line.strip()),
(Token, "\n"),
]
)
output_list.append(item)
return output_list
def _format_exception_only(
self, etype: type[BaseException], value: BaseException | None
) -> list[str]:
"""Format the exception part of a traceback.
The arguments are the exception type and value such as given by
sys.exc_info()[:2]. The return value is a list of strings, each ending
in a newline. Normally, the list contains a single string; however,
for SyntaxError exceptions, it contains several lines that (when
printed) display detailed information about where the syntax error
occurred. The message indicating which exception occurred is the
always last string in the list.
Also lifted nearly verbatim from traceback.py
"""
have_filedata = False
output_list = []
stype_tokens = [(Token.ExcName, etype.__name__)]
stype: str = theme_table[self._theme_name].format(stype_tokens)
if value is None:
# Not sure if this can still happen in Python 2.6 and above
output_list.append(stype + "\n")
else:
if issubclass(etype, SyntaxError):
assert hasattr(value, "filename")
assert hasattr(value, "lineno")
assert hasattr(value, "text")
assert hasattr(value, "offset")
assert hasattr(value, "msg")
have_filedata = True
if not value.filename:
value.filename = "<string>"
if value.lineno:
lineno = value.lineno
textline = linecache.getline(value.filename, value.lineno)
else:
lineno = "unknown"
textline = ""
output_list.append(
theme_table[self._theme_name].format(
[(Token, " ")]
+ _tokens_filename(
True,
value.filename,
lineno=(None if lineno == "unknown" else lineno),
)
+ [(Token, "\n")]
)
)
if textline == "":
# sep 2025:
# textline = py3compat.cast_unicode(value.text, "utf-8")
if value.text is None:
textline = ""
else:
assert isinstance(value.text, str)
textline = value.text
if textline is not None:
i = 0
while i < len(textline) and textline[i].isspace():
i += 1
output_list.append(
theme_table[self._theme_name].format(
[
(Token.Line, " "),
(Token.Line, textline.strip()),
(Token, "\n"),
]
)
)
if value.offset is not None:
s = " "
for c in textline[i : value.offset - 1]:
if c.isspace():
s += c
else:
s += " "
output_list.append(
theme_table[self._theme_name].format(
[(Token.Caret, s + "^"), (Token, "\n")]
)
)
s = value.msg
else:
s = self._some_str(value)
if s:
output_list.append(
theme_table[self._theme_name].format(
stype_tokens
+ [
(Token.ExcName, ":"),
(Token, " "),
(Token, s),
(Token, "\n"),
]
)
)
else:
output_list.append("%s\n" % stype)
# PEP-678 notes
output_list.extend(f"{x}\n" for x in getattr(value, "__notes__", []))
# sync with user hooks
if have_filedata:
ipinst = get_ipython()
if ipinst is not None:
assert value is not None
assert hasattr(value, "lineno")
assert hasattr(value, "filename")
ipinst.hooks.synchronize_with_editor(value.filename, value.lineno, 0)
return output_list
def get_exception_only(self, etype, value):
"""Only print the exception type and message, without a traceback.
Parameters
----------
etype : exception type
value : exception value
"""
return ListTB.structured_traceback(self, etype, value)
def show_exception_only(
self, etype: BaseException | None, evalue: TracebackType | None
) -> None:
"""Only print the exception type and message, without a traceback.
Parameters
----------
etype : exception type
evalue : exception value
"""
# This method needs to use __call__ from *this* class, not the one from
# a subclass whose signature or behavior may be different
ostream = self.ostream
ostream.flush()
ostream.write("\n".join(self.get_exception_only(etype, evalue)))
ostream.flush()
def _some_str(self, value: Any) -> str:
# Lifted from traceback.py
try:
return str(value)
except:
return "<unprintable %s object>" % type(value).__name__
_sentinel = object()
_default = "default"
# ----------------------------------------------------------------------------
| ListTB |
python | pennersr__django-allauth | allauth/account/migrations/0004_alter_emailaddress_drop_unique_email.py | {
"start": 148,
"end": 550
} | class ____(migrations.Migration):
dependencies = [
("account", "0003_alter_emailaddress_create_unique_verified_email"),
]
operations = [
migrations.AlterField(
model_name="emailaddress",
name="email",
field=models.EmailField(
max_length=EMAIL_MAX_LENGTH, verbose_name="email address"
),
),
]
| Migration |
python | sqlalchemy__sqlalchemy | test/orm/test_froms.py | {
"start": 56493,
"end": 92983
} | class ____(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_alias_naming(self):
User = self.classes.User
sess = fixture_session()
ua = aliased(User, name="foobar")
q = sess.query(ua)
self.assert_compile(
q,
"SELECT foobar.id AS foobar_id, "
"foobar.name AS foobar_name FROM users AS foobar",
)
def test_correlated_subquery(self):
"""test that a subquery constructed from ORM attributes doesn't leak
out those entities to the outermost query."""
Address, users, User = (
self.classes.Address,
self.tables.users,
self.classes.User,
)
sess = fixture_session()
subq = (
select(func.count())
.where(User.id == Address.user_id)
.correlate(users)
.label("count")
)
# we don't want Address to be outside of the subquery here
eq_(
list(sess.query(User, subq)[0:3]),
[
(User(id=7, name="jack"), 1),
(User(id=8, name="ed"), 3),
(User(id=9, name="fred"), 1),
],
)
# same thing without the correlate, as it should
# not be needed
subq = (
select(func.count())
.where(User.id == Address.user_id)
.label("count")
)
# we don't want Address to be outside of the subquery here
eq_(
list(sess.query(User, subq)[0:3]),
[
(User(id=7, name="jack"), 1),
(User(id=8, name="ed"), 3),
(User(id=9, name="fred"), 1),
],
)
@testing.combinations((True,), (False,))
def test_no_uniquing_cols_legacy(self, with_entities):
"""test #6924"""
User = self.classes.User
Address = self.classes.Address
sess = fixture_session()
if with_entities:
q = (
sess.query(User)
.join(Address)
.filter(Address.user_id == 8)
.with_entities(User.id, User.name)
.order_by(User.id)
)
else:
q = (
sess.query(User.id, User.name)
.join(Address)
.filter(Address.user_id == 8)
.order_by(User.id)
)
is_(q._compile_state()._primary_entity, None)
eq_(q.all(), [(8, "ed"), (8, "ed"), (8, "ed")])
@testing.combinations((True,), (False,))
def test_no_uniquing_cols(self, with_entities):
"""test #6924"""
User = self.classes.User
Address = self.classes.Address
if with_entities:
stmt = (
select(User)
.join(Address)
.filter(Address.user_id == 8)
.with_only_columns(User.id, User.name)
.order_by(User.id)
)
else:
stmt = (
select(User.id, User.name)
.join(Address)
.filter(Address.user_id == 8)
.order_by(User.id)
)
compile_state = _ORMSelectCompileState._create_orm_context(
stmt, toplevel=True, compiler=None
)
is_(compile_state._primary_entity, None)
def test_column_queries_one(self):
User = self.classes.User
sess = fixture_session()
eq_(
sess.query(User.name).all(),
[("jack",), ("ed",), ("fred",), ("chuck",)],
)
def test_column_queries_two(self):
users, User = (
self.tables.users,
self.classes.User,
)
sess = fixture_session()
sel = users.select().where(User.id.in_([7, 8])).alias()
ua = aliased(User, sel)
q = sess.query(ua.name)
q2 = q.all()
eq_(list(q2), [("jack",), ("ed",)])
def test_column_queries_three(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
eq_(
sess.query(User.name, Address.email_address)
.filter(User.id == Address.user_id)
.all(),
[
("jack", "jack@bean.com"),
("ed", "ed@wood.com"),
("ed", "ed@bettyboop.com"),
("ed", "ed@lala.com"),
("fred", "fred@fred.com"),
],
)
def test_column_queries_four(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
eq_(
sess.query(User.name, func.count(Address.email_address))
.outerjoin(User.addresses)
.group_by(User.id, User.name)
.order_by(User.id)
.all(),
[("jack", 1), ("ed", 3), ("fred", 1), ("chuck", 0)],
)
def test_column_queries_five(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
eq_(
sess.query(User, func.count(Address.email_address))
.outerjoin(User.addresses)
.group_by(User)
.order_by(User.id)
.all(),
[
(User(name="jack", id=7), 1),
(User(name="ed", id=8), 3),
(User(name="fred", id=9), 1),
(User(name="chuck", id=10), 0),
],
)
def test_column_queries_six(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
eq_(
sess.query(func.count(Address.email_address), User)
.outerjoin(User.addresses)
.group_by(User)
.order_by(User.id)
.all(),
[
(1, User(name="jack", id=7)),
(3, User(name="ed", id=8)),
(1, User(name="fred", id=9)),
(0, User(name="chuck", id=10)),
],
)
def test_column_queries_seven(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
adalias = aliased(Address)
eq_(
sess.query(User, func.count(adalias.email_address))
.outerjoin(User.addresses.of_type(adalias))
.group_by(User)
.order_by(User.id)
.all(),
[
(User(name="jack", id=7), 1),
(User(name="ed", id=8), 3),
(User(name="fred", id=9), 1),
(User(name="chuck", id=10), 0),
],
)
def test_column_queries_eight(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
adalias = aliased(Address)
eq_(
sess.query(func.count(adalias.email_address), User)
.outerjoin(adalias, User.addresses)
.group_by(User)
.order_by(User.id)
.all(),
[
(1, User(name="jack", id=7)),
(3, User(name="ed", id=8)),
(1, User(name="fred", id=9)),
(0, User(name="chuck", id=10)),
],
)
def test_column_queries_nine(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
adalias = aliased(Address)
subq = (
sess.query(User, adalias.email_address, adalias.id)
.outerjoin(adalias, User.addresses)
.subquery()
)
ua = aliased(User, subq)
aa = aliased(adalias, subq)
q = sess.query(ua, aa.email_address).order_by(ua.id, aa.id)
# select from aliasing + explicit aliasing
eq_(
q.all(),
[
(User(name="jack", id=7), "jack@bean.com"),
(User(name="ed", id=8), "ed@wood.com"),
(User(name="ed", id=8), "ed@bettyboop.com"),
(User(name="ed", id=8), "ed@lala.com"),
(User(name="fred", id=9), "fred@fred.com"),
(User(name="chuck", id=10), None),
],
)
def test_column_queries_ten(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
# anon + select from aliasing
aa = aliased(Address)
subq = (
sess.query(User)
.join(aa, User.addresses)
.filter(aa.email_address.like("%ed%"))
.subquery()
)
ua = aliased(User, subq)
eq_(
sess.query(ua).all(),
[User(name="ed", id=8), User(name="fred", id=9)],
)
def test_column_queries_eleven(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
adalias = aliased(Address)
q1 = (
sess.query(User, adalias.email_address)
.outerjoin(adalias, User.addresses)
.options(joinedload(User.addresses))
.order_by(User.id, adalias.id)
.limit(10)
)
subq = (
sess.query(User, adalias.email_address, adalias.id)
.outerjoin(adalias, User.addresses)
.subquery()
)
ua = aliased(User, subq)
aa = aliased(adalias, subq)
q2 = (
sess.query(ua, aa.email_address)
.options(joinedload(ua.addresses))
.order_by(ua.id, aa.id)
.limit(10)
)
for q in [q1, q2]:
eq_(
q.all(),
[
(
User(
addresses=[
Address(
user_id=7,
email_address="jack@bean.com",
id=1,
)
],
name="jack",
id=7,
),
"jack@bean.com",
),
(
User(
addresses=[
Address(
user_id=8,
email_address="ed@wood.com",
id=2,
),
Address(
user_id=8,
email_address="ed@bettyboop.com",
id=3,
),
Address(
user_id=8,
email_address="ed@lala.com",
id=4,
),
],
name="ed",
id=8,
),
"ed@wood.com",
),
(
User(
addresses=[
Address(
user_id=8,
email_address="ed@wood.com",
id=2,
),
Address(
user_id=8,
email_address="ed@bettyboop.com",
id=3,
),
Address(
user_id=8,
email_address="ed@lala.com",
id=4,
),
],
name="ed",
id=8,
),
"ed@bettyboop.com",
),
(
User(
addresses=[
Address(
user_id=8,
email_address="ed@wood.com",
id=2,
),
Address(
user_id=8,
email_address="ed@bettyboop.com",
id=3,
),
Address(
user_id=8,
email_address="ed@lala.com",
id=4,
),
],
name="ed",
id=8,
),
"ed@lala.com",
),
(
User(
addresses=[
Address(
user_id=9,
email_address="fred@fred.com",
id=5,
)
],
name="fred",
id=9,
),
"fred@fred.com",
),
(User(addresses=[], name="chuck", id=10), None),
],
)
def test_column_from_limited_joinedload(self):
User = self.classes.User
sess = fixture_session()
def go():
results = (
sess.query(User)
.limit(1)
.options(joinedload(User.addresses))
.add_columns(User.name)
.all()
)
eq_(results, [(User(name="jack"), "jack")])
self.assert_sql_count(testing.db, go, 1)
def test_self_referential_from_self(self):
Order = self.classes.Order
sess = fixture_session()
oalias = aliased(Order)
q1 = (
sess.query(Order, oalias)
.filter(Order.user_id == oalias.user_id)
.filter(Order.user_id == 7)
.filter(Order.id > oalias.id)
.order_by(Order.id, oalias.id)
)
subq = (
sess.query(Order, oalias).filter(Order.id > oalias.id).subquery()
)
oa, oaa = aliased(Order, subq), aliased(oalias, subq)
q2 = (
sess.query(oa, oaa)
.filter(oa.user_id == oaa.user_id)
.filter(oa.user_id == 7)
.order_by(oa.id, oaa.id)
)
# same thing, but reversed.
subq = (
sess.query(oalias, Order).filter(Order.id < oalias.id).subquery()
)
oa, oaa = aliased(Order, subq), aliased(oalias, subq)
q3 = (
sess.query(oaa, oa)
.filter(oaa.user_id == oa.user_id)
.filter(oaa.user_id == 7)
.order_by(oaa.id, oa.id)
)
subq = (
sess.query(Order, oalias)
.filter(Order.user_id == oalias.user_id)
.filter(Order.user_id == 7)
.filter(Order.id > oalias.id)
.subquery()
)
oa, oaa = aliased(Order, subq), aliased(oalias, subq)
# here we go....two layers of aliasing (due to joinedload w/ limit)
q4 = (
sess.query(oa, oaa)
.order_by(oa.id, oaa.id)
.limit(10)
.options(joinedload(oa.items))
)
# gratuitous four layers
subq4 = subq
for i in range(4):
oa, oaa = aliased(Order, subq4), aliased(oaa, subq4)
subq4 = sess.query(oa, oaa).subquery()
oa, oaa = aliased(Order, subq4), aliased(oaa, subq4)
q5 = (
sess.query(oa, oaa)
.order_by(oa.id, oaa.id)
.limit(10)
.options(joinedload(oa.items))
)
for q in [
q1,
q2,
q3,
q4,
q5,
]:
eq_(
q.all(),
[
(
Order(
address_id=1,
description="order 3",
isopen=1,
user_id=7,
id=3,
),
Order(
address_id=1,
description="order 1",
isopen=0,
user_id=7,
id=1,
),
),
(
Order(
address_id=None,
description="order 5",
isopen=0,
user_id=7,
id=5,
),
Order(
address_id=1,
description="order 1",
isopen=0,
user_id=7,
id=1,
),
),
(
Order(
address_id=None,
description="order 5",
isopen=0,
user_id=7,
id=5,
),
Order(
address_id=1,
description="order 3",
isopen=1,
user_id=7,
id=3,
),
),
],
)
def test_from_self_internal_literals_newstyle(self):
Order = self.classes.Order
stmt = select(
Order.id, Order.description, literal_column("'q'").label("foo")
).where(Order.description == "order 3")
subq = aliased(
Order,
stmt.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).subquery(),
)
stmt = select(subq).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
self.assert_compile(
stmt,
"SELECT anon_1.orders_id AS "
"anon_1_orders_id, "
"anon_1.orders_description AS anon_1_orders_description "
"FROM (SELECT "
"orders.id AS orders_id, "
"orders.description AS orders_description, "
"'q' AS foo FROM orders WHERE "
"orders.description = :description_1) AS "
"anon_1",
)
def test_multi_mappers(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
test_session = fixture_session()
(user7, user8, user9, user10) = test_session.query(User).all()
(
address1,
address2,
address3,
address4,
address5,
) = test_session.query(Address).all()
expected = [
(user7, address1),
(user8, address2),
(user8, address3),
(user8, address4),
(user9, address5),
(user10, None),
]
sess = fixture_session(future=True)
selectquery = (
users.outerjoin(addresses)
.select()
.order_by(users.c.id, addresses.c.id)
)
result = sess.execute(
select(User, Address).from_statement(selectquery)
)
eq_(
list(result),
expected,
)
sess.expunge_all()
for address_entity in (Address, aliased(Address)):
q = (
sess.query(User)
.add_entity(address_entity)
.outerjoin(address_entity, User.addresses)
.order_by(User.id, address_entity.id)
)
eq_(q.all(), expected)
sess.expunge_all()
q = sess.query(User).add_entity(address_entity)
q = q.join(address_entity, User.addresses)
q = q.filter_by(email_address="ed@bettyboop.com")
eq_(q.all(), [(user8, address3)])
sess.expunge_all()
q = (
sess.query(User, address_entity)
.join(address_entity, User.addresses)
.filter_by(email_address="ed@bettyboop.com")
)
eq_(q.all(), [(user8, address3)])
sess.expunge_all()
q = (
sess.query(User, address_entity)
.join(address_entity, User.addresses)
.options(joinedload(User.addresses))
.filter_by(email_address="ed@bettyboop.com")
)
eq_(list(util.OrderedSet(q.all())), [(user8, address3)])
sess.expunge_all()
def test_aliased_multi_mappers(self):
User, addresses, users, Address = (
self.classes.User,
self.tables.addresses,
self.tables.users,
self.classes.Address,
)
sess = fixture_session()
(user7, user8, user9, user10) = sess.query(User).all()
(address1, address2, address3, address4, address5) = sess.query(
Address
).all()
expected = [
(user7, address1),
(user8, address2),
(user8, address3),
(user8, address4),
(user9, address5),
(user10, None),
]
adalias = addresses.alias("adalias")
uaj = users.outerjoin(adalias)
ua = aliased(User, uaj)
q = sess.query(ua)
q = q.add_entity(Address, alias=adalias)
result = q.order_by(User.id, adalias.c.id).all()
assert result == expected
sess.expunge_all()
q = sess.query(ua).add_entity(Address, alias=adalias)
result = q.filter(adalias.c.email_address == "ed@bettyboop.com").all()
assert result == [(user8, address3)]
def test_with_entities(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
q = sess.query(User).filter(User.id == 7).order_by(User.name)
self.assert_compile(
q.with_entities(User.id, Address).filter(
Address.user_id == User.id
),
"SELECT users.id AS users_id, addresses.id "
"AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address"
" AS addresses_email_address FROM users, "
"addresses WHERE users.id = :id_1 AND "
"addresses.user_id = users.id ORDER BY "
"users.name",
)
def test_multi_columns(self):
users, User = self.tables.users, self.classes.User
sess = fixture_session()
expected = [(u, u.name) for u in sess.query(User).all()]
for add_col in (User.name, users.c.name):
assert sess.query(User).add_columns(add_col).all() == expected
sess.expunge_all()
assert_raises(
sa_exc.ArgumentError, sess.query(User).add_columns, object()
)
def test_add_multi_columns(self):
"""test that add_column accepts a FROM clause."""
users, User = self.tables.users, self.classes.User
sess = fixture_session()
eq_(
sess.query(User.id).add_columns(users).all(),
[(7, 7, "jack"), (8, 8, "ed"), (9, 9, "fred"), (10, 10, "chuck")],
)
def test_multi_columns_2(self):
"""test aliased/nonalised joins with the usage of add_columns()"""
User, Address, addresses, users = (
self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users,
)
sess = fixture_session()
(user7, user8, user9, user10) = sess.query(User).all()
expected = [(user7, 1), (user8, 3), (user9, 1), (user10, 0)]
q = sess.query(User)
q = (
q.group_by(users)
.order_by(User.id)
.outerjoin(User.addresses)
.add_columns(func.count(Address.id).label("count"))
)
eq_(q.all(), expected)
sess.expunge_all()
adalias = aliased(Address)
q = sess.query(User)
q = (
q.group_by(users)
.order_by(User.id)
.outerjoin(User.addresses.of_type(adalias))
.add_columns(func.count(adalias.id).label("count"))
)
eq_(q.all(), expected)
sess.expunge_all()
# TODO: figure out why group_by(users) doesn't work here
count = func.count(addresses.c.id).label("count")
s = (
select(users, count)
.select_from(users.outerjoin(addresses))
.group_by(*[c for c in users.c])
.order_by(User.id)
)
q = sess.query(User)
result = (
q.add_columns(s.selected_columns.count).from_statement(s).all()
)
assert result == expected
def test_multi_columns_3(self):
User = self.classes.User
users = self.tables.users
sess = fixture_session()
q = sess.query(User.id, User.name)
stmt = select(users).order_by(users.c.id)
q = q.from_statement(stmt)
eq_(q.all(), [(7, "jack"), (8, "ed"), (9, "fred"), (10, "chuck")])
def test_raw_columns(self):
addresses, users, User = (
self.tables.addresses,
self.tables.users,
self.classes.User,
)
sess = fixture_session()
(user7, user8, user9, user10) = sess.query(User).all()
expected = [
(user7, 1, "Name:jack"),
(user8, 3, "Name:ed"),
(user9, 1, "Name:fred"),
(user10, 0, "Name:chuck"),
]
adalias = addresses.alias()
with fixture_session() as sess:
q = (
sess.query(User)
.add_columns(
func.count(adalias.c.id), ("Name:" + users.c.name)
)
.outerjoin(adalias)
.group_by(users)
.order_by(users.c.id)
)
eq_(q.all(), expected)
# test with a straight statement
s = (
select(
users,
func.count(addresses.c.id).label("count"),
("Name:" + users.c.name).label("concat"),
)
.select_from(users.outerjoin(addresses))
.group_by(*[c for c in users.c])
.order_by(users.c.id)
)
with fixture_session() as sess:
q = sess.query(User)
result = (
q.add_columns(
s.selected_columns.count, s.selected_columns.concat
)
.from_statement(s)
.all()
)
eq_(result, expected)
with fixture_session() as sess:
uaj = users.outerjoin(addresses)
ua = aliased(User, uaj)
q = (
fixture_session()
.query(ua)
.add_columns(
func.count(addresses.c.id), ("Name:" + users.c.name)
)
.group_by(users)
.order_by(users.c.id)
)
eq_(q.all(), expected)
with fixture_session() as sess:
q = (
sess.query(User)
.add_columns(
func.count(addresses.c.id), ("Name:" + users.c.name)
)
.outerjoin(User.addresses)
.group_by(users)
.order_by(users.c.id)
)
eq_(q.all(), expected)
with fixture_session() as sess:
q = (
sess.query(User)
.add_columns(
func.count(adalias.c.id), ("Name:" + users.c.name)
)
.outerjoin(adalias)
.group_by(users)
.order_by(users.c.id)
)
eq_(q.all(), expected)
def test_unrelated_column(self):
"""Test for #9217"""
User = self.classes.User
q = select(User.id, func.lower("SANDY").label("name")).where(
User.id == 7
)
s = select(User).from_statement(q)
sess = fixture_session()
res = sess.scalars(s).one()
in_("name", res.__dict__)
eq_(res, User(name="sandy", id=7))
def test_unrelated_column_col_prop(self, decl_base):
"""Test for #9217 combined with #9273"""
class User(ComparableEntity, decl_base):
__tablename__ = "some_user_table"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column()
age: Mapped[int] = mapped_column()
is_adult: Mapped[bool] = column_property(age >= 18)
stmt = select(
literal(1).label("id"),
literal("John").label("name"),
literal(30).label("age"),
)
s = select(User).from_statement(stmt)
sess = fixture_session()
res = sess.scalars(s).one()
eq_(res, User(name="John", age=30, id=1))
def test_expression_selectable_matches_mzero(self):
User, Address = self.classes.User, self.classes.Address
ua = aliased(User)
aa = aliased(Address)
s = fixture_session()
for crit, j, exp in [
(
User.id + Address.id,
(User.addresses,),
"SELECT users.id + addresses.id AS anon_1 "
"FROM users JOIN addresses ON users.id = "
"addresses.user_id",
),
(
User.id + Address.id,
(Address.user,),
"SELECT users.id + addresses.id AS anon_1 "
"FROM addresses JOIN users ON users.id = "
"addresses.user_id",
),
(
Address.id + User.id,
(User.addresses,),
"SELECT addresses.id + users.id AS anon_1 "
"FROM users JOIN addresses ON users.id = "
"addresses.user_id",
),
(
User.id + aa.id,
(aa, User.addresses),
"SELECT users.id + addresses_1.id AS anon_1 "
"FROM users JOIN addresses AS addresses_1 "
"ON users.id = addresses_1.user_id",
),
]:
q = s.query(crit)
mzero = q._compile_state()._entity_zero()
is_(mzero, q._compile_state()._entities[0].entity_zero)
q = q.join(*j)
self.assert_compile(q, exp)
for crit, j, exp in [
(
ua.id + Address.id,
(ua.addresses,),
"SELECT users_1.id + addresses.id AS anon_1 "
"FROM users AS users_1 JOIN addresses "
"ON users_1.id = addresses.user_id",
),
(
ua.id + aa.id,
(aa, ua.addresses),
"SELECT users_1.id + addresses_1.id AS anon_1 "
"FROM users AS users_1 JOIN addresses AS "
"addresses_1 ON users_1.id = addresses_1.user_id",
),
(
ua.id + aa.id,
(ua, aa.user),
"SELECT users_1.id + addresses_1.id AS anon_1 "
"FROM addresses AS addresses_1 JOIN "
"users AS users_1 "
"ON users_1.id = addresses_1.user_id",
),
]:
q = s.query(crit)
mzero = q._compile_state()._entity_zero()
is_(mzero, q._compile_state()._entities[0].entity_zero)
q = q.join(*j)
self.assert_compile(q, exp)
def test_aliased_adapt_on_names(self):
User, Address = self.classes("User", "Address")
agg_address = select(
Address.id,
func.sum(func.length(Address.email_address)).label(
"email_address"
),
).group_by(Address.user_id)
ag2 = aliased(Address, agg_address.subquery(), adapt_on_names=True)
# second, 'email_address' matches up to the aggregate, and we get a
# smooth JOIN from users->subquery and that's it
self.assert_compile(
select(User, ag2.email_address)
.join(ag2, User.addresses)
.filter(ag2.email_address > 5),
"SELECT users.id, users.name, anon_1.email_address FROM users "
"JOIN ("
"SELECT addresses.id AS id, sum(length(addresses.email_address)) "
"AS email_address FROM addresses GROUP BY addresses.user_id) AS "
"anon_1 ON users.id = addresses.user_id "
"WHERE anon_1.email_address > :email_address_1",
)
def test_aliased_warns_missing_column(self):
User, Address = self.classes("User", "Address")
agg_address = select(
Address.id,
func.sum(func.length(Address.email_address)).label(
"email_address"
),
).group_by(Address.user_id)
ag1 = aliased(Address, agg_address.subquery())
# without adapt on names, 'email_address' isn't matched up - we
# get the raw "address" element in the SELECT
with testing.expect_warnings(
r"Did not locate an expression in selectable for attribute "
r"'email_address'; to match by name, use the "
r"adapt_on_names parameter"
):
self.assert_compile(
select(User, ag1.email_address)
.join(ag1, User.addresses)
.filter(ag1.email_address > 5),
"SELECT users.id, users.name, addresses.email_address "
"FROM users JOIN "
"(SELECT addresses.id AS id, "
"sum(length(addresses.email_address)) "
"AS email_address FROM addresses "
"GROUP BY addresses.user_id) AS "
"anon_1 ON users.id = addresses.user_id, addresses "
"WHERE addresses.email_address > :email_address_1",
)
def test_aliased_warns_unmatched_name(self):
User, Address = self.classes("User", "Address")
agg_address = select(
Address.id,
func.sum(func.length(Address.email_address)).label(
"email_address_misspelled"
),
).group_by(Address.user_id)
ag1 = aliased(Address, agg_address.subquery(), adapt_on_names=True)
# adapt_on_names is set but still wrong name
with testing.expect_warnings(
r"Did not locate an expression in selectable for attribute "
r"'email_address'; ensure name is correct in expression"
):
self.assert_compile(
select(User, ag1.email_address)
.join(ag1, User.addresses)
.filter(ag1.email_address > 5),
"SELECT users.id, users.name, addresses.email_address "
"FROM users JOIN "
"(SELECT addresses.id AS id, "
"sum(length(addresses.email_address)) "
"AS email_address_misspelled FROM addresses "
"GROUP BY addresses.user_id) AS "
"anon_1 ON users.id = addresses.user_id, addresses "
"WHERE addresses.email_address > :email_address_1",
)
| MixedEntitiesTest |
python | getsentry__sentry | tests/sentry/event_manager/test_severity.py | {
"start": 11270,
"end": 16131
} | class ____(TestCase):
@patch("sentry.event_manager._get_severity_score", return_value=(0.1121, "ml"))
def test_flag_on(self, mock_get_severity_score: MagicMock) -> None:
manager = EventManager(
make_event(
exception={"values": [{"type": "NopeError", "value": "Nopey McNopeface"}]},
platform="python",
)
)
event = manager.save(self.project.id)
mock_get_severity_score.assert_called()
assert (
event.group
and event.group.get_event_metadata()["severity"] == 0.1121
and event.group.get_event_metadata()["severity_reason"] == "ml"
)
@patch("sentry.event_manager._get_severity_score", return_value=(0.1121, "ml"))
def test_flag_off(self, mock_get_severity_score: MagicMock) -> None:
with self.feature({"projects:first-event-severity-calculation": False}):
manager = EventManager(
make_event(
exception={"values": [{"type": "NopeError", "value": "Nopey McNopeface"}]},
platform="python",
)
)
event = manager.save(self.project.id)
mock_get_severity_score.assert_not_called()
assert (
event.group
and "severity" not in event.group.get_event_metadata()
and "severity.reason" not in event.group.get_event_metadata()
)
@patch("sentry.event_manager._get_severity_score", return_value=(0.1121, "ml"))
def test_get_severity_score_not_called_on_second_event(
self, mock_get_severity_score: MagicMock
) -> None:
nope_event = EventManager(
make_event(
exception={"values": [{"type": "NopeError", "value": "Nopey McNopeface"}]},
fingerprint=["dogs_are_great"],
platform="python",
)
).save(self.project.id)
assert mock_get_severity_score.call_count == 1
broken_stuff_event = EventManager(
make_event(
exception={"values": [{"type": "BrokenStuffError", "value": "It broke"}]},
fingerprint=["dogs_are_great"],
platform="python",
)
).save(self.project.id)
# Same group, but no extra `_get_severity_score` call
assert broken_stuff_event.group_id == nope_event.group_id
assert mock_get_severity_score.call_count == 1
@patch("sentry.event_manager._get_severity_score", return_value=(0.1121, "ml"))
def test_score_not_clobbered_by_second_event(self, mock_get_severity_score: MagicMock) -> None:
with TaskRunner(): # Needed because updating groups is normally async
nope_event = EventManager(
make_event(
exception={"values": [{"type": "NopeError", "value": "Nopey McNopeface"}]},
fingerprint=["dogs_are_great"],
platform="python",
)
).save(self.project.id)
assert nope_event.group_id is not None
group = Group.objects.get(id=nope_event.group_id)
# This first assertion isn't useful in and of itself, but it allows us to prove
# below that the data gets updated
assert group.data["metadata"]["type"] == "NopeError"
assert group.data["metadata"]["severity"] == 0.1121
broken_stuff_event = EventManager(
make_event(
exception={"values": [{"type": "BrokenStuffError", "value": "It broke"}]},
fingerprint=["dogs_are_great"],
platform="python",
)
).save(self.project.id)
# Both events landed in the same group
assert broken_stuff_event.group_id == nope_event.group_id
group.refresh_from_db()
# Metadata has been updated, but severity hasn't been clobbered in the process
assert group.data["metadata"]["type"] == "BrokenStuffError"
assert group.get_event_metadata()["severity"] == 0.1121
@patch("sentry.event_manager._get_severity_score")
def test_killswitch_on(self, mock_get_severity_score: MagicMock) -> None:
with override_options({"issues.severity.skip-seer-requests": [self.project.id]}):
event = EventManager(
make_event(
exception={"values": [{"type": "NopeError", "value": "Nopey McNopeface"}]},
platform="python",
)
).save(self.project.id)
assert event.group
assert "severity" not in event.group.get_event_metadata()
assert cache.get(SEER_ERROR_COUNT_KEY) is None
assert mock_get_severity_score.call_count == 0
| TestEventManagerSeverity |
python | pytorch__pytorch | test/jit/test_freezing.py | {
"start": 118214,
"end": 122118
} | class ____(JitTestCase):
def setUp(self):
super().setUp()
self.default_dtype = torch.get_default_dtype()
torch.set_default_dtype(torch.float)
def tearDown(self):
super().tearDown()
torch.set_default_dtype(self.default_dtype)
def getConv(self):
return nn.Conv2d(3, 32, kernel_size=3, stride=2).eval()
def getInput(self):
return torch.rand([4, 3, 4, 4])
def freezeAndConvert(self, mod):
mod = torch.jit.freeze(torch.jit.script(mod.eval()))
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
return mod
def checkResults(self, mod1, mod2):
inp = self.getInput()
self.assertEqual(mod1(inp), mod2(inp))
def test_successful(self):
# simple conv-relu
mod_eager = nn.Sequential(self.getConv(), nn.Hardswish(), nn.ReLU())
mod = self.freezeAndConvert(mod_eager)
FileCheck().check("mkldnn_convolution").check_next(
"prim::MKLDNNHardSwish_"
).check_next("aten::relu_").run(mod.graph)
self.checkResults(mod_eager, mod)
def test_merge_liveness(self):
class Mod(nn.Module):
def __init__(self, tensor):
super().__init__()
self.tensor = tensor
def forward(self, x):
# this mul can be inplaced since x is dead after this use
temporary = x * self.tensor
# temporary livespan is the return node,
# add can not be inplaced
return temporary + temporary, temporary
mod_eager = nn.Sequential(self.getConv(), Mod(torch.rand([4, 32, 1, 1])))
mod = self.freezeAndConvert(mod_eager)
FileCheck().check("aten::mul_").check_not("aten::add_").run(mod.graph)
self.checkResults(mod_eager, mod)
def test_always_alive_values(self):
class Mod(nn.Module):
def __init__(self, tensor):
super().__init__()
self.tensor = tensor
def forward(self, x):
# x can't be inplaced because its a return value,
# check that the inplacing pass doesn't try to inplace
# self.tensor because its always alive
return x * self.tensor, x
mod_eager = nn.Sequential(self.getConv(), Mod(torch.rand([4, 32, 1, 1])))
mod = self.freezeAndConvert(mod_eager)
FileCheck().check_not("aten::mul_").run(mod.graph)
self.checkResults(mod_eager, mod)
conv = self.getConv()
class Mod(nn.Module):
def __init__(self) -> None:
super().__init__()
self.tensor = torch.rand([4, 32, 1, 1])
self.conv = conv
def forward(self, x):
# the shapes dont add up on this just testing a particular pattern
conv_output = self.conv(x)
return conv_output, self.conv(torch.add(x, x))
mod = self.freezeAndConvert(Mod())
# x is an input to the graph, and so it should not be inplaced
# in the torch.add(x, x) call
FileCheck().check_not("aten::add_").run(mod.graph)
def test_switch_inputs_to_inplace(self):
class Mod(nn.Module):
def __init__(self, tensor):
super().__init__()
self.tensor = tensor
def forward(self, x):
# self.tensor cannot be inplaced, however x can,
# and bc add is commutative we can reverse inputs to add_
return self.tensor + x
mod_eager = nn.Sequential(self.getConv(), Mod(torch.rand([4, 32, 1, 1])))
mod = self.freezeAndConvert(mod_eager)
FileCheck().check("aten::add_").run(mod.graph)
self.checkResults(mod_eager, mod)
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestMKLDNNReinplacing |
python | davidhalter__jedi | jedi/inference/value/klass.py | {
"start": 3231,
"end": 4704
} | class ____(TreeNameDefinition):
def __init__(self, class_value, tree_name, name_context, apply_decorators):
super().__init__(name_context, tree_name)
self._apply_decorators = apply_decorators
self._class_value = class_value
@iterator_to_value_set
def infer(self):
# We're using a different value to infer, so we cannot call super().
from jedi.inference.syntax_tree import tree_name_to_values
inferred = tree_name_to_values(
self.parent_context.inference_state, self.parent_context, self.tree_name)
for result_value in inferred:
if self._apply_decorators:
yield from result_value.py__get__(instance=None, class_value=self._class_value)
else:
yield result_value
@property
def api_type(self):
type_ = super().api_type
if type_ == 'function':
definition = self.tree_name.get_definition()
if definition is None:
return type_
if function_is_property(definition):
# This essentially checks if there is an @property before
# the function. @property could be something different, but
# any programmer that redefines property as something that
# is not really a property anymore, should be shot. (i.e.
# this is a heuristic).
return 'property'
return type_
| ClassName |
python | dask__dask | dask/array/_array_expr/_rechunk.py | {
"start": 2605,
"end": 6155
} | class ____(Rechunk):
_parameters = ["array", "_chunks", "threshold", "block_size_limit"]
@cached_property
def chunks(self):
return self.operand("_chunks")
def _lower(self):
return
def _layer(self):
steps = plan_rechunk(
self.array.chunks,
self.chunks,
self.array.dtype.itemsize,
self.threshold,
self.block_size_limit,
)
name = self.array.name
old_chunks = self.array.chunks
layers = []
for i, c in enumerate(steps):
level = len(steps) - i - 1
name, old_chunks, layer = _compute_rechunk(
name, old_chunks, c, level, self.name
)
layers.append(layer)
return toolz.merge(*layers)
def _compute_rechunk(old_name, old_chunks, chunks, level, name):
"""Compute the rechunk of *x* to the given *chunks*."""
# TODO: redo this logic
# if x.size == 0:
# # Special case for empty array, as the algorithm below does not behave correctly
# return empty(x.shape, chunks=chunks, dtype=x.dtype)
ndim = len(old_chunks)
crossed = intersect_chunks(old_chunks, chunks)
x2 = dict()
intermediates = dict()
# token = tokenize(old_name, chunks)
if level != 0:
merge_name = name.replace("rechunk-merge-", f"rechunk-merge-{level}-")
split_name = name.replace("rechunk-merge-", f"rechunk-split-{level}-")
else:
merge_name = name.replace("rechunk-merge-", "rechunk-merge-")
split_name = name.replace("rechunk-merge-", "rechunk-split-")
split_name_suffixes = itertools.count()
# Pre-allocate old block references, to allow reuse and reduce the
# graph's memory footprint a bit.
old_blocks = np.empty([len(c) for c in old_chunks], dtype="O")
for index in np.ndindex(old_blocks.shape):
old_blocks[index] = (old_name,) + index
# Iterate over all new blocks
new_index = itertools.product(*(range(len(c)) for c in chunks))
for new_idx, cross1 in zip(new_index, crossed):
key = (merge_name,) + new_idx
old_block_indices = [[cr[i][0] for cr in cross1] for i in range(ndim)]
subdims1 = [len(set(old_block_indices[i])) for i in range(ndim)]
rec_cat_arg = np.empty(subdims1, dtype="O")
rec_cat_arg_flat = rec_cat_arg.flat
# Iterate over the old blocks required to build the new block
for rec_cat_index, ind_slices in enumerate(cross1):
old_block_index, slices = zip(*ind_slices)
name = (split_name, next(split_name_suffixes))
old_index = old_blocks[old_block_index][1:]
if all(
slc.start == 0 and slc.stop == old_chunks[i][ind]
for i, (slc, ind) in enumerate(zip(slices, old_index))
):
rec_cat_arg_flat[rec_cat_index] = old_blocks[old_block_index]
else:
intermediates[name] = (
operator.getitem,
old_blocks[old_block_index],
slices,
)
rec_cat_arg_flat[rec_cat_index] = name
assert rec_cat_index == rec_cat_arg.size - 1
# New block is formed by concatenation of sliced old blocks
if all(d == 1 for d in rec_cat_arg.shape):
x2[key] = rec_cat_arg.flat[0]
else:
x2[key] = (concatenate3, rec_cat_arg.tolist())
del old_blocks, new_index
return name, chunks, {**x2, **intermediates}
| TasksRechunk |
python | pytorch__pytorch | torch/jit/_trace.py | {
"start": 10757,
"end": 23717
} | class ____(Exception):
def __init__(self, graph_diff_error, tensor_compare_error, extra_msg=None):
self.message = "Tracing failed sanity checks!\n"
if extra_msg is not None:
self.message += extra_msg + "\n"
if graph_diff_error is not None:
self.message += "ERROR: Graphs differed across invocations!\n"
self.message += indent(graph_diff_error) + "\n"
if tensor_compare_error is not None:
self.message += (
"ERROR: Tensor-valued Constant nodes differed in value "
"across invocations. This often indicates that the tracer has"
" encountered untraceable code.\n"
)
self.message += indent(tensor_compare_error) + "\n"
super().__init__(self.message)
# Check the traced module against a set of user-provided validation inputs
@torch.no_grad()
def _check_trace(
check_inputs,
func,
traced_func,
check_tolerance,
strict,
force_outplace,
is_trace_module,
_module_class,
example_inputs_is_kwarg=False,
):
# Note: tracing is independent of optimizations, which consume the trace
for inputs in check_inputs:
if isinstance(inputs, torch.Tensor):
inputs = (inputs,)
if is_trace_module:
copied_dict = {}
# pyrefly: ignore [missing-attribute]
for name, data in inputs.items():
copied_dict[name] = _clone_inputs(data)
check_mod = torch.jit.trace_module(
getattr(func, "__self__", func),
copied_dict,
check_trace=False,
strict=strict,
_force_outplace=force_outplace,
_module_class=_module_class,
_compilation_unit=torch._C.CompilationUnit(),
example_inputs_is_kwarg=example_inputs_is_kwarg,
_store_inputs=False,
)
check_mod_func = check_mod._c._get_method(traced_func.name)
inputs = inputs[traced_func.name]
if (
isinstance(inputs, (torch.Tensor))
or isinstance(inputs, dict)
and not example_inputs_is_kwarg
):
inputs = (inputs,)
else:
if example_inputs_is_kwarg:
check_mod = torch.jit.trace(
func,
check_trace=False,
strict=strict,
_force_outplace=force_outplace,
_module_class=_module_class,
example_kwarg_inputs=_clone_inputs(inputs),
_store_inputs=False,
)
else:
check_mod = torch.jit.trace(
func,
_clone_inputs(inputs),
check_trace=False,
strict=strict,
_force_outplace=force_outplace,
_module_class=_module_class,
_store_inputs=False,
)
check_mod_func = check_mod
def graph_diagnostic_info():
mod_canonicalized = torch._C._jit_pass_canonicalize(traced_func.graph)
torch._C._jit_pass_inline(mod_canonicalized)
torch._C._jit_pass_erase_shape_information(mod_canonicalized)
mod_str = str(mod_canonicalized)
mod_str = re.sub(r"___torch_mangle_[0-9]+\.", "", mod_str)
check_canonicalized = torch._C._jit_pass_canonicalize(check_mod_func.graph)
torch._C._jit_pass_inline(check_canonicalized)
torch._C._jit_pass_erase_shape_information(check_canonicalized)
check_str = str(check_canonicalized)
check_str = re.sub(r"___torch_mangle_[0-9]+\.", "", check_str)
graph_diff_errors = None
if mod_str != check_str:
import difflib
graph_diff = difflib.ndiff(
mod_str.splitlines(True), check_str.splitlines(True)
)
graph_diff_errors = "Graph diff:\n" + indent("".join(graph_diff)) + "\n"
for n_mod, n_check in zip(
mod_canonicalized.nodes(), check_canonicalized.nodes()
):
if str(n_mod) != str(n_check):
graph_diff_errors += "First diverging operator:\n"
node_diff = difflib.ndiff(
str(n_mod).splitlines(True), str(n_check).splitlines(True)
)
source_printout = (
"Node diff:\n" + indent("".join(node_diff)) + "\n"
)
mod_stack = n_mod.sourceRange()
if mod_stack:
source_printout += (
"Trace source location:\n" + indent(mod_stack) + "\n"
)
check_stack = n_check.sourceRange()
if check_stack:
source_printout += (
"Check source location:\n" + indent(check_stack) + "\n"
)
graph_diff_errors += source_printout
break # For now, only print out the first pair of nodes that diverges
tensor_compare_errors = None
# Check Tensor-valued constant nodes
for n_mod, n_check in zip(
mod_canonicalized.nodes(), check_canonicalized.nodes()
):
if n_mod.kind() != n_check.kind():
break # Graphs have already diverged
if n_mod.kind() == "prim::Constant" and not (
n_mod.mustBeNone() or n_check.mustBeNone()
):
if not n_mod.hasAttribute("value"):
continue
if n_mod.kindOf("value") != "t" or n_check.kindOf("value") != "t":
continue
mod_tensor_val = n_mod.t("value")
check_tensor_val = n_check.t("value")
try:
torch.testing.assert_close(
mod_tensor_val, check_tensor_val, equal_nan=True
)
except (RuntimeError, AssertionError) as e:
if tensor_compare_errors is None:
tensor_compare_errors = ""
tensor_compare_errors += "Node:\n" + indent(str(n_mod)) + "\n"
compare_stack = n_mod.sourceRange()
if compare_stack:
tensor_compare_errors += (
"Source Location:\n" + indent(compare_stack) + "\n"
)
tensor_compare_errors += "Comparison exception: " + indent(
str(e)
)
break # For now, only print the first diverging pair
return graph_diff_errors, tensor_compare_errors
def wrap_retval(x):
return x if isinstance(x, tuple) else (x,)
def run_mod_and_filter_tensor_outputs(mod, inputs, running_what):
try:
if isinstance(inputs, dict) and example_inputs_is_kwarg:
outs = wrap_retval(mod(**inputs))
else:
outs = wrap_retval(mod(*_clone_inputs(inputs)))
outs = [out for out in outs if isinstance(out, torch.Tensor)]
return outs
except Exception as e:
graph_diff_errors, tensor_compare_errors = graph_diagnostic_info()
msg = f"encountered an exception while running the {running_what} with test inputs.\nException:\n{indent(str(e))}"
raise TracingCheckError(
graph_diff_errors,
tensor_compare_errors,
extra_msg=msg,
) from e
has_warned = [False]
def maybe_warn_nondeterministic():
if has_warned[0]:
return
has_warned[0] = True
nondeterm_ops = [
op for op in traced_func.graph.nodes() if op.isNondeterministic()
]
if len(nondeterm_ops) > 0:
nondeterministic_ops_warning = "Trace had nondeterministic nodes. "
nondeterministic_ops_warning += (
"Did you forget call .eval() on your model? Nodes:\n"
)
nondeterministic_ops_warning += "\n".join(
[indent(str(op)) for op in nondeterm_ops][:20]
)
nondeterministic_ops_warning += (
"\nThis may cause errors in trace checking. To disable trace checking,"
" pass check_trace=False to torch.jit.trace()"
)
warnings.warn(
nondeterministic_ops_warning, category=TracerWarning, stacklevel=5
)
def compare_outputs(original, reference, match_what):
all_ok = True
for i, (orig, ref) in enumerate(zip(original, reference)):
try:
if orig.is_quantized:
orig = orig.dequantize()
if ref.is_quantized:
ref = ref.dequantize()
if orig.is_mkldnn:
orig = orig.to_dense()
if ref.is_mkldnn:
ref = ref.to_dense()
if ref.is_complex() or orig.is_complex():
torch.testing.assert_close(
orig.to(torch.cdouble),
ref.to(torch.cdouble),
rtol=check_tolerance,
atol=default_tolerances(orig, ref)[1],
equal_nan=True,
)
else:
if orig.is_mps or ref.is_mps:
torch.testing.assert_close(
orig.float(),
ref.float(),
rtol=check_tolerance,
atol=default_tolerances(orig, ref)[1],
equal_nan=True,
)
elif getattr(orig, "is_nested", None) or getattr(
ref, "is_nested", None
):
assert getattr(orig, "is_nested", None) == getattr(
ref, "is_nested", None
)
for t_orig, t_ref in zip(orig.unbind(), ref.unbind()):
torch.testing.assert_close(
t_orig.double(),
t_ref.double(),
rtol=check_tolerance,
atol=default_tolerances(t_orig, t_ref)[1],
equal_nan=True,
)
else:
torch.testing.assert_close(
orig.double(),
ref.double(),
rtol=check_tolerance,
atol=default_tolerances(orig, ref)[1],
equal_nan=True,
)
except AssertionError as e:
maybe_warn_nondeterministic()
warnings.warn(
"Output nr "
+ str(i + 1)
+ ". of the traced function does not match "
"the corresponding output of the "
+ match_what
+ ". Detailed error:\n"
+ str(e),
category=TracerWarning,
stacklevel=4,
)
all_ok = False
return all_ok
traced_outs = run_mod_and_filter_tensor_outputs(traced_func, inputs, "trace")
fn_outs = run_mod_and_filter_tensor_outputs(func, inputs, "Python function")
if compare_outputs(traced_outs, fn_outs, "Python function"):
check_outs = run_mod_and_filter_tensor_outputs(
check_mod_func, inputs, "repeated trace"
)
compare_outputs(traced_outs, check_outs, "repeated trace")
diag_info = graph_diagnostic_info()
if any(info is not None for info in diag_info):
raise TracingCheckError(*diag_info)
| TracingCheckError |
python | redis__redis-py | redis/_parsers/base.py | {
"start": 13698,
"end": 16332
} | class ____(AsyncBaseParser):
"""Base class for async resp parsing"""
__slots__ = AsyncBaseParser.__slots__ + ("encoder", "_buffer", "_pos", "_chunks")
def __init__(self, socket_read_size: int):
super().__init__(socket_read_size)
self.encoder: Optional[Encoder] = None
self._buffer = b""
self._chunks = []
self._pos = 0
def _clear(self):
self._buffer = b""
self._chunks.clear()
def on_connect(self, connection):
"""Called when the stream connects"""
self._stream = connection._reader
if self._stream is None:
raise RedisError("Buffer is closed.")
self.encoder = connection.encoder
self._clear()
self._connected = True
def on_disconnect(self):
"""Called when the stream disconnects"""
self._connected = False
async def can_read_destructive(self) -> bool:
if not self._connected:
raise RedisError("Buffer is closed.")
if self._buffer:
return True
try:
async with async_timeout(0):
return self._stream.at_eof()
except TimeoutError:
return False
async def _read(self, length: int) -> bytes:
"""
Read `length` bytes of data. These are assumed to be followed
by a '\r\n' terminator which is subsequently discarded.
"""
want = length + 2
end = self._pos + want
if len(self._buffer) >= end:
result = self._buffer[self._pos : end - 2]
else:
tail = self._buffer[self._pos :]
try:
data = await self._stream.readexactly(want - len(tail))
except IncompleteReadError as error:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from error
result = (tail + data)[:-2]
self._chunks.append(data)
self._pos += want
return result
async def _readline(self) -> bytes:
"""
read an unknown number of bytes up to the next '\r\n'
line separator, which is discarded.
"""
found = self._buffer.find(b"\r\n", self._pos)
if found >= 0:
result = self._buffer[self._pos : found]
else:
tail = self._buffer[self._pos :]
data = await self._stream.readline()
if not data.endswith(b"\r\n"):
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
result = (tail + data)[:-2]
self._chunks.append(data)
self._pos += len(result) + 2
return result
| _AsyncRESPBase |
python | django__django | django/db/models/fields/json.py | {
"start": 24172,
"end": 24255
} | class ____(KeyTransformNumericLookupMixin, lookups.LessThan):
pass
| KeyTransformLt |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 20836,
"end": 21105
} | class ____(models.Model):
greeting = models.CharField(max_length=100)
history = HistoricalRecords()
# Clear the SIMPLE_HISTORY_HISTORY_CHANGE_REASON_FIELD
delattr(settings, "SIMPLE_HISTORY_HISTORY_CHANGE_REASON_USE_TEXT_FIELD")
| DefaultTextFieldChangeReasonModel |
python | skorch-dev__skorch | skorch/utils.py | {
"start": 20225,
"end": 22350
} | class ____:
"""Stores a generator and calls ``tee`` on it to create new generators
when ``TeeGenerator`` is iterated over to let you iterate over the given
generator more than once.
"""
def __init__(self, gen):
self.gen = gen
def __iter__(self):
self.gen, it = tee(self.gen)
yield from it
def _check_f_arguments(caller_name, **kwargs):
"""Check file name arguments and return them
This is used for checking if arguments to, e.g., ``save_params``
are correct.
Parameters
----------
caller_name : str
Name of caller, is only required for the error message.
kwargs : dict
Keyword arguments that are intended to be checked.
Returns
-------
kwargs_module : dict
Keyword arguments for saving/loading modules.
kwargs_other : dict
Keyword arguments for saving/loading everything else.
Raises
------
TypeError
There are two possibilities for arguments to be
incorrect. First, if they're not called 'f_*'. Second, if both
'f_params' and 'f_module' are passed, since those designate the
same thing.
"""
if kwargs.get('f_params') and kwargs.get('f_module'):
raise TypeError("{} called with both f_params and f_module, please choose one"
.format(caller_name))
kwargs_module = {}
kwargs_other = {}
keys_other = {'f_history', 'f_pickle'}
for key, val in kwargs.items():
if not key.startswith('f_'):
raise TypeError(
"{name} got an unexpected argument '{key}', did you mean 'f_{key}'?"
.format(name=caller_name, key=key))
if val is None:
continue
if key in keys_other:
kwargs_other[key] = val
else:
# strip 'f_' prefix and attach '_', and normalize 'params' to 'module'
# e.g. 'f_optimizer' becomes 'optimizer_', 'f_params' becomes 'module_'
key = 'module_' if key == 'f_params' else key[2:] + '_'
kwargs_module[key] = val
return kwargs_module, kwargs_other
| TeeGenerator |
python | wandb__wandb | wandb/vendor/pygments/lexers/esoteric.py | {
"start": 2485,
"end": 4885
} | class ____(RegexLexer):
"""
Basic lexer for the input language for the
`CAmkES <https://sel4.systems/CAmkES/>`_ component platform.
.. versionadded:: 2.1
"""
name = 'CAmkES'
aliases = ['camkes', 'idl4']
filenames = ['*.camkes', '*.idl4']
tokens = {
'root': [
# C pre-processor directive
(r'^\s*#.*\n', Comment.Preproc),
# Whitespace, comments
(r'\s+', Text),
(r'/\*(.|\n)*?\*/', Comment),
(r'//.*\n', Comment),
(r'[\[(){},.;\]]', Punctuation),
(r'[~!%^&*+=|?:<>/-]', Operator),
(words(('assembly', 'attribute', 'component', 'composition',
'configuration', 'connection', 'connector', 'consumes',
'control', 'dataport', 'Dataport', 'Dataports', 'emits',
'event', 'Event', 'Events', 'export', 'from', 'group',
'hardware', 'has', 'interface', 'Interface', 'maybe',
'procedure', 'Procedure', 'Procedures', 'provides',
'template', 'thread', 'threads', 'to', 'uses', 'with'),
suffix=r'\b'), Keyword),
(words(('bool', 'boolean', 'Buf', 'char', 'character', 'double',
'float', 'in', 'inout', 'int', 'int16_6', 'int32_t',
'int64_t', 'int8_t', 'integer', 'mutex', 'out', 'real',
'refin', 'semaphore', 'signed', 'string', 'struct',
'uint16_t', 'uint32_t', 'uint64_t', 'uint8_t', 'uintptr_t',
'unsigned', 'void'),
suffix=r'\b'), Keyword.Type),
# Recognised attributes
(r'[a-zA-Z_]\w*_(priority|domain|buffer)', Keyword.Reserved),
(words(('dma_pool', 'from_access', 'to_access'), suffix=r'\b'),
Keyword.Reserved),
# CAmkES-level include
(r'import\s+(<[^>]*>|"[^"]*");', Comment.Preproc),
# C-level include
(r'include\s+(<[^>]*>|"[^"]*");', Comment.Preproc),
# Literals
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'-?[\d]+', Number),
(r'-?[\d]+\.[\d]+', Number.Float),
(r'"[^"]*"', String),
(r'[Tt]rue|[Ff]alse', Name.Builtin),
# Identifiers
(r'[a-zA-Z_]\w*', Name),
],
}
| CAmkESLexer |
python | getsentry__sentry-python | sentry_sdk/integrations/ray.py | {
"start": 5031,
"end": 5323
} | class ____(Integration):
identifier = "ray"
origin = f"auto.queue.{identifier}"
@staticmethod
def setup_once():
# type: () -> None
version = package_version("ray")
_check_minimum_version(RayIntegration, version)
_patch_ray_remote()
| RayIntegration |
python | scikit-learn__scikit-learn | sklearn/ensemble/_hist_gradient_boosting/grower.py | {
"start": 919,
"end": 5376
} | class ____:
"""Tree Node class used in TreeGrower.
This isn't used for prediction purposes, only for training (see
TreePredictor).
Parameters
----------
depth : int
The depth of the node, i.e. its distance from the root.
sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32
The indices of the samples at the node.
partition_start : int
start position of the node's sample_indices in splitter.partition.
partition_stop : int
stop position of the node's sample_indices in splitter.partition.
sum_gradients : float
The sum of the gradients of the samples at the node.
sum_hessians : float
The sum of the hessians of the samples at the node.
Attributes
----------
depth : int
The depth of the node, i.e. its distance from the root.
sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32
The indices of the samples at the node.
sum_gradients : float
The sum of the gradients of the samples at the node.
sum_hessians : float
The sum of the hessians of the samples at the node.
split_info : SplitInfo or None
The result of the split evaluation.
is_leaf : bool
True if node is a leaf
left_child : TreeNode or None
The left child of the node. None for leaves.
right_child : TreeNode or None
The right child of the node. None for leaves.
value : float or None
The value of the leaf, as computed in finalize_leaf(). None for
non-leaf nodes.
partition_start : int
start position of the node's sample_indices in splitter.partition.
partition_stop : int
stop position of the node's sample_indices in splitter.partition.
allowed_features : None or ndarray, dtype=int
Indices of features allowed to split for children.
interaction_cst_indices : None or list of ints
Indices of the interaction sets that have to be applied on splits of
child nodes. The fewer sets the stronger the constraint as fewer sets
contain fewer features.
children_lower_bound : float
children_upper_bound : float
"""
def __init__(
self,
*,
depth,
sample_indices,
partition_start,
partition_stop,
sum_gradients,
sum_hessians,
value=None,
):
self.depth = depth
self.sample_indices = sample_indices
self.n_samples = sample_indices.shape[0]
self.sum_gradients = sum_gradients
self.sum_hessians = sum_hessians
self.value = value
self.is_leaf = False
self.allowed_features = None
self.interaction_cst_indices = None
self.set_children_bounds(float("-inf"), float("+inf"))
self.split_info = None
self.left_child = None
self.right_child = None
self.histograms = None
# start and stop indices of the node in the splitter.partition
# array. Concretely,
# self.sample_indices = view(self.splitter.partition[start:stop])
# Please see the comments about splitter.partition and
# splitter.split_indices for more info about this design.
# These 2 attributes are only used in _update_raw_prediction, because we
# need to iterate over the leaves and I don't know how to efficiently
# store the sample_indices views because they're all of different sizes.
self.partition_start = partition_start
self.partition_stop = partition_stop
def set_children_bounds(self, lower, upper):
"""Set children values bounds to respect monotonic constraints."""
# These are bounds for the node's *children* values, not the node's
# value. The bounds are used in the splitter when considering potential
# left and right child.
self.children_lower_bound = lower
self.children_upper_bound = upper
def __lt__(self, other_node):
"""Comparison for priority queue.
Nodes with high gain are higher priority than nodes with low gain.
heapq.heappush only need the '<' operator.
heapq.heappop take the smallest item first (smaller is higher
priority).
Parameters
----------
other_node : TreeNode
The node to compare with.
"""
return self.split_info.gain > other_node.split_info.gain
| TreeNode |
python | encode__django-rest-framework | tests/test_atomic_requests.py | {
"start": 501,
"end": 654
} | class ____(APIView):
def post(self, request, *args, **kwargs):
BasicModel.objects.create()
return Response({'method': 'GET'})
| BasicView |
python | apache__airflow | airflow-core/src/airflow/cli/cli_config.py | {
"start": 1786,
"end": 2470
} | class ____(argparse.ArgumentParser):
"""CustomParser to display help message."""
def _check_value(self, action, value):
"""Override _check_value and check conditionally added command."""
if action.choices is not None and value not in action.choices:
check_legacy_command(action, value)
super()._check_value(action, value)
def error(self, message):
"""Override error and use print_help instead of print_usage."""
self.print_help()
self.exit(2, f"\n{self.prog} command error: {message}, see help above.\n")
# Used in Arg to enable `None' as a distinct value from "not passed"
_UNSET = object()
| DefaultHelpParser |
python | pytorch__pytorch | test/inductor/test_aoti_cross_compile_windows.py | {
"start": 411,
"end": 717
} | class ____:
"""Configuration for a model test case."""
name: str
model_class: type
example_inputs: tuple[torch.Tensor, ...]
dynamic_shapes: Optional[dict[str, Any]] = None
inductor_configs: Optional[dict[str, Any]] = None
rtol: float = 1e-4
atol: float = 1e-4
| ModelTestConfig |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-prestashop/components.py | {
"start": 453,
"end": 555
} | class ____(Exception):
"""Replacement for pendulum's ParserError"""
pass
@dataclass
| ParserError |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-qdrant/destination_qdrant/config.py | {
"start": 222,
"end": 309
} | class ____(BaseModel):
mode: Literal["no_auth"] = Field("no_auth", const=True)
| NoAuth |
python | huggingface__transformers | tests/models/clap/test_processing_clap.py | {
"start": 987,
"end": 4649
} | class ____(unittest.TestCase):
def setUp(self):
self.checkpoint = "laion/clap-htsat-unfused"
self.tmpdirname = tempfile.mkdtemp()
def get_tokenizer(self, **kwargs):
return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)
def get_feature_extractor(self, **kwargs):
return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
feature_extractor = self.get_feature_extractor()
processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
processor.save_pretrained(self.tmpdirname)
processor = ClapProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, TokenizersBackend)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
def test_save_load_pretrained_additional_features(self):
processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
processor = ClapProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, TokenizersBackend)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
def test_feature_extractor(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
raw_speech = floats_list((3, 1000))
input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
input_processor = processor(audio=raw_speech, return_tensors="np")
for key in input_feat_extract:
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def test_tokenizer(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
input_str = "This is a test string"
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok:
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_tokenizer_decode(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
| ClapProcessorTest |
python | django-extensions__django-extensions | django_extensions/management/commands/admin_generator.py | {
"start": 2312,
"end": 3505
} | class ____(UnicodeMixin):
def __init__(self, app_config, model_res, **options):
self.app_config = app_config
self.model_res = model_res
self.options = options
def __iter__(self):
for model in self.app_config.get_models():
admin_model = AdminModel(model, **self.options)
for model_re in self.model_res:
if model_re.search(admin_model.name):
break
else:
if self.model_res:
continue
yield admin_model
def __unicode__(self):
return "".join(self._unicode_generator())
def _unicode_generator(self):
models_list = [admin_model.name for admin_model in self]
yield PRINT_IMPORTS % dict(models=", ".join(models_list))
admin_model_names = []
for admin_model in self:
yield PRINT_ADMIN_CLASS % dict(
name=admin_model.name,
class_=admin_model,
)
admin_model_names.append(admin_model.name)
def __repr__(self):
return "<%s[%s]>" % (
self.__class__.__name__,
self.app.name,
)
| AdminApp |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataflow.py | {
"start": 19034,
"end": 23101
} | class ____:
@pytest.fixture
def create_operator(self):
"""
Creates a mock create datapipeline operator to be used in testing.
"""
return DataflowCreatePipelineOperator(
task_id="test_create_datapipeline",
body=TEST_PIPELINE_BODY,
project_id=TEST_PROJECT,
location=TEST_LOCATION,
gcp_conn_id=GCP_CONN_ID,
)
@mock.patch("airflow.providers.google.cloud.operators.dataflow.DataflowHook")
def test_execute(self, mock_hook, create_operator):
"""
Test that operator creates and calls the Dataflow Data Pipeline hook with the correct parameters
"""
create_operator.execute(mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id="test_gcp_conn_id",
impersonation_chain=None,
)
mock_hook.return_value.create_data_pipeline.assert_called_once_with(
project_id=TEST_PROJECT, body=TEST_PIPELINE_BODY, location=TEST_LOCATION
)
def test_body_invalid(self, sdk_connection_not_found):
"""
Test that if the operator is not passed a Request Body, an AirflowException is raised
"""
init_kwargs = {
"task_id": "test_create_datapipeline",
"body": {},
"project_id": TEST_PROJECT,
"location": TEST_LOCATION,
"gcp_conn_id": GCP_CONN_ID,
}
with pytest.raises(AirflowException):
DataflowCreatePipelineOperator(**init_kwargs).execute(mock.MagicMock())
def test_projectid_invalid(self):
"""
Test that if the operator is not passed a Project ID, an AirflowException is raised
"""
init_kwargs = {
"task_id": "test_create_datapipeline",
"body": TEST_PIPELINE_BODY,
"project_id": None,
"location": TEST_LOCATION,
"gcp_conn_id": GCP_CONN_ID,
}
with pytest.raises(AirflowException):
DataflowCreatePipelineOperator(**init_kwargs).execute(mock.MagicMock())
def test_location_invalid(self):
"""
Test that if the operator is not passed a location, an AirflowException is raised
"""
init_kwargs = {
"task_id": "test_create_datapipeline",
"body": TEST_PIPELINE_BODY,
"project_id": TEST_PROJECT,
"location": None,
"gcp_conn_id": GCP_CONN_ID,
}
with pytest.raises(AirflowException):
DataflowCreatePipelineOperator(**init_kwargs).execute(mock.MagicMock())
def test_response_invalid(self, sdk_connection_not_found):
"""
Test that if the Response Body contains an error message, an AirflowException is raised
"""
init_kwargs = {
"task_id": "test_create_datapipeline",
"body": {"name": TEST_PIPELINE_NAME, "error": "Testing that AirflowException is raised"},
"project_id": TEST_PROJECT,
"location": TEST_LOCATION,
"gcp_conn_id": GCP_CONN_ID,
}
with pytest.raises(AirflowException):
DataflowCreatePipelineOperator(**init_kwargs).execute(mock.MagicMock())
@mock.patch("airflow.providers.google.cloud.operators.dataflow.DataflowHook")
def test_response_409(self, mock_hook, create_operator):
"""
Test that if the Pipeline already exists, the operator does not fail and retrieves existed Pipeline
"""
mock_hook.return_value.create_data_pipeline.side_effect = HttpError(
resp=httplib2.Response({"status": "409"}), content=b"content"
)
create_operator.execute(mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id="test_gcp_conn_id",
impersonation_chain=None,
)
mock_hook.return_value.get_data_pipeline.assert_called_once_with(
project_id=TEST_PROJECT, pipeline_name=TEST_PIPELINE_NAME, location=TEST_LOCATION
)
| TestDataflowCreatePipelineOperator |
python | pydantic__pydantic | pydantic/experimental/pipeline.py | {
"start": 22753,
"end": 22852
} | class ____(annotated_types.SupportsLe, annotated_types.SupportsGe, Protocol):
pass
| _SupportsRange |
python | docker__docker-py | docker/models/resource.py | {
"start": 0,
"end": 1313
} | class ____:
"""
A base class for representing a single object on the server.
"""
id_attribute = 'Id'
def __init__(self, attrs=None, client=None, collection=None):
#: A client pointing at the server that this object is on.
self.client = client
#: The collection that this model is part of.
self.collection = collection
#: The raw representation of this object from the API
self.attrs = attrs
if self.attrs is None:
self.attrs = {}
def __repr__(self):
return f"<{self.__class__.__name__}: {self.short_id}>"
def __eq__(self, other):
return isinstance(other, self.__class__) and self.id == other.id
def __hash__(self):
return hash(f"{self.__class__.__name__}:{self.id}")
@property
def id(self):
"""
The ID of the object.
"""
return self.attrs.get(self.id_attribute)
@property
def short_id(self):
"""
The ID of the object, truncated to 12 characters.
"""
return self.id[:12]
def reload(self):
"""
Load this object from the server again and update ``attrs`` with the
new data.
"""
new_model = self.collection.get(self.id)
self.attrs = new_model.attrs
| Model |
python | walkccc__LeetCode | solutions/876. Middle of the Linked List/876.py | {
"start": 0,
"end": 199
} | class ____:
def middleNode(self, head: ListNode) -> ListNode:
slow = head
fast = head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
return slow
| Solution |
python | tensorflow__tensorflow | tensorflow/lite/python/util_test.py | {
"start": 1468,
"end": 6677
} | class ____(test_util.TensorFlowTestCase):
def testConvertEnumToDtype(self):
self.assertEqual(
util._convert_tflite_enum_type_to_tf_type(0), dtypes.float32)
self.assertEqual(
util._convert_tflite_enum_type_to_tf_type(1), dtypes.float16)
self.assertEqual(util._convert_tflite_enum_type_to_tf_type(2), dtypes.int32)
self.assertEqual(util._convert_tflite_enum_type_to_tf_type(3), dtypes.uint8)
self.assertEqual(util._convert_tflite_enum_type_to_tf_type(4), dtypes.int64)
self.assertEqual(
util._convert_tflite_enum_type_to_tf_type(5), dtypes.string)
self.assertEqual(util._convert_tflite_enum_type_to_tf_type(6), dtypes.bool)
self.assertEqual(util._convert_tflite_enum_type_to_tf_type(7), dtypes.int16)
self.assertEqual(
util._convert_tflite_enum_type_to_tf_type(8), dtypes.complex64)
self.assertEqual(util._convert_tflite_enum_type_to_tf_type(9), dtypes.int8)
self.assertEqual(
util._convert_tflite_enum_type_to_tf_type(10), dtypes.float64)
self.assertEqual(
util._convert_tflite_enum_type_to_tf_type(11), dtypes.complex128)
self.assertEqual(
util._convert_tflite_enum_type_to_tf_type(16), dtypes.uint32)
with self.assertRaises(ValueError) as error:
util._convert_tflite_enum_type_to_tf_type(20)
self.assertEqual(
"Unsupported enum 20. The valid map of enum to tf types is : "
"{0: tf.float32, 1: tf.float16, 2: tf.int32, 3: tf.uint8, 4: tf.int64, "
"5: tf.string, 6: tf.bool, 7: tf.int16, 8: tf.complex64, 9: tf.int8, "
"10: tf.float64, 11: tf.complex128, 16: tf.uint32}",
str(error.exception))
def testTensorName(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[4])
out_tensors = array_ops.split(
value=in_tensor, num_or_size_splits=[1, 1, 1, 1], axis=0)
expect_names = ["split", "split:1", "split:2", "split:3"]
for i in range(len(expect_names)):
got_name = util.get_tensor_name(out_tensors[i])
self.assertEqual(got_name, expect_names[i])
def testUint32PassThrough(self):
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=(4,), dtype=tf.uint32),
tf.keras.layers.Reshape(target_shape=(2, 2))
])
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
interpreter = lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
self.assertEqual(input_details["dtype"], np.uint32)
self.assertEqual(output_details["dtype"], np.uint32)
in_array = np.array([[1, 1, 1, 1]], dtype="uint32") * ((1 << 32) - 1)
expected_out = np.reshape(in_array, (2, 2))
interpreter.set_tensor(input_details["index"], in_array)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details["index"])[0]
self.assertAllEqual(expected_out, output_data)
@test_util.enable_control_flow_v2
def testRemoveLowerUsingSwitchMerge(self):
with ops.Graph().as_default():
i = array_ops.placeholder(dtype=dtypes.int32, shape=())
c = lambda i: math_ops.less(i, 10)
b = lambda i: math_ops.add(i, 1)
while_loop.while_loop(c, b, [i])
sess = session.Session()
new_graph_def = convert_to_constants.disable_lower_using_switch_merge(
sess.graph_def)
lower_using_switch_merge_is_removed = False
for node in new_graph_def.node:
if node.op == "While" or node.op == "StatelessWhile":
if not node.attr["_lower_using_switch_merge"].b:
lower_using_switch_merge_is_removed = True
self.assertTrue(lower_using_switch_merge_is_removed)
def testConvertBytes(self):
source, header = util.convert_bytes_to_c_source(
b"\x00\x01\x02\x23", "foo", 16, use_tensorflow_license=False)
self.assertTrue(
source.find("const unsigned char foo[] DATA_ALIGN_ATTRIBUTE = {"))
self.assertTrue(source.find(""" 0x00, 0x01,
0x02, 0x23,"""))
self.assertNotEqual(-1, source.find("const int foo_len = 4;"))
self.assertEqual(-1, source.find("/* Copyright"))
self.assertEqual(-1, source.find("#include " ""))
self.assertNotEqual(-1, header.find("extern const unsigned char foo[];"))
self.assertNotEqual(-1, header.find("extern const int foo_len;"))
self.assertEqual(-1, header.find("/* Copyright"))
source, header = util.convert_bytes_to_c_source(
b"\xff\xfe\xfd\xfc",
"bar",
80,
include_guard="MY_GUARD",
include_path="my/guard.h",
use_tensorflow_license=True)
self.assertNotEqual(
-1, source.find("const unsigned char bar[] DATA_ALIGN_ATTRIBUTE = {"))
self.assertNotEqual(-1, source.find(""" 0xff, 0xfe, 0xfd, 0xfc,"""))
self.assertNotEqual(-1, source.find("/* Copyright"))
self.assertNotEqual(-1, source.find("#include \"my/guard.h\""))
self.assertNotEqual(-1, header.find("#ifndef MY_GUARD"))
self.assertNotEqual(-1, header.find("#define MY_GUARD"))
self.assertNotEqual(-1, header.find("/* Copyright"))
| UtilTest |
python | fastai__fastai | fastai/callback/mixup.py | {
"start": 3082,
"end": 5041
} | class ____(MixHandler):
"Implementation of https://arxiv.org/abs/1905.04899"
def __init__(self,
alpha:float=1. # Determine `Beta` distribution in range (0.,inf]
):
super().__init__(alpha)
def before_batch(self):
"Add `rand_bbox` patches with size based on `lam` and location chosen randomly."
bs, _, H, W = self.x.size()
self.lam = self.distrib.sample((1,)).to(self.x.device)
shuffle = torch.randperm(bs).to(self.x.device)
xb1,self.yb1 = self.x[shuffle], tuple((self.y[shuffle],))
x1, y1, x2, y2 = self.rand_bbox(W, H, self.lam)
self.learn.xb[0][..., y1:y2, x1:x2] = xb1[..., y1:y2, x1:x2]
self.lam = (1 - ((x2-x1)*(y2-y1))/float(W*H))
if not self.stack_y:
ny_dims = len(self.y.size())
self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))
def rand_bbox(self,
W:int, # Input image width
H:int, # Input image height
lam:Tensor # lambda sample from Beta distribution i.e tensor([0.3647])
) -> tuple: # Represents the top-left pixel location and the bottom-right pixel location
"Give a bounding box location based on the size of the im and a weight"
cut_rat = torch.sqrt(1. - lam).to(self.x.device)
cut_w = torch.round(W * cut_rat).type(torch.long).to(self.x.device)
cut_h = torch.round(H * cut_rat).type(torch.long).to(self.x.device)
# uniform
cx = torch.randint(0, W, (1,)).to(self.x.device)
cy = torch.randint(0, H, (1,)).to(self.x.device)
x1 = torch.clamp(cx - torch.div(cut_w, 2, rounding_mode='floor'), 0, W)
y1 = torch.clamp(cy - torch.div(cut_h, 2, rounding_mode='floor'), 0, H)
x2 = torch.clamp(cx + torch.div(cut_w, 2, rounding_mode='floor'), 0, W)
y2 = torch.clamp(cy + torch.div(cut_h, 2, rounding_mode='floor'), 0, H)
return x1, y1, x2, y2
| CutMix |
python | sphinx-doc__sphinx | sphinx/domains/rst.py | {
"start": 6972,
"end": 7370
} | class ____(ReSTMarkup):
"""Description of a reST role."""
def handle_signature(self, sig: str, signode: desc_signature) -> str:
desc_name = f':{sig}:'
signode['fullname'] = sig.strip()
signode += addnodes.desc_name(desc_name, desc_name)
return sig
def get_index_text(self, objectname: str, name: str) -> str:
return _('%s (role)') % name
| ReSTRole |
python | wandb__wandb | wandb/integration/lightgbm/__init__.py | {
"start": 2856,
"end": 7981
} | class ____:
"""Internal class to handle `wandb_callback` logic.
This callback is adapted form the LightGBM's `_RecordEvaluationCallback`.
"""
def __init__(self, log_params: bool = True, define_metric: bool = True) -> None:
self.order = 20
self.before_iteration = False
self.log_params = log_params
self.define_metric_bool = define_metric
def _init(self, env: "CallbackEnv") -> None:
with wb_telemetry.context() as tel:
tel.feature.lightgbm_wandb_callback = True
# log the params as W&B config.
if self.log_params:
wandb.config.update(env.params)
# use `define_metric` to set the wandb summary to the best metric value.
for item in env.evaluation_result_list:
if self.define_metric_bool:
if len(item) == 4:
data_name, eval_name = item[:2]
_define_metric(data_name, eval_name)
else:
data_name, eval_name = item[1].split()
_define_metric(data_name, f"{eval_name}-mean")
_define_metric(data_name, f"{eval_name}-stdv")
def __call__(self, env: "CallbackEnv") -> None:
if env.iteration == env.begin_iteration: # type: ignore
self._init(env)
for item in env.evaluation_result_list:
if len(item) == 4:
data_name, eval_name, result = item[:3]
wandb.log(
{data_name + "_" + eval_name: result},
commit=False,
)
else:
data_name, eval_name = item[1].split()
res_mean = item[2]
res_stdv = item[4]
wandb.log(
{
data_name + "_" + eval_name + "-mean": res_mean,
data_name + "_" + eval_name + "-stdv": res_stdv,
},
commit=False,
)
# call `commit=True` to log the data as a single W&B step.
wandb.log({"iteration": env.iteration}, commit=True)
def wandb_callback(log_params: bool = True, define_metric: bool = True) -> Callable:
"""Automatically integrates LightGBM with wandb.
Args:
log_params: (boolean) if True (default) logs params passed to lightgbm.train as W&B config
define_metric: (boolean) if True (default) capture model performance at the best step, instead of the last step, of training in your `wandb.summary`
Passing `wandb_callback` to LightGBM will:
- log params passed to lightgbm.train as W&B config (default).
- log evaluation metrics collected by LightGBM, such as rmse, accuracy etc to Weights & Biases
- Capture the best metric in `wandb.summary` when `define_metric=True` (default).
Use `log_summary` as an extension of this callback.
Example:
```python
params = {
"boosting_type": "gbdt",
"objective": "regression",
}
gbm = lgb.train(
params,
lgb_train,
num_boost_round=10,
valid_sets=lgb_eval,
valid_names=("validation"),
callbacks=[wandb_callback()],
)
```
"""
return _WandbCallback(log_params, define_metric)
def log_summary(
model: Booster, feature_importance: bool = True, save_model_checkpoint: bool = False
) -> None:
"""Log useful metrics about lightgbm model after training is done.
Args:
model: (Booster) is an instance of lightgbm.basic.Booster.
feature_importance: (boolean) if True (default), logs the feature importance plot.
save_model_checkpoint: (boolean) if True saves the best model and upload as W&B artifacts.
Using this along with `wandb_callback` will:
- log `best_iteration` and `best_score` as `wandb.summary`.
- log feature importance plot.
- save and upload your best trained model to Weights & Biases Artifacts (when `save_model_checkpoint = True`)
Example:
```python
params = {
"boosting_type": "gbdt",
"objective": "regression",
}
gbm = lgb.train(
params,
lgb_train,
num_boost_round=10,
valid_sets=lgb_eval,
valid_names=("validation"),
callbacks=[wandb_callback()],
)
log_summary(gbm)
```
"""
if wandb.run is None:
raise wandb.Error("You must call wandb.init() before WandbCallback()")
if not isinstance(model, Booster):
raise wandb.Error("Model should be an instance of lightgbm.basic.Booster")
wandb.run.summary["best_iteration"] = model.best_iteration
wandb.run.summary["best_score"] = model.best_score
# Log feature importance
if feature_importance:
_log_feature_importance(model)
if save_model_checkpoint:
_checkpoint_artifact(model, model.best_iteration, aliases=["best"])
with wb_telemetry.context() as tel:
tel.feature.lightgbm_log_summary = True
| _WandbCallback |
python | joke2k__faker | faker/providers/phone_number/hi_IN/__init__.py | {
"start": 49,
"end": 232
} | class ____(PhoneNumberProvider):
formats = (
"+91 ##########",
"+91 ### #######",
"0##-########",
"0##########",
"0#### ######",
)
| Provider |
python | walkccc__LeetCode | solutions/825. Friends Of Appropriate Ages/825.py | {
"start": 0,
"end": 336
} | class ____:
def numFriendRequests(self, ages: list[int]) -> int:
ans = 0
count = [0] * 121
for age in ages:
count[age] += 1
for i in range(15, 121):
ans += count[i] * (count[i] - 1)
for i in range(15, 121):
for j in range(i // 2 + 8, i):
ans += count[i] * count[j]
return ans
| Solution |
python | python-attrs__attrs | tests/test_dunders.py | {
"start": 26315,
"end": 27064
} | class ____:
"""
Tests for `NOTHING`.
"""
def test_copy(self):
"""
__copy__ returns the same object.
"""
n = NOTHING
assert n is copy.copy(n)
def test_deepcopy(self):
"""
__deepcopy__ returns the same object.
"""
n = NOTHING
assert n is copy.deepcopy(n)
def test_eq(self):
"""
All instances are equal.
"""
assert NOTHING == NOTHING == NOTHING
assert not (NOTHING != NOTHING)
assert 1 != NOTHING
def test_false(self):
"""
NOTHING evaluates as falsey.
"""
assert not NOTHING
assert False is bool(NOTHING)
@attr.s(unsafe_hash=True, order=True)
| TestNothing |
python | apache__airflow | providers/snowflake/tests/unit/snowflake/hooks/test_sql.py | {
"start": 1165,
"end": 9124
} | class ____(SnowflakeHook):
conn_name_attr = "snowflake_conn_id"
get_conn = MagicMock(name="conn")
def get_cursor_descriptions(fields: list[str]) -> list[tuple[str]]:
return [(field,) for field in fields]
@pytest.mark.parametrize(
(
"return_last",
"split_statements",
"sql",
"cursor_calls",
"cursor_descriptions",
"cursor_results",
"hook_descriptions",
"hook_results",
"return_dictionaries",
),
[
pytest.param(
True,
False,
"select * from test.test",
["select * from test.test"],
[["id", "value"]],
([[1, 2], [11, 12]],),
[[("id",), ("value",)]],
[[1, 2], [11, 12]],
False,
id="The return_last set and no split statements set on single query in string",
),
pytest.param(
False,
False,
"select * from test.test;",
["select * from test.test;"],
[["id", "value"]],
([[1, 2], [11, 12]],),
[[("id",), ("value",)]],
[[1, 2], [11, 12]],
False,
id="The return_last not set and no split statements set on single query in string",
),
pytest.param(
True,
True,
"select * from test.test;",
["select * from test.test;"],
[["id", "value"]],
([[1, 2], [11, 12]],),
[[("id",), ("value",)]],
[[1, 2], [11, 12]],
False,
id="The return_last set and split statements set on single query in string",
),
pytest.param(
False,
True,
"select * from test.test;",
["select * from test.test;"],
[["id", "value"]],
([[1, 2], [11, 12]],),
[[("id",), ("value",)]],
[[[1, 2], [11, 12]]],
False,
id="The return_last not set and split statements set on single query in string",
),
pytest.param(
True,
True,
"select * from test.test;select * from test.test2;",
["select * from test.test;", "select * from test.test2;"],
[["id", "value"], ["id2", "value2"]],
([[1, 2], [11, 12]], [[3, 4], [13, 14]]),
[[("id2",), ("value2",)]],
[[3, 4], [13, 14]],
False,
id="The return_last set and split statements set on multiple queries in string",
), # Failing
pytest.param(
False,
True,
"select * from test.test;select * from test.test2;",
["select * from test.test;", "select * from test.test2;"],
[["id", "value"], ["id2", "value2"]],
([[1, 2], [11, 12]], [[3, 4], [13, 14]]),
[[("id",), ("value",)], [("id2",), ("value2",)]],
[[[1, 2], [11, 12]], [[3, 4], [13, 14]]],
False,
id="The return_last not set and split statements set on multiple queries in string",
),
pytest.param(
True,
True,
["select * from test.test;"],
["select * from test.test"],
[["id", "value"]],
([[1, 2], [11, 12]],),
[[("id",), ("value",)]],
[[[1, 2], [11, 12]]],
False,
id="The return_last set on single query in list",
),
pytest.param(
False,
True,
["select * from test.test;"],
["select * from test.test"],
[["id", "value"]],
([[1, 2], [11, 12]],),
[[("id",), ("value",)]],
[[[1, 2], [11, 12]]],
False,
id="The return_last not set on single query in list",
),
pytest.param(
True,
True,
"select * from test.test;select * from test.test2;",
["select * from test.test", "select * from test.test2"],
[["id", "value"], ["id2", "value2"]],
([[1, 2], [11, 12]], [[3, 4], [13, 14]]),
[[("id2",), ("value2",)]],
[[3, 4], [13, 14]],
False,
id="The return_last set on multiple queries in list",
),
pytest.param(
False,
True,
"select * from test.test;select * from test.test2;",
["select * from test.test", "select * from test.test2"],
[["id", "value"], ["id2", "value2"]],
([[1, 2], [11, 12]], [[3, 4], [13, 14]]),
[[("id",), ("value",)], [("id2",), ("value2",)]],
[[[1, 2], [11, 12]], [[3, 4], [13, 14]]],
False,
id="The return_last not set on multiple queries not set",
),
pytest.param(
False,
True,
"select * from test.test;select * from test.test2;",
["select * from test.test", "select * from test.test2"],
[["id", "value"], ["id2", "value2"]],
(
[{"id": 1, "value": 2}, {"id": 11, "value": 12}],
[{"id2": 3, "value2": 4}, {"id2": 13, "value2": 14}],
),
[[("id",), ("value",)], [("id2",), ("value2",)]],
[
[{"id": 1, "value": 2}, {"id": 11, "value": 12}],
[{"id2": 3, "value2": 4}, {"id2": 13, "value2": 14}],
],
True,
id="DictCursor: The return_last not set on multiple queries not set",
),
],
)
def test_query(
return_last,
split_statements,
sql,
cursor_calls,
cursor_descriptions,
cursor_results,
hook_descriptions,
hook_results,
return_dictionaries,
):
modified_descriptions = [
get_cursor_descriptions(cursor_description) for cursor_description in cursor_descriptions
]
dbapi_hook = SnowflakeHookForTests()
dbapi_hook.get_conn.return_value.cursor.return_value.rowcount = 2
dbapi_hook.get_conn.return_value.cursor.return_value._description_index = 0
def mock_execute(*args, **kwargs):
# the run method accesses description property directly, and we need to modify it after
# every execute, to make sure that different descriptions are returned. I could not find easier
# method with mocking
dbapi_hook.get_conn.return_value.cursor.return_value.description = modified_descriptions[
dbapi_hook.get_conn.return_value.cursor.return_value._description_index
]
dbapi_hook.get_conn.return_value.cursor.return_value._description_index += 1
dbapi_hook.get_conn.return_value.cursor.return_value.execute = mock_execute
dbapi_hook.get_conn.return_value.cursor.return_value.fetchall.side_effect = cursor_results
results = dbapi_hook.run(
sql=sql,
handler=fetch_all_handler,
return_last=return_last,
split_statements=split_statements,
return_dictionaries=return_dictionaries,
)
assert dbapi_hook.descriptions == hook_descriptions
assert dbapi_hook.last_description == hook_descriptions[-1]
assert results == hook_results
if return_dictionaries:
dbapi_hook.get_conn.return_value.cursor.assert_called_with(DictCursor)
else:
dbapi_hook.get_conn.return_value.cursor.assert_called_with()
dbapi_hook.get_conn.return_value.cursor.return_value.close.assert_called()
@pytest.mark.parametrize(
"empty_statement",
[
pytest.param([], id="Empty list"),
pytest.param("", id="Empty string"),
pytest.param("\n", id="Only EOL"),
],
)
def test_no_query(empty_statement):
dbapi_hook = SnowflakeHookForTests()
dbapi_hook.get_conn.return_value.cursor.rowcount = 0
with pytest.raises(ValueError, match="List of SQL statements is empty"):
dbapi_hook.run(sql=empty_statement)
| SnowflakeHookForTests |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/sqlite_datasource.py | {
"start": 4898,
"end": 7879
} | class ____(SQLDatasource):
"""Adds a sqlite datasource to the data context.
Args:
name: The name of this sqlite datasource.
connection_string: The SQLAlchemy connection string used to connect to the sqlite database.
For example: "sqlite:///path/to/file.db"
create_temp_table: Whether to leverage temporary tables during metric computation.
assets: An optional dictionary whose keys are TableAsset names and whose values
are TableAsset objects.
"""
# class var definitions
asset_types: ClassVar[List[Type[DataAsset]]] = [SqliteTableAsset, SqliteQueryAsset]
# Subclass instance var overrides
# right side of the operator determines the type name
# left side enforces the names on instance creation
type: Literal["sqlite"] = "sqlite" # type: ignore[assignment] # FIXME CoP
connection_string: Union[ConfigStr, SqliteDsn]
_TableAsset: Type[SqlTableAsset] = pydantic.PrivateAttr(SqliteTableAsset)
_QueryAsset: Type[SqlQueryAsset] = pydantic.PrivateAttr(SqliteQueryAsset)
@property
@override
def execution_engine_type(self) -> Type[SqlAlchemyExecutionEngine]:
"""Returns the default execution engine type."""
return SqliteExecutionEngine
@public_api
@override
def add_table_asset(
self,
name: str,
table_name: str = "",
schema_name: Optional[str] = None,
batch_metadata: Optional[BatchMetadata] = None,
) -> SqliteTableAsset:
"""Adds a table asset to this SQLite datasource
Args:
name: The name of this table asset
table_name: The name of the database table
schema_name: The schema to which this table belongs
batch_metadata: An arbitrary dictionary for a caller to annotate the asset
Returns:
The SqliteTableAsset added
"""
return cast(
"SqliteTableAsset",
super().add_table_asset(
name=name,
table_name=table_name,
schema_name=schema_name,
batch_metadata=batch_metadata,
),
)
add_table_asset.__doc__ = SQLDatasource.add_table_asset.__doc__
@public_api
@override
def add_query_asset(
self,
name: str,
query: str,
batch_metadata: Optional[BatchMetadata] = None,
) -> SqliteQueryAsset:
"""Adds a query asset to this SQLite datasource
Args:
name: The name of this query asset
query: The SQL query
batch_metadata: An arbitrary dictionary for a caller to annotate the asset
Returns:
The SqliteQueryAsset added
"""
return cast(
"SqliteQueryAsset",
super().add_query_asset(name=name, query=query, batch_metadata=batch_metadata),
)
add_query_asset.__doc__ = SQLDatasource.add_query_asset.__doc__
| SqliteDatasource |
python | apache__airflow | providers/snowflake/src/airflow/providers/snowflake/transfers/copy_into_snowflake.py | {
"start": 1489,
"end": 13611
} | class ____(BaseOperator):
"""
Executes a COPY INTO command to load files from an external stage from clouds to Snowflake.
This operator requires the snowflake_conn_id connection. The snowflake host, login,
and, password field must be setup in the connection. Other inputs can be defined
in the connection or hook instantiation.
:param namespace: snowflake namespace
:param table: snowflake table
:param file_format: file format name i.e. CSV, AVRO, etc
:param stage: reference to a specific snowflake stage. If the stage's schema is not the same as the
table one, it must be specified
:param prefix: cloud storage location specified to limit the set of files to load
:param files: files to load into table
:param pattern: pattern to load files from external location to table
:param snowflake_conn_id: Reference to :ref:`Snowflake connection id<howto/connection:snowflake>`
:param account: snowflake account name
:param warehouse: name of snowflake warehouse
:param database: name of snowflake database
:param region: name of snowflake region
:param role: name of snowflake role
:param schema: name of snowflake schema
:param authenticator: authenticator for Snowflake.
'snowflake' (default) to use the internal Snowflake authenticator
'externalbrowser' to authenticate using your web browser and
Okta, ADFS or any other SAML 2.0-compliant identify provider
(IdP) that has been defined for your account
``https://<your_okta_account_name>.okta.com`` to authenticate
through native Okta.
:param session_parameters: You can set session-level parameters at
the time you connect to Snowflake
:param copy_options: snowflake COPY INTO syntax copy options
:param validation_mode: snowflake COPY INTO syntax validation mode
"""
template_fields: Sequence[str] = ("files",)
template_fields_renderers = {"files": "json"}
def __init__(
self,
*,
files: list | None = None,
table: str,
stage: str,
prefix: str | None = None,
file_format: str,
schema: str | None = None,
columns_array: list | None = None,
pattern: str | None = None,
warehouse: str | None = None,
database: str | None = None,
autocommit: bool = True,
snowflake_conn_id: str = "snowflake_default",
role: str | None = None,
authenticator: str | None = None,
session_parameters: dict | None = None,
copy_options: str | None = None,
validation_mode: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.files = files
self.table = _validate_parameter("table", table)
self.stage = _validate_parameter("stage", stage)
self.prefix = prefix
self.file_format = file_format
self.schema = schema
self.columns_array = columns_array
self.pattern = pattern
self.warehouse = warehouse
self.database = database
self.autocommit = autocommit
self.snowflake_conn_id = snowflake_conn_id
self.role = role
self.authenticator = authenticator
self.session_parameters = session_parameters
self.copy_options = copy_options
self.validation_mode = validation_mode
self.hook: SnowflakeHook | None = None
self._sql: str | None = None
self._result: list[dict[str, Any]] = []
def execute(self, context: Any) -> None:
self.hook = SnowflakeHook(
snowflake_conn_id=self.snowflake_conn_id,
warehouse=self.warehouse,
database=self.database,
role=self.role,
schema=self.schema,
authenticator=self.authenticator,
session_parameters=self.session_parameters,
)
if self.schema:
into = f"{self.schema}.{self.table}"
else:
into = self.table # type: ignore[assignment]
if self.columns_array:
into = f"{into}({', '.join(self.columns_array)})"
self._sql = f"""
COPY INTO {into}
FROM @{self.stage}/{self.prefix or ""}
{"FILES=(" + ",".join(map(enclose_param, self.files)) + ")" if self.files else ""}
{"PATTERN=" + enclose_param(self.pattern) if self.pattern else ""}
FILE_FORMAT={self.file_format}
{self.copy_options or ""}
{self.validation_mode or ""}
"""
self.log.info("Executing COPY command...")
self._result = self.hook.run( # type: ignore # mypy does not work well with return_dictionaries=True
sql=self._sql,
autocommit=self.autocommit,
handler=lambda x: x.fetchall(),
return_dictionaries=True,
)
self.log.info("COPY command completed")
@staticmethod
def _extract_openlineage_unique_dataset_paths(
query_result: list[dict[str, Any]],
) -> tuple[list[tuple[str, str]], list[str]]:
"""
Extract and return unique OpenLineage dataset paths and file paths that failed to be parsed.
Each row in the results is expected to have a 'file' field, which is a URI.
The function parses these URIs and constructs a set of unique OpenLineage (namespace, name) tuples.
Additionally, it captures any URIs that cannot be parsed or processed
and returns them in a separate error list.
For Azure, Snowflake has a unique way of representing URI:
azure://<account_name>.blob.core.windows.net/<container_name>/path/to/file.csv
that is transformed by this function to a Dataset with more universal naming convention:
Dataset(namespace="wasbs://container_name@account_name", name="path/to"), as described at
https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md#wasbs-azure-blob-storage
:param query_result: A list of dictionaries, each containing a 'file' key with a URI value.
:return: Two lists - the first is a sorted list of tuples, each representing a unique dataset path,
and the second contains any URIs that cannot be parsed or processed correctly.
>>> method = CopyFromExternalStageToSnowflakeOperator._extract_openlineage_unique_dataset_paths
>>> results = [{"file": "azure://my_account.blob.core.windows.net/azure_container/dir3/file.csv"}]
>>> method(results)
([('wasbs://azure_container@my_account', 'dir3/file.csv')], [])
>>> results = [{"file": "azure://my_account.blob.core.windows.net/azure_container"}]
>>> method(results)
([('wasbs://azure_container@my_account', '/')], [])
>>> results = [{"file": "s3://bucket"}, {"file": "gcs://bucket/"}, {"file": "s3://bucket/a.csv"}]
>>> method(results)
([('gcs://bucket', '/'), ('s3://bucket', '/'), ('s3://bucket', 'a.csv')], [])
>>> results = [{"file": "s3://bucket/dir/file.csv"}, {"file": "gcs://bucket/dir/dir2/a.txt"}]
>>> method(results)
([('gcs://bucket', 'dir/dir2/a.txt'), ('s3://bucket', 'dir/file.csv')], [])
>>> results = [
... {"file": "s3://bucket/dir/file.csv"},
... {"file": "azure://my_account.something_new.windows.net/azure_container"},
... ]
>>> method(results)
([('s3://bucket', 'dir/file.csv')], ['azure://my_account.something_new.windows.net/azure_container'])
>>> results = [
... {"file": "s3://bucket/dir/file.csv"},
... {"file": "s3:/invalid-s3-uri"},
... {"file": "gcs:invalid-gcs-uri"},
... ]
>>> method(results)
([('s3://bucket', 'dir/file.csv')], ['gcs:invalid-gcs-uri', 's3:/invalid-s3-uri'])
"""
import re
from urllib.parse import urlparse
azure_regex = r"azure:\/\/(\w+)?\.blob.core.windows.net\/(\w+)\/?(.*)?"
extraction_error_files = []
unique_dataset_paths = set()
for row in query_result:
try:
uri = urlparse(row["file"])
# Check for valid URI structure
if not uri.scheme or not uri.netloc:
extraction_error_files.append(row["file"])
continue
if uri.scheme == "azure":
match = re.fullmatch(azure_regex, row["file"])
if not match:
extraction_error_files.append(row["file"])
continue
account_name, container_name, name = match.groups()
namespace = f"wasbs://{container_name}@{account_name}"
else:
namespace = f"{uri.scheme}://{uri.netloc}"
name = uri.path.lstrip("/")
if name in ("", "."):
name = "/"
unique_dataset_paths.add((namespace, name))
except Exception:
extraction_error_files.append(row["file"])
return sorted(unique_dataset_paths), sorted(extraction_error_files)
def get_openlineage_facets_on_complete(self, task_instance):
"""Implement _on_complete because we rely on return value of a query."""
import re
from airflow.providers.common.compat.openlineage.facet import (
Dataset,
Error,
ExternalQueryRunFacet,
ExtractionErrorRunFacet,
SQLJobFacet,
)
from airflow.providers.openlineage.extractors import OperatorLineage
from airflow.providers.openlineage.sqlparser import SQLParser
if not self._sql:
return OperatorLineage()
query_results = self._result or []
# This typically happens when no files were processed (empty directory)
if len(query_results) == 1 and ("file" not in query_results[0] or query_results[0]["file"] is None):
query_results = []
unique_dataset_paths, extraction_error_files = self._extract_openlineage_unique_dataset_paths(
query_results
)
input_datasets = [Dataset(namespace=namespace, name=name) for namespace, name in unique_dataset_paths]
run_facets = {}
if extraction_error_files:
self.log.debug(
"Unable to extract Dataset namespace and name for the following files: `%s`.",
extraction_error_files,
)
run_facets["extractionError"] = ExtractionErrorRunFacet(
totalTasks=len(query_results),
failedTasks=len(extraction_error_files),
errors=[
Error(
errorMessage="Unable to extract Dataset namespace and name.",
stackTrace=None,
task=file_uri,
taskNumber=None,
)
for file_uri in extraction_error_files
],
)
connection = self.hook.get_connection(getattr(self.hook, str(self.hook.conn_name_attr)))
database_info = self.hook.get_openlineage_database_info(connection)
dest_name = self.table
schema = self.hook.get_openlineage_default_schema()
database = database_info.database
if schema:
dest_name = f"{schema}.{dest_name}"
if database:
dest_name = f"{database}.{dest_name}"
snowflake_namespace = SQLParser.create_namespace(database_info)
query = SQLParser.normalize_sql(self._sql)
query = re.sub(r"\n+", "\n", re.sub(r" +", " ", query))
run_facets["externalQuery"] = ExternalQueryRunFacet(
externalQueryId=self.hook.query_ids[0], source=snowflake_namespace
)
return OperatorLineage(
inputs=input_datasets,
outputs=[Dataset(namespace=snowflake_namespace, name=dest_name)],
job_facets={"sql": SQLJobFacet(query=query)},
run_facets=run_facets,
)
| CopyFromExternalStageToSnowflakeOperator |
python | python__mypy | mypyc/ir/rtypes.py | {
"start": 6421,
"end": 7106
} | class ____(RType):
"""The void type (no value).
This is a singleton -- use void_rtype (below) to refer to this instead of
constructing a new instance.
"""
is_unboxed = False
name = "void"
ctype = "void"
def accept(self, visitor: RTypeVisitor[T]) -> T:
return visitor.visit_rvoid(self)
@property
def may_be_immortal(self) -> bool:
return False
def serialize(self) -> str:
return "void"
def __eq__(self, other: object) -> TypeGuard[RVoid]:
return isinstance(other, RVoid)
def __hash__(self) -> int:
return hash(RVoid)
# Singleton instance of RVoid
void_rtype: Final = RVoid()
@final
| RVoid |
python | boto__boto3 | tests/unit/dynamodb/test_types.py | {
"start": 5017,
"end": 7236
} | class ____(unittest.TestCase):
def setUp(self):
self.deserializer = TypeDeserializer()
def test_deserialize_invalid_type(self):
with pytest.raises(TypeError, match=r'FOO is not supported'):
self.deserializer.deserialize({'FOO': 'bar'})
def test_deserialize_empty_structure(self):
with pytest.raises(TypeError, match=r'Value must be a nonempty'):
self.assertEqual(self.deserializer.deserialize({}), {})
def test_deserialize_null(self):
assert self.deserializer.deserialize({"NULL": True}) is None
def test_deserialize_boolean(self):
assert self.deserializer.deserialize({"BOOL": False}) is False
def test_deserialize_integer(self):
assert self.deserializer.deserialize({'N': '1'}) == Decimal('1')
def test_deserialize_decimal(self):
assert self.deserializer.deserialize({'N': '1.25'}) == Decimal('1.25')
def test_deserialize_string(self):
assert self.deserializer.deserialize({'S': 'foo'}) == 'foo'
def test_deserialize_binary(self):
assert self.deserializer.deserialize({'B': b'\x00'}) == Binary(b'\x00')
def test_deserialize_number_set(self):
assert self.deserializer.deserialize({'NS': ['1', '1.25']}) == {
Decimal('1'),
Decimal('1.25'),
}
def test_deserialize_string_set(self):
assert self.deserializer.deserialize({'SS': ['foo', 'bar']}) == {
'foo',
'bar',
}
def test_deserialize_binary_set(self):
assert self.deserializer.deserialize({'BS': [b'\x00', b'\x01']}) == {
Binary(b'\x00'),
Binary(b'\x01'),
}
def test_deserialize_list(self):
assert self.deserializer.deserialize(
{'L': [{'N': '1'}, {'S': 'foo'}, {'L': [{'N': '1.25'}]}]}
) == [Decimal('1'), 'foo', [Decimal('1.25')]]
def test_deserialize_map(self):
assert self.deserializer.deserialize(
{
'M': {
'foo': {'S': 'mystring'},
'bar': {'M': {'baz': {'N': '1'}}},
}
}
) == {'foo': 'mystring', 'bar': {'baz': Decimal('1')}}
| TestDeserializer |
python | django__django | tests/queries/tests.py | {
"start": 174560,
"end": 175249
} | class ____(TestCase):
def test_ticket_21787(self):
sc1 = SpecialCategory.objects.create(special_name="sc1", name="sc1")
sc2 = SpecialCategory.objects.create(special_name="sc2", name="sc2")
sc3 = SpecialCategory.objects.create(special_name="sc3", name="sc3")
c1 = CategoryItem.objects.create(category=sc1)
CategoryItem.objects.create(category=sc2)
self.assertSequenceEqual(
SpecialCategory.objects.exclude(categoryitem__id=c1.pk).order_by("name"),
[sc2, sc3],
)
self.assertSequenceEqual(
SpecialCategory.objects.filter(categoryitem__id=c1.pk), [sc1]
)
| ForeignKeyToBaseExcludeTests |
python | TheAlgorithms__Python | graphs/bidirectional_breadth_first_search.py | {
"start": 3315,
"end": 6045
} | class ____:
"""
>>> bd_bfs = BidirectionalBreadthFirstSearch((0, 0), (len(grid) - 1,
... len(grid[0]) - 1))
>>> bd_bfs.fwd_bfs.start.pos == bd_bfs.bwd_bfs.target.pos
True
>>> bd_bfs.retrace_bidirectional_path(bd_bfs.fwd_bfs.start,
... bd_bfs.bwd_bfs.start)
[(0, 0)]
>>> bd_bfs.search() # doctest: +NORMALIZE_WHITESPACE
[(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 3),
(2, 4), (3, 4), (3, 5), (3, 6), (4, 6), (5, 6), (6, 6)]
"""
def __init__(self, start, goal):
self.fwd_bfs = BreadthFirstSearch(start, goal)
self.bwd_bfs = BreadthFirstSearch(goal, start)
self.reached = False
def search(self) -> Path | None:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
current_fwd_node = self.fwd_bfs.node_queue.pop(0)
current_bwd_node = self.bwd_bfs.node_queue.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
self.reached = True
return self.retrace_bidirectional_path(
current_fwd_node, current_bwd_node
)
self.fwd_bfs.target = current_bwd_node
self.bwd_bfs.target = current_fwd_node
successors = {
self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(node)
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
fwd_path = self.fwd_bfs.retrace_path(fwd_node)
bwd_path = self.bwd_bfs.retrace_path(bwd_node)
bwd_path.pop()
bwd_path.reverse()
path = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
init = (0, 0)
goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
start_bfs_time = time.time()
bfs = BreadthFirstSearch(init, goal)
path = bfs.search()
bfs_time = time.time() - start_bfs_time
print("Unidirectional BFS computation time : ", bfs_time)
start_bd_bfs_time = time.time()
bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
bd_path = bd_bfs.search()
bd_bfs_time = time.time() - start_bd_bfs_time
print("Bidirectional BFS computation time : ", bd_bfs_time)
| BidirectionalBreadthFirstSearch |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 15368,
"end": 16693
} | class ____(PrefectFilterBaseModel):
"""Filter by `FlowRun.start_time`."""
before_: Optional[DateTime] = Field(
default=None,
description="Only include flow runs starting at or before this time",
)
after_: Optional[DateTime] = Field(
default=None,
description="Only include flow runs starting at or after this time",
)
is_null_: Optional[bool] = Field(
default=None, description="If true, only return flow runs without a start time"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.before_ is not None:
filters.append(
coalesce(db.FlowRun.start_time, db.FlowRun.expected_start_time)
<= self.before_
)
if self.after_ is not None:
filters.append(
coalesce(db.FlowRun.start_time, db.FlowRun.expected_start_time)
>= self.after_
)
if self.is_null_ is not None:
filters.append(
db.FlowRun.start_time.is_(None)
if self.is_null_
else db.FlowRun.start_time.is_not(None)
)
return filters
| FlowRunFilterStartTime |
python | mlflow__mlflow | dev/clint/tests/rules/test_no_class_based_tests.py | {
"start": 508,
"end": 628
} | class ____:
def test_something(self):
pass
# Good - class without test methods (utility class)
| TestAnotherThing |
python | doocs__leetcode | solution/0900-0999/0964.Least Operators to Express Number/Solution.py | {
"start": 0,
"end": 454
} | class ____:
def leastOpsExpressTarget(self, x: int, target: int) -> int:
@cache
def dfs(v: int) -> int:
if x >= v:
return min(v * 2 - 1, 2 * (x - v))
k = 2
while x**k < v:
k += 1
if x**k - v < v:
return min(k + dfs(x**k - v), k - 1 + dfs(v - x ** (k - 1)))
return k - 1 + dfs(v - x ** (k - 1))
return dfs(target)
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructorCallable1.py | {
"start": 2188,
"end": 2407
} | class ____(Generic[T1]):
x: T1
e1: Callable[[int], E[int]] = E
def func2(x: T1) -> E[T1]: ...
e2: Callable[[int], E[int]] = func2
def cast_to_callable(cls: Callable[P, T1]) -> Callable[P, T1]:
return cls
| E |
python | dabeaz-course__practical-python | Solutions/4_10/tableformat.py | {
"start": 1274,
"end": 1963
} | class ____(Exception):
pass
def create_formatter(name):
'''
Create an appropriate formatter given an output format name
'''
if name == 'txt':
return TextTableFormatter()
elif name == 'csv':
return CSVTableFormatter()
elif name == 'html':
return HTMLTableFormatter()
else:
raise FormatError(f'Unknown table format {name}')
def print_table(objects, columns, formatter):
'''
Make a nicely formatted table from a list of objects and attribute names.
'''
formatter.headings(columns)
for obj in objects:
rowdata = [ str(getattr(obj, name)) for name in columns ]
formatter.row(rowdata)
| FormatError |
python | django__django | tests/admin_views/models.py | {
"start": 16328,
"end": 16576
} | class ____(models.Model):
name = models.CharField(max_length=20)
toppings = models.ManyToManyField("Topping", related_name="pizzas")
# Pizza's ModelAdmin has readonly_fields = ['toppings'].
# toppings is editable for this model's admin.
| Pizza |
python | agronholm__apscheduler | src/apscheduler/triggers/combining.py | {
"start": 1137,
"end": 4353
} | class ____(BaseCombiningTrigger):
"""
Fires on times produced by the enclosed triggers whenever the fire times are within
the given threshold.
If the produced fire times are not within the given threshold of each other, the
trigger(s) that produced the earliest fire time will be asked for their next fire
time and the iteration is restarted. If instead all the triggers agree on a fire
time, all the triggers are asked for their next fire times and the earliest of the
previously produced fire times will be returned.
This trigger will be finished when any of the enclosed trigger has finished.
:param triggers: triggers to combine
:param threshold: maximum time difference between the next fire times of the
triggers in order for the earliest of them to be returned from :meth:`next` (in
seconds, or as timedelta)
:param max_iterations: maximum number of iterations of fire time calculations before
giving up
"""
threshold: timedelta = attrs.field(converter=as_timedelta, default=1)
max_iterations: int | None = 10000
def next(self) -> datetime | None:
if not self._next_fire_times:
# Fill out the fire times on the first run
self._next_fire_times = [t.next() for t in self.triggers]
for _ in range(self.max_iterations):
# Find the earliest and latest fire times
earliest_fire_time: datetime | None = None
latest_fire_time: datetime | None = None
for fire_time in self._next_fire_times:
# If any of the fire times is None, this trigger is finished
if fire_time is None:
return None
if earliest_fire_time is None or earliest_fire_time > fire_time:
earliest_fire_time = fire_time
if latest_fire_time is None or latest_fire_time < fire_time:
latest_fire_time = fire_time
# Replace all the fire times that were within the threshold
for i, _trigger in enumerate(self.triggers):
if self._next_fire_times[i] - earliest_fire_time <= self.threshold:
self._next_fire_times[i] = self.triggers[i].next()
# If all the fire times were within the threshold, return the earliest one
if latest_fire_time - earliest_fire_time <= self.threshold:
return earliest_fire_time
else:
raise MaxIterationsReached
def __getstate__(self) -> dict[str, Any]:
state = super().__getstate__()
state["threshold"] = self.threshold
state["max_iterations"] = self.max_iterations
return state
def __setstate__(self, state: dict[str, Any]) -> None:
require_state_version(self, state, 1)
super().__setstate__(state)
self.threshold = state["threshold"]
self.max_iterations = state["max_iterations"]
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}({self.triggers}, "
f"threshold={self.threshold.total_seconds()}, "
f"max_iterations={self.max_iterations})"
)
@attrs.define
| AndTrigger |
python | doocs__leetcode | solution/2200-2299/2204.Distance to a Cycle in Undirected Graph/Solution.py | {
"start": 0,
"end": 640
} | class ____:
def distanceToCycle(self, n: int, edges: List[List[int]]) -> List[int]:
g = defaultdict(set)
for a, b in edges:
g[a].add(b)
g[b].add(a)
q = deque(i for i in range(n) if len(g[i]) == 1)
f = [0] * n
seq = []
while q:
i = q.popleft()
seq.append(i)
for j in g[i]:
g[j].remove(i)
f[i] = j
if len(g[j]) == 1:
q.append(j)
g[i].clear()
ans = [0] * n
for i in seq[::-1]:
ans[i] = ans[f[i]] + 1
return ans
| Solution |
python | pypa__hatch | docs/.hooks/expand_blocks.py | {
"start": 1039,
"end": 1351
} | class ____(Preprocessor):
def run(self, lines): # noqa: PLR6301
markdown = "\n".join(lines)
markdown = _config_example_regex.sub(_config_example_replace, markdown)
markdown = _code_tab_regex.sub(_code_tab_replace, markdown)
return markdown.splitlines()
| ExpandedBlocksPreprocessor |
python | gevent__gevent | src/greentest/3.13/test_ssl.py | {
"start": 36165,
"end": 67563
} | class ____(unittest.TestCase):
def test_constructor(self):
for protocol in PROTOCOLS:
if has_tls_protocol(protocol):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.protocol, protocol)
with warnings_helper.check_warnings():
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
expected = {
'AES128-GCM-SHA256',
'ECDHE-ECDSA-AES128-GCM-SHA256',
'ECDHE-RSA-AES128-GCM-SHA256',
'DHE-RSA-AES128-GCM-SHA256',
'AES256-GCM-SHA384',
'ECDHE-ECDSA-AES256-GCM-SHA384',
'ECDHE-RSA-AES256-GCM-SHA384',
'DHE-RSA-AES256-GCM-SHA384',
}
intersection = names.intersection(expected)
self.assertGreaterEqual(
len(intersection), 2, f"\ngot: {sorted(names)}\nexpected: {sorted(expected)}"
)
def test_options(self):
# Test default SSLContext options
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT)
self.assertEqual(default, ctx.options)
# disallow TLSv1
with warnings_helper.check_warnings():
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
# allow TLSv1
with warnings_helper.check_warnings():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
# clear all options
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
# invalid options
with self.assertRaises(OverflowError):
ctx.options = -1
with self.assertRaises(OverflowError):
ctx.options = 2 ** 100
with self.assertRaises(TypeError):
ctx.options = "abc"
def test_verify_mode_protocol(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@ignore_deprecation
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# OpenSSL default is MINIMUM_SUPPORTED, however some vendors like
# Fedora override the setting to TLS 1.0.
minimum_range = {
# stock OpenSSL
ssl.TLSVersion.MINIMUM_SUPPORTED,
# Fedora 29 uses TLS 1.0 by default
ssl.TLSVersion.TLSv1,
# RHEL 8 uses TLS 1.2 by default
ssl.TLSVersion.TLSv1_2
}
maximum_range = {
# stock OpenSSL
ssl.TLSVersion.MAXIMUM_SUPPORTED,
# Fedora 32 uses TLS 1.3 by default
ssl.TLSVersion.TLSv1_3
}
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertIn(
ctx.maximum_version, maximum_range
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.TLSv1_1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
if has_tls_protocol(ssl.PROTOCOL_TLSv1_1):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(
hasattr(ssl.SSLContext, 'security_level'),
"requires OpenSSL >= 1.1.0"
)
def test_security_level(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# The default security callback allows for levels between 0-5
# with OpenSSL defaulting to 1, however some vendors override the
# default value (e.g. Debian defaults to 2)
security_level_range = {
0,
1, # OpenSSL default
2, # Debian
3,
4,
5,
}
self.assertIn(ctx.security_level, security_level_range)
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
ctx.verify_flags = ssl.VERIFY_ALLOW_PROXY_CERTS
self.assertEqual(ctx.verify_flags, ssl.VERIFY_ALLOW_PROXY_CERTS)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM (lib|routines)"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM (lib|routines)"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM (lib|routines)"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM (lib|routines)"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM (lib|routines)"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Allow for flexible libssl error messages.
regex = re.compile(r"""(
key values mismatch # OpenSSL
|
KEY_VALUES_MISMATCH # AWS-LC
)""", re.X)
with self.assertRaisesRegex(ssl.SSLError, regex):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM (lib|routines)"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(
ssl.SSLError,
"no start line: cadata does not contain a certificate"
):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(
ssl.SSLError,
"not enough data: cadata does not contain a certificate"
):
ctx.load_verify_locations(cadata=b"broken")
with self.assertRaises(ssl.SSLError):
ctx.load_verify_locations(cadata=cacert_der + b"A")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
try:
ctx.load_dh_params(DHFILE)
except RuntimeError:
if Py_DEBUG_WIN32:
self.skipTest("not supported on Win32 debug build")
raise
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
for proto in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': 'Mar 29 12:29:49 2033 GMT',
'notBefore': 'Mar 30 12:29:49 2003 GMT',
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(support.Py_DEBUG,
"Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
self.assertEqual(ctx.options & ssl.OP_LEGACY_SERVER_CONNECT,
0 if IS_OPENSSL_3_0_0 else ssl.OP_LEGACY_SERVER_CONNECT)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.verify_flags & ssl.VERIFY_X509_PARTIAL_CHAIN,
ssl.VERIFY_X509_PARTIAL_CHAIN)
self.assertEqual(ctx.verify_flags & ssl.VERIFY_X509_STRICT,
ssl.VERIFY_X509_STRICT)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
if has_tls_protocol(ssl.PROTOCOL_TLSv1):
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(
ssl.PROTOCOL_TLSv1_2,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True
)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1_2)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(), server_side=True)
self.assertIsInstance(obj, MySSLObject)
def test_num_tickest(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.num_tickets, 2)
ctx.num_tickets = 1
self.assertEqual(ctx.num_tickets, 1)
ctx.num_tickets = 0
self.assertEqual(ctx.num_tickets, 0)
with self.assertRaises(ValueError):
ctx.num_tickets = -1
with self.assertRaises(TypeError):
ctx.num_tickets = None
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.num_tickets, 2)
with self.assertRaises(ValueError):
ctx.num_tickets = 1
| ContextTests |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/views/test_user_stats.py | {
"start": 2317,
"end": 2592
} | class ____:
def test_user_stats(self, client_user_stats_reader, recwarn):
resp = client_user_stats_reader.get("/userstatschartview/chart", follow_redirects=True)
_assert_dataset_deprecation_warning(recwarn)
assert resp.status_code == 200
| TestUserStats |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_sql.py | {
"start": 12417,
"end": 16480
} | class ____(CloudSQLBaseOperator):
"""
Create a new Cloud SQL instance.
If an instance with the same name exists, no action will be taken and
the operator will succeed.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSQLCreateInstanceOperator`
:param body: Body required by the Cloud SQL insert API, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/insert
#request-body
:param instance: Cloud SQL instance ID. This does not include the project ID.
:param project_id: Optional, Google Cloud Project ID. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1beta4).
:param validate_body: True if body should be validated, False otherwise.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_sql_create_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance",
"body",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gcp_sql_create_template_fields]
ui_color = "#FADBDA"
operator_extra_links = (CloudSQLInstanceLink(),)
def __init__(
self,
*,
body: dict,
instance: str,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1beta4",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.body = body
self.validate_body = validate_body
super().__init__(
project_id=project_id,
instance=instance,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.body:
raise AirflowException("The required parameter 'body' is empty")
def _validate_body_fields(self) -> None:
if self.validate_body:
GcpBodyFieldValidator(CLOUD_SQL_CREATE_VALIDATION, api_version=self.api_version).validate(
self.body
)
def execute(self, context: Context) -> None:
hook = CloudSQLHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self._validate_body_fields()
if not self._check_if_instance_exists(self.instance, hook):
hook.create_instance(project_id=self.project_id, body=self.body)
else:
self.log.info("Cloud SQL instance with ID %s already exists. Aborting create.", self.instance)
CloudSQLInstanceLink.persist(
context=context,
project_id=self.project_id or hook.project_id,
)
instance_resource = hook.get_instance(project_id=self.project_id, instance=self.instance)
service_account_email = instance_resource["serviceAccountEmailAddress"]
task_instance = context["task_instance"]
task_instance.xcom_push(key="service_account_email", value=service_account_email)
| CloudSQLCreateInstanceOperator |
python | pydantic__pydantic | tests/typechecking/decorators.py | {
"start": 10666,
"end": 11790
} | class ____(BaseModel):
a: int = 1
@field_serializer('a', mode='wrap')
def no_handler(self, value: Any) -> Any:
"""TODO This shouldn't be valid.
At runtime, `inspect_field_serializer` raises an error, as the `handler` argument is missing.
However, there's no type checking error as the provided signature matches
`pydantic_core.core_schema.GeneralWrapNoInfoSerializerFunction`.
"""
@field_serializer('a', mode='wrap') # type: ignore[type-var] # pyright: ignore[reportArgumentType]
@staticmethod
def staticmethod_no_handler(value: Any) -> Any: ...
@field_serializer('a', mode='wrap')
def valid_no_info(self, value: Any, handler: SerializerFunctionWrapHandler) -> Any: ...
@field_serializer('a', mode='wrap')
def valid_info_default(
self, value: Any, handler: SerializerFunctionWrapHandler, info: FieldSerializationInfo
) -> Any: ...
@field_serializer('a', mode='wrap')
def valid_info(
self, value: Any, handler: SerializerFunctionWrapHandler, info: FieldSerializationInfo[int]
) -> Any: ...
| WrapFieldSerializer |
python | pypa__hatch | src/hatch/venv/core.py | {
"start": 221,
"end": 3701
} | class ____:
IGNORED_ENV_VARS = ("__PYVENV_LAUNCHER__", "PYTHONHOME")
def __init__(self, directory, platform, verbosity=0):
self.directory = directory
self.platform = platform
self.verbosity = verbosity
self.python_info = PythonInfo(platform)
self._env_vars_to_restore = {}
self._executables_directory = None
def activate(self):
self._env_vars_to_restore["VIRTUAL_ENV"] = os.environ.pop("VIRTUAL_ENV", None)
os.environ["VIRTUAL_ENV"] = str(self.directory)
old_path = os.environ.pop("PATH", None)
self._env_vars_to_restore["PATH"] = old_path
if old_path is None:
os.environ["PATH"] = f"{self.executables_directory}{os.pathsep}{os.defpath}"
else:
os.environ["PATH"] = f"{self.executables_directory}{os.pathsep}{old_path}"
for env_var in self.IGNORED_ENV_VARS:
self._env_vars_to_restore[env_var] = os.environ.pop(env_var, None)
def deactivate(self):
for env_var, value in self._env_vars_to_restore.items():
if value is None:
os.environ.pop(env_var, None)
else:
os.environ[env_var] = value
self._env_vars_to_restore.clear()
def create(self, python, *, allow_system_packages=False):
# WARNING: extremely slow import
from virtualenv import cli_run
self.directory.ensure_parent_dir_exists()
command = [str(self.directory), "--no-download", "--no-periodic-update", "--python", python]
if allow_system_packages:
command.append("--system-site-packages")
# Decrease verbosity since the virtualenv CLI defaults to something like +2 verbosity
add_verbosity_flag(command, self.verbosity, adjustment=-1)
cli_run(command)
def remove(self):
self.directory.remove()
def exists(self):
return self.directory.is_dir()
@property
def executables_directory(self):
if self._executables_directory is None:
exe_dir = self.directory / ("Scripts" if self.platform.windows else "bin")
if exe_dir.is_dir():
self._executables_directory = exe_dir
# PyPy
elif self.platform.windows:
exe_dir = self.directory / "bin"
if exe_dir.is_dir():
self._executables_directory = exe_dir
else:
msg = f"Unable to locate executables directory within: {self.directory}"
raise OSError(msg)
# Debian
elif (self.directory / "local").is_dir(): # no cov
exe_dir = self.directory / "local" / "bin"
if exe_dir.is_dir():
self._executables_directory = exe_dir
else:
msg = f"Unable to locate executables directory within: {self.directory}"
raise OSError(msg)
else:
msg = f"Unable to locate executables directory within: {self.directory}"
raise OSError(msg)
return self._executables_directory
@property
def environment(self):
return self.python_info.environment
@property
def sys_path(self):
return self.python_info.sys_path
def __enter__(self):
self.activate()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.deactivate()
| VirtualEnv |
python | celery__celery | t/unit/backends/test_redis.py | {
"start": 1575,
"end": 1982
} | class ____(conftest.MockCallbacks):
def __init__(self, ignore_subscribe_messages=False):
self._subscribed_to = set()
def close(self):
self._subscribed_to = set()
def subscribe(self, *args):
self._subscribed_to.update(args)
def unsubscribe(self, *args):
self._subscribed_to.difference_update(args)
def get_message(self, timeout=None):
pass
| PubSub |
python | scikit-image__scikit-image | tests/skimage/segmentation/test_watershed.py | {
"start": 3152,
"end": 33379
} | class ____(unittest.TestCase):
eight = np.ones((3, 3), bool)
def test_watershed01(self):
"watershed 1"
data = np.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
np.uint8,
)
markers = np.array(
[
[-1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
np.int8,
)
out = watershed(data, markers, self.eight)
expected = np.array(
[
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
]
)
error = diff(expected, out)
assert error < eps
def test_watershed02(self):
"watershed 2"
data = np.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
np.uint8,
)
markers = np.array(
[
[-1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
np.int8,
)
out = watershed(data, markers)
error = diff(
[
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, 1, 1, 1, -1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, 1, 1, 1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
],
out,
)
self.assertTrue(error < eps)
def test_watershed03(self):
"watershed 3"
data = np.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
np.uint8,
)
markers = np.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 0, 3, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1],
],
np.int8,
)
out = watershed(data, markers)
error = diff(
[
[-1, -1, -1, -1, -1, -1, -1],
[-1, 0, 2, 0, 3, 0, -1],
[-1, 2, 2, 0, 3, 3, -1],
[-1, 2, 2, 0, 3, 3, -1],
[-1, 2, 2, 0, 3, 3, -1],
[-1, 0, 2, 0, 3, 0, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
],
out,
)
self.assertTrue(error < eps)
def test_watershed04(self):
"watershed 4"
data = np.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
np.uint8,
)
markers = np.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 0, 3, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1],
],
np.int8,
)
out = watershed(data, markers, self.eight)
error = diff(
[
[-1, -1, -1, -1, -1, -1, -1],
[-1, 2, 2, 0, 3, 3, -1],
[-1, 2, 2, 0, 3, 3, -1],
[-1, 2, 2, 0, 3, 3, -1],
[-1, 2, 2, 0, 3, 3, -1],
[-1, 2, 2, 0, 3, 3, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
],
out,
)
self.assertTrue(error < eps)
def test_watershed05(self):
"watershed 5"
data = np.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
np.uint8,
)
markers = np.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 3, 0, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1],
],
np.int8,
)
out = watershed(data, markers, self.eight)
error = diff(
[
[-1, -1, -1, -1, -1, -1, -1],
[-1, 3, 3, 0, 2, 2, -1],
[-1, 3, 3, 0, 2, 2, -1],
[-1, 3, 3, 0, 2, 2, -1],
[-1, 3, 3, 0, 2, 2, -1],
[-1, 3, 3, 0, 2, 2, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
],
out,
)
self.assertTrue(error < eps)
def test_watershed06(self):
"watershed 6"
data = np.array(
[
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
np.uint8,
)
markers = np.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-1, 0, 0, 0, 0, 0, 0],
],
np.int8,
)
out = watershed(data, markers, self.eight)
error = diff(
[
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
],
out,
)
self.assertTrue(error < eps)
def test_watershed07(self):
"A regression test of a competitive case that failed"
data = blob
mask = data != 255
markers = np.zeros(data.shape, int)
markers[6, 7] = 1
markers[14, 7] = 2
out = watershed(data, markers, self.eight, mask=mask)
#
# The two objects should be the same size, except possibly for the
# border region
#
size1 = np.sum(out == 1)
size2 = np.sum(out == 2)
self.assertTrue(abs(size1 - size2) <= 6)
def test_watershed08(self):
"The border pixels + an edge are all the same value"
data = blob.copy()
data[10, 7:9] = 141
mask = data != 255
markers = np.zeros(data.shape, int)
markers[6, 7] = 1
markers[14, 7] = 2
out = watershed(data, markers, self.eight, mask=mask)
#
# The two objects should be the same size, except possibly for the
# border region
#
size1 = np.sum(out == 1)
size2 = np.sum(out == 2)
self.assertTrue(abs(size1 - size2) <= 6)
def test_watershed09(self):
"""Test on an image of reasonable size
This is here both for timing (does it take forever?) and to
ensure that the memory constraints are reasonable
"""
image = np.zeros((1000, 1000))
coords = np.random.uniform(0, 1000, (100, 2)).astype(int)
markers = np.zeros((1000, 1000), int)
idx = 1
for x, y in coords:
image[x, y] = 1
markers[x, y] = idx
idx += 1
image = gaussian(image, sigma=4, mode='reflect')
watershed(image, markers, self.eight)
ndi.watershed_ift(image.astype(np.uint16), markers, self.eight)
def test_watershed10(self):
"watershed 10"
data = np.array(
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], np.uint8
)
markers = np.array(
[[1, 0, 0, 2], [0, 0, 0, 0], [0, 0, 0, 0], [3, 0, 0, 4]], np.int8
)
out = watershed(data, markers, self.eight)
error = diff([[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]], out)
self.assertTrue(error < eps)
def test_watershed11(self):
'''Make sure that all points on this plateau are assigned to closest seed'''
# https://github.com/scikit-image/scikit-image/issues/803
#
# Make sure that no point in a level image is farther away
# from its seed than any other
#
image = np.zeros((21, 21))
markers = np.zeros((21, 21), int)
markers[5, 5] = 1
markers[5, 10] = 2
markers[10, 5] = 3
markers[10, 10] = 4
structure = np.array(
[[False, True, False], [True, True, True], [False, True, False]]
)
out = watershed(image, markers, structure)
i, j = np.mgrid[0:21, 0:21]
d = np.dstack(
[
np.sqrt((i.astype(float) - i0) ** 2, (j.astype(float) - j0) ** 2)
for i0, j0 in ((5, 5), (5, 10), (10, 5), (10, 10))
]
)
dmin = np.min(d, 2)
self.assertTrue(np.all(d[i, j, out[i, j] - 1] == dmin))
def test_watershed12(self):
"The watershed line"
data = np.array(
[
[
203,
255,
203,
153,
153,
153,
153,
153,
153,
153,
153,
153,
153,
153,
153,
153,
],
[
203,
255,
203,
153,
153,
153,
102,
102,
102,
102,
102,
102,
153,
153,
153,
153,
],
[
203,
255,
203,
203,
153,
153,
102,
102,
77,
0,
102,
102,
153,
153,
203,
203,
],
[
203,
255,
255,
203,
153,
153,
153,
102,
102,
102,
102,
153,
153,
203,
203,
255,
],
[
203,
203,
255,
203,
203,
203,
153,
153,
153,
153,
153,
153,
203,
203,
255,
255,
],
[
153,
203,
255,
255,
255,
203,
203,
203,
203,
203,
203,
203,
203,
255,
255,
203,
],
[
153,
203,
203,
203,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
203,
203,
],
[
153,
153,
153,
203,
203,
203,
203,
203,
255,
203,
203,
203,
203,
203,
203,
153,
],
[
102,
102,
153,
153,
153,
153,
203,
203,
255,
203,
203,
255,
203,
153,
153,
153,
],
[
102,
102,
102,
102,
102,
153,
203,
255,
255,
203,
203,
203,
203,
153,
102,
153,
],
[
102,
51,
51,
102,
102,
153,
203,
255,
203,
203,
153,
153,
153,
153,
102,
153,
],
[
77,
51,
51,
102,
153,
153,
203,
255,
203,
203,
203,
153,
102,
102,
102,
153,
],
[
77,
0,
51,
102,
153,
203,
203,
255,
203,
255,
203,
153,
102,
51,
102,
153,
],
[
77,
0,
51,
102,
153,
203,
255,
255,
203,
203,
203,
153,
102,
0,
102,
153,
],
[
102,
0,
51,
102,
153,
203,
255,
203,
203,
153,
153,
153,
102,
102,
102,
153,
],
[
102,
102,
102,
102,
153,
203,
255,
203,
153,
153,
153,
153,
153,
153,
153,
153,
],
]
)
markerbin = data == 0
marker = label(markerbin)
ws = watershed(data, marker, connectivity=2, watershed_line=True)
for lab, area in zip(range(4), [34, 74, 74, 74]):
self.assertTrue(np.sum(ws == lab) == area)
def test_watershed_input_not_modified(self):
"""Test to ensure input markers are not modified."""
image = np.random.default_rng().random(size=(21, 21))
markers = np.zeros((21, 21), dtype=np.uint8)
markers[[5, 5, 15, 15], [5, 15, 5, 15]] = [1, 2, 3, 4]
original_markers = np.copy(markers)
result = watershed(image, markers)
np.testing.assert_equal(original_markers, markers)
assert not np.all(result == markers)
def test_compact_watershed():
# in this test, when compactness is greater than zero the watershed line
# is labeled with the closest marker (label=2)
# when compactness is zero the watershed line is labeled with
# the marker that reaches it first (label=1)
# because it has a zero cost path to the line.
image = np.zeros((5, 6))
image[:, 3] = 2 # watershed line
image[:, 4:] = 1
seeds = np.zeros((5, 6), dtype=int)
seeds[2, 0] = 1
seeds[2, 5] = 2
compact = watershed(image, seeds, compactness=0.01)
expected = np.array(
[
[1, 1, 1, 2, 2, 2],
[1, 1, 1, 2, 2, 2],
[1, 1, 1, 2, 2, 2],
[1, 1, 1, 2, 2, 2],
[1, 1, 1, 2, 2, 2],
],
dtype=int,
)
np.testing.assert_equal(compact, expected)
normal = watershed(image, seeds)
expected = np.array(
[
[1, 1, 1, 1, 2, 2],
[1, 1, 1, 1, 2, 2],
[1, 1, 1, 1, 2, 2],
[1, 1, 1, 1, 2, 2],
[1, 1, 1, 1, 2, 2],
],
dtype=int,
)
np.testing.assert_equal(normal, expected)
# checks that compact watershed labels with watershed lines are
# a subset of the labels from compact watershed for this specific example
compact_wsl = watershed(image, seeds, compactness=0.01, watershed_line=True)
difference = compact_wsl != compact
difference[compact_wsl == 0] = False
assert not np.any(difference)
def test_watershed_with_markers_offset():
"""
Check edge case behavior reported in gh-6632
While we initially viewed the behavior described in gh-6632 [1]_ as a bug,
we have reverted that decision in gh-7661. See [2]_ for an explanation.
So this test now actually asserts the behavior reported in gh-6632 as
correct.
.. [1] https://github.com/scikit-image/scikit-image/issues/6632.
.. [2] https://github.com/scikit-image/scikit-image/issues/7661#issuecomment-2645810807
"""
# Generate an initial image with two overlapping circles
x, y = np.indices((80, 80))
x1, y1, x2, y2 = 28, 28, 44, 52
r1, r2 = 16, 20
mask_circle1 = (x - x1) ** 2 + (y - y1) ** 2 < r1**2
mask_circle2 = (x - x2) ** 2 + (y - y2) ** 2 < r2**2
image = np.logical_or(mask_circle1, mask_circle2)
# Now we want to separate the two objects in image
# Generate the markers as local maxima of the distance to the background
# and then apply an y-offset
distance = ndi.distance_transform_edt(image)
coords = peak_local_max(distance, footprint=np.ones((3, 3)), labels=image)
coords[:, 0] += 6
mask = np.zeros(distance.shape, dtype=bool)
mask[tuple(coords.T)] = True
markers, _ = ndi.label(mask)
labels = watershed(-distance, markers, mask=image)
props = skimage.measure.regionprops(labels, intensity_image=-distance)
# Generally, assert that the smaller object could only conquer a thin line
# in the direction of the positive gradient
assert props[0].extent == 1
expected_region = np.arange(start=-10, stop=0, dtype=float).reshape(-1, 1)
np.testing.assert_equal(props[0].image_intensity, expected_region)
# Assert pixel count from reviewed reproducing example in bug report
assert props[0].num_pixels == 10
assert props[1].num_pixels == 1928
def test_watershed_simple_basin_overspill():
"""
Test edge case behavior when markers spill over into another basin / compete.
While we initially viewed the behavior described in gh-6632 [1]_ as a bug,
we have reverted that decision in gh-7661. See [2]_ for an explanation.
So this test now actually asserts the behavior reported in gh-6632 as
correct.
.. [1] https://github.com/scikit-image/scikit-image/issues/6632.
.. [2] https://github.com/scikit-image/scikit-image/issues/7661#issuecomment-2645810807
"""
# Scenario 1
# fmt: off
image = np.array([[6, 5, 4, 3, 0, 3, 0, 1, 2],
[6, 5, 4, 3, 0, 3, 0, 1, 2]])
markers = np.array([[0, 1, 0, 0, 0, 0, 0, 2, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
expected = np.array([[1, 1, 2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 2, 2, 2]])
# fmt: on
result = watershed(image, markers=markers)
np.testing.assert_equal(result, expected)
# Scenario 2
image = -np.array([1, 2, 2, 2, 2, 2, 3])
markers = np.array([1, 0, 0, 0, 0, 0, 2])
expected = np.array([1, 2, 2, 2, 2, 2, 2])
result = watershed(image, markers=markers, mask=image != 0)
np.testing.assert_array_equal(result, expected)
def test_watershed_evenly_distributed_overspill():
"""
Edge case: Basins should be distributed evenly between contesting markers.
Markers should be prevented from spilling over into another basin and
conquering it against other markers with the same claim, just because they
get to the basin one step earlier.
"""
# Scenario 1: markers start with the same value
image = np.array([0, 2, 1, 1, 1, 1, 1, 1, 2, 0]) # fmt: skip
markers = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 2]) # fmt: skip
expected = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2, 2]) # fmt: skip
result = watershed(image, markers=markers)
np.testing.assert_equal(result, expected)
# Scenario 2: markers start with the different values
image = np.array([2, 2, 1, 1, 1, 1, 1, 1, 2, 0]) # fmt: skip
expected = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2, 2]) # fmt: skip
result = watershed(image, markers=markers)
np.testing.assert_equal(result, expected)
def test_markers_on_maxima():
"""Check that markers placed at maxima don't conquer other pixels.
Regression test for gh-7661 [1]_.
.. [1] https://github.com/scikit-image/scikit-image/issues/7661
"""
image = np.array([[0, 1, 2, 3, 4, 5, 4],
[0, 1, 2, 3, 4, 4, 4]]) # fmt: skip
markers = np.array([[1, 0, 0, 0, 0, 2, 0],
[0, 0, 0, 0, 0, 0, 0]]) # fmt: skip
expected = np.array([[1, 1, 1, 1, 1, 2, 1],
[1, 1, 1, 1, 1, 1, 1]]) # fmt: skip
result = watershed(image, markers=markers)
np.testing.assert_equal(result, expected)
def test_numeric_seed_watershed():
"""Test that passing just the number of seeds to watershed works."""
image = np.zeros((5, 6))
image[:, 3:] = 1
compact = watershed(image, 2, compactness=0.01)
expected = np.array(
[
[1, 1, 1, 1, 2, 2],
[1, 1, 1, 1, 2, 2],
[1, 1, 1, 1, 2, 2],
[1, 1, 1, 1, 2, 2],
[1, 1, 1, 1, 2, 2],
],
dtype=np.int32,
)
np.testing.assert_equal(compact, expected)
@pytest.mark.parametrize(
'dtype',
[np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64, np.int64],
)
def test_watershed_output_dtype(dtype):
image = np.zeros((100, 100))
markers = np.zeros((100, 100), dtype)
out = watershed(image, markers)
assert out.dtype == markers.dtype
def test_incorrect_markers_shape():
image = np.ones((5, 6))
markers = np.ones((5, 7))
with pytest.raises(ValueError):
watershed(image, markers)
def test_incorrect_mask_shape():
image = np.ones((5, 6))
mask = np.ones((5, 7))
with pytest.raises(ValueError):
watershed(image, markers=4, mask=mask)
def test_markers_in_mask():
data = blob
mask = data != 255
out = watershed(data, 25, connectivity=2, mask=mask)
# There should be no markers where the mask is false
assert np.all(out[~mask] == 0)
def test_no_markers():
data = blob
mask = data != 255
out = watershed(data, mask=mask)
assert np.max(out) == 2
def test_connectivity():
"""
Watershed segmentation should output different result for
different connectivity
when markers are calculated where None is supplied.
Issue = 5084
"""
# Generate a dummy BrightnessTemperature image
x, y = np.indices((406, 270))
x1, y1, x2, y2, x3, y3, x4, y4 = 200, 208, 300, 120, 100, 100, 340, 208
r1, r2, r3, r4 = 100, 50, 40, 80
mask_circle1 = (x - x1) ** 2 + (y - y1) ** 2 < r1**2
mask_circle2 = (x - x2) ** 2 + (y - y2) ** 2 < r2**2
mask_circle3 = (x - x3) ** 2 + (y - y3) ** 2 < r3**2
mask_circle4 = (x - x4) ** 2 + (y - y4) ** 2 < r4**2
image = np.logical_or(mask_circle1, mask_circle2)
image = np.logical_or(image, mask_circle3)
image = np.logical_or(image, mask_circle4)
# calculate distance in discrete increase
DummyBT = ndi.distance_transform_edt(image)
DummyBT_dis = np.around(DummyBT / 12, decimals=0) * 12
# calculate the mask
Img_mask = np.where(DummyBT_dis == 0, 0, 1)
# segments for connectivity 1 and 2
labels_c1 = watershed(
200 - DummyBT_dis, mask=Img_mask, connectivity=1, compactness=0.01
)
labels_c2 = watershed(
200 - DummyBT_dis, mask=Img_mask, connectivity=2, compactness=0.01
)
# assertions
assert np.unique(labels_c1).shape[0] == 6
assert np.unique(labels_c2).shape[0] == 5
# checking via area of each individual segment.
for lab, area in zip(range(6), [61824, 3653, 20467, 11097, 1301, 11278]):
assert np.sum(labels_c1 == lab) == area
for lab, area in zip(range(5), [61824, 3653, 20466, 12386, 11291]):
assert np.sum(labels_c2 == lab) == area
| TestWatershed |
python | openai__openai-python | src/openai/types/fine_tuning/dpo_method.py | {
"start": 228,
"end": 377
} | class ____(BaseModel):
hyperparameters: Optional[DpoHyperparameters] = None
"""The hyperparameters used for the DPO fine-tuning job."""
| DpoMethod |
python | google__pytype | pytype/pytd/transforms_test.py | {
"start": 150,
"end": 2563
} | class ____(parser_test_base.ParserTest):
"""Tests the code in transforms.py."""
def test_remove_mutable_list(self):
# Simple test for RemoveMutableParameters, with simplified list class
src = textwrap.dedent("""
from typing import Union
T = TypeVar('T')
T2 = TypeVar('T2')
class TrivialList(typing.Generic[T], object):
def append(self, v: T2) -> NoneType:
self = Union[T, T2]
class TrivialList2(typing.Generic[T], object):
def append(self, v: T2) -> NoneType:
self = Union[T, T2]
def get_first(self) -> T: ...
""")
expected = textwrap.dedent("""
T = TypeVar('T')
T2 = TypeVar('T2')
class TrivialList(typing.Generic[T], object):
def append(self, v: T) -> NoneType: ...
class TrivialList2(typing.Generic[T], object):
def append(self, v: T) -> NoneType: ...
def get_first(self) -> T: ...
""")
ast = self.Parse(src)
ast = transforms.RemoveMutableParameters(ast)
self.AssertSourceEquals(ast, expected)
def test_remove_mutable_dict(self):
# Test for RemoveMutableParameters, with simplified dict class.
src = textwrap.dedent("""
from typing import Union
K = TypeVar('K')
V = TypeVar('V')
T = TypeVar('T')
K2 = TypeVar('K2')
V2 = TypeVar('V2')
class MyDict(typing.Generic[K, V], object):
def getitem(self, k: K, default: T) -> Union[V, T]: ...
def setitem(self, k: K2, value: V2) -> NoneType:
self = dict[Union[K, K2], Union[V, V2]]
def getanykeyorvalue(self) -> Union[K, V]: ...
def setdefault(self, k: K2, v: V2) -> Union[V, V2]:
self = dict[Union[K, K2], Union[V, V2]]
""")
expected = textwrap.dedent("""
from typing import Union
K = TypeVar('K')
V = TypeVar('V')
T = TypeVar('T')
K2 = TypeVar('K2')
V2 = TypeVar('V2')
class MyDict(typing.Generic[K, V], object):
def getitem(self, k: K, default: V) -> V: ...
def setitem(self, k: K, value: V) -> NoneType: ...
def getanykeyorvalue(self) -> Union[K, V]: ...
def setdefault(self, k: K, v: V) -> V: ...
""")
ast = self.Parse(src)
ast = transforms.RemoveMutableParameters(ast)
self.AssertSourceEquals(ast, expected)
if __name__ == "__main__":
unittest.main()
| TestTransforms |
python | pypa__pip | src/pip/_internal/network/session.py | {
"start": 10397,
"end": 10698
} | class ____(CacheControlAdapter):
def cert_verify(
self,
conn: ConnectionPool,
url: str,
verify: bool | str,
cert: str | tuple[str, str] | None,
) -> None:
super().cert_verify(conn=conn, url=url, verify=False, cert=cert)
| InsecureCacheControlAdapter |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/prefect_gcp/workers/cloud_run.py | {
"start": 20943,
"end": 24794
} | class ____(BaseVariables):
"""
Default variables for the Cloud Run worker.
The schema for this class is used to populate the `variables` section of the default
base job template.
"""
region: str = Field(
default="us-central1",
description="The region where the Cloud Run Job resides.",
examples=["us-central1"],
)
credentials: Optional[GcpCredentials] = Field(
title="GCP Credentials",
default_factory=GcpCredentials,
description="The GCP Credentials used to initiate the "
"Cloud Run Job. If not provided credentials will be "
"inferred from the local environment.",
)
prefect_api_key_secret: Optional[SecretKeySelector] = Field(
title="Prefect API Key Secret",
default=None,
description=(
"A GCP secret containing a Prefect API Key. This key will be used "
"to authenticate Cloud Run tasks with Prefect Cloud. If not provided, the "
"PREFECT_API_KEY environment variable will be used if the worker has one."
),
)
prefect_api_auth_string_secret: Optional[SecretKeySelector] = Field(
title="Prefect API Auth String Secret",
default=None,
description=(
"A GCP secret containing a Prefect API authorization string. This "
"string will be used to authenticate Cloud Run tasks with Prefect Cloud. "
"If not provided, the PREFECT_API_AUTH_STRING environment variable will be "
"used if the worker has one."
),
)
image: Optional[str] = Field(
default=None,
title="Image Name",
description=(
"The image to use for a new Cloud Run Job. "
"If not set, the latest Prefect image will be used. "
"See https://cloud.google.com/run/docs/deploying#images."
),
examples=["docker.io/prefecthq/prefect:3-latest"],
)
cpu: Optional[str] = Field(
default=None,
title="CPU",
description=(
"The amount of compute allocated to the Cloud Run Job. "
"(1000m = 1 CPU). See "
"https://cloud.google.com/run/docs/configuring/cpu#setting-jobs."
),
examples=["1000m"],
pattern=r"^(\d*000)m$",
)
memory: Optional[str] = Field(
default=None,
title="Memory",
description=(
"The amount of memory allocated to the Cloud Run Job. "
"Must be specified in units of 'G', 'Gi', 'M', or 'Mi'. "
"See https://cloud.google.com/run/docs/configuring/memory-limits#setting."
),
examples=["512Mi"],
pattern=r"^\d+(?:G|Gi|M|Mi)$",
)
vpc_connector_name: Optional[str] = Field(
default=None,
title="VPC Connector Name",
description="The name of the VPC connector to use for the Cloud Run Job.",
)
service_account_name: Optional[str] = Field(
default=None,
title="Service Account Name",
description="The name of the service account to use for the task execution "
"of Cloud Run Job. By default Cloud Run jobs run as the default "
"Compute Engine Service Account. ",
examples=["service-account@example.iam.gserviceaccount.com"],
)
keep_job: Optional[bool] = Field(
default=False,
title="Keep Job After Completion",
description="Keep the completed Cloud Run Job after it has run.",
)
timeout: Optional[int] = Field(
title="Job Timeout",
default=600,
gt=0,
le=3600,
description=(
"Max allowed duration the Job may be active before Cloud Run will "
"actively try to mark it failed and kill associated containers (maximum of 3600 seconds, 1 hour)."
),
)
| CloudRunWorkerVariables |
python | getsentry__sentry | src/sentry/migrations/0975_grouplink_json_field.py | {
"start": 244,
"end": 1736
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("sentry", "0974_hc_json_field"),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[mod.to_jsonb("sentry_grouplink", "data")],
state_operations=[
migrations.AlterField(
model_name="grouplink",
name="data",
field=models.JSONField(default=dict),
),
],
)
]
| Migration |
python | apache__airflow | providers/apache/spark/tests/unit/apache/spark/hooks/test_spark_connect.py | {
"start": 958,
"end": 2530
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="spark-default",
conn_type="spark_connect",
host="sc://spark-host",
port=1000,
login="spark-user",
password="1234",
extra='{"queue": "root.etl", "deploy-mode": "cluster"}',
)
)
create_connection_without_db(
Connection(
conn_id="spark-test",
conn_type="spark_connect",
host="nowhere",
login="spark-user",
)
)
create_connection_without_db(
Connection(
conn_id="spark-app",
conn_type="spark_connect",
host="sc://cluster/app",
login="spark-user",
)
)
def test_get_connection_url(self):
expected_url = "sc://spark-host:1000/;user_id=spark-user;token=1234"
hook = SparkConnectHook(conn_id="spark-default")
assert hook.get_connection_url() == expected_url
expected_url = "sc://nowhere/;user_id=spark-user"
hook = SparkConnectHook(conn_id="spark-test")
assert hook.get_connection_url() == expected_url
hook = SparkConnectHook(conn_id="spark-app")
with pytest.raises(ValueError, match="not supported in Spark Connect connection URL"):
hook.get_connection_url()
| TestSparkConnectHook |
python | uqfoundation__dill | dill/tests/test_abc.py | {
"start": 472,
"end": 988
} | class ____(ABC):
@abc.abstractmethod
def foo(self):
"""A method"""
pass
@property
@abc.abstractmethod
def bar(self):
"""Property getter"""
pass
@bar.setter
@abc.abstractmethod
def bar(self, value):
"""Property setter"""
pass
@classmethod
@abc.abstractmethod
def cfoo(cls):
"""Class method"""
pass
@staticmethod
@abc.abstractmethod
def sfoo():
"""Static method"""
pass
| OneTwoThree |
python | pytorch__pytorch | test/quantization/core/test_quantized_op.py | {
"start": 186966,
"end": 232046
} | class ____(TestCase):
def _test_qlinear_impl(self, batch_size, input_channels, output_channels, use_bias,
post_op, use_multi_dim_input, use_channelwise, **post_op_kwargs):
decimal_val = 4
dtypes = [torch.quint8]
if torch.backends.quantized.engine == 'qnnpack':
# QNNPACK supports uint8 in the kernels. In the op we shift the int8
# weight values to uint8 to be on par with fbgemm. However, this causes
# some rounding issues in rare cases. So, we relax the check to allow
# off by one results.
decimal_val = 0
# only qnnpack qengine supports qint8 when xnnpack is available
if torch.backends.xnnpack.enabled:
dtypes.append(torch.qint8)
if qengine_is_onednn() and IS_ARM64:
dtypes.append(torch.qint8)
for dtype in dtypes:
# No support for channelwise in xnnpack (int8)
if dtype == torch.qint8 and use_channelwise:
return
nptype = np_dtype[dtype]
qlinear_prepack = torch.ops.quantized.linear_prepack
if post_op == 'relu':
qlinear = torch.ops.quantized.linear_relu
elif post_op == 'leaky_relu':
qlinear = torch.ops.quantized.linear_leaky_relu
else:
qlinear = torch.ops.quantized.linear
if use_multi_dim_input:
batch_size *= 3 # Test the multi-dim input tensor
X_scale = 1.5
X_zp = 5
X_value_min = -128 if dtype == torch.qint8 else 0
X_value_max = 127 if dtype == torch.qint8 else 255
X_q0 = np.round(
np.random.rand(batch_size, input_channels) *
(X_value_max - X_value_min)
+ X_value_min
).astype(nptype)
W_scales = np.random.rand(output_channels)
# xnnpack forces W_zp to 0 when using symmetric quantization
# ONEDNN only supports symmetric quantization of weight
if dtype == torch.qint8 or qengine_is_onednn():
W_zps = np.zeros(output_channels).astype(int)
else:
W_zps = np.round(np.random.rand(output_channels) * 100 - 50).astype(int)
# when using symmetric quantization
# special restriction for xnnpack fully connected op weight
# [-127, 127] instead of [-128, 127]
W_value_min = -127 if dtype == torch.qint8 else -128
W_value_max = 127
W_q0 = np.round(
np.random.rand(output_channels, input_channels)
* (W_value_max - W_value_min)
+ W_value_min
).astype(np.int8) # weight is always int8_t
b_value_min = -10
b_value_max = 10
b_q0 = np.round(
np.random.rand(output_channels) *
(b_value_max - b_value_min) + b_value_min
).astype(np.int32) if use_bias else None
if torch.backends.quantized.engine in ('x86', 'fbgemm', 'onednn') and not IS_ARM64:
avoid_vpmaddubsw_overflow_linear(
batch_size,
input_channels,
output_channels,
X_q0,
X_value_min,
X_value_max,
W_q0,
W_value_min,
W_value_max,
)
X = torch.from_numpy(_dequantize(
X_q0, X_scale, X_zp)).to(dtype=torch.float)
X_q = torch.quantize_per_tensor(
X, scale=X_scale, zero_point=X_zp, dtype=dtype)
if use_channelwise:
W = torch.from_numpy(_dequantize(W_q0, W_scales.reshape(
(-1, 1)), W_zps.reshape((-1, 1)))).to(dtype=torch.float)
W_q = torch.quantize_per_channel(W, scales=torch.from_numpy(W_scales),
zero_points=torch.from_numpy(W_zps), axis=0, dtype=torch.qint8)
b = torch.from_numpy(_dequantize(
b_q0, X_scale * W_scales, 0)).to(dtype=torch.float) if use_bias else None
b_q = torch.quantize_per_channel(b, scales=torch.from_numpy(X_scale * W_scales),
zero_points=torch.zeros(output_channels, dtype=torch.long),
axis=0, dtype=torch.qint32) if use_bias else None
else:
W = torch.from_numpy(_dequantize(
W_q0, W_scales[0], W_zps[0])).to(dtype=torch.float)
W_q = torch.quantize_per_tensor(W, scale=W_scales[0], zero_point=(
W_zps[0].astype(int).item()), dtype=torch.qint8)
b = torch.from_numpy(_dequantize(
b_q0, X_scale * (W_scales[0].item()), 0)).to(dtype=torch.float) if use_bias else None
b_q = torch.quantize_per_tensor(
b, scale=X_scale * (W_scales[0].item()), zero_point=0, dtype=torch.qint32) if use_bias else None
# Compare X_scale * W_scale * input_channels * X_value_max * W_value_max with
# Y_scale * 255 (max for uint8).
Y_scale = 12.34
Y_zp = 5
# Weight prepacking operator for quantized Linear
float_bias = b if use_bias else None
W_prepack = qlinear_prepack(W_q, float_bias)
if use_multi_dim_input:
X_q = X_q.view(3, int(batch_size / 3), input_channels)
# Quantized Linear operator with prepacked weight
Y_q = qlinear(X_q, W_prepack, Y_scale, Y_zp, **post_op_kwargs)
if not use_channelwise and post_op in ('none', 'relu'):
# Test the per-tensor quantization only
# Reference quantized Linear operator
Y_q_ref = qlinear_ref(X_q0, X_scale, X_zp, W_q0,
W_scales[0], W_zps[0], b_q0, Y_scale, Y_zp, dtype=nptype)
if post_op == 'relu':
Y_q_ref[Y_q_ref < Y_zp] = Y_zp
if use_multi_dim_input:
Y_q_ref = np.reshape(
Y_q_ref, (3, int(batch_size / 3), output_channels))
# Assert equal
np.testing.assert_array_almost_equal(Y_q_ref, Y_q.int_repr().numpy(), decimal=decimal_val)
# Test both per-tensor and per-channel quantization
# Reference quantized result from PyTorch Linear operator
W_fp32 = W_q.dequantize().to(dtype=torch.float)
X_fp32 = X_q.dequantize().to(dtype=torch.float)
b_fp32 = b_q.dequantize().to(dtype=torch.float) if use_bias else None
Y_fp32_ref = F.linear(X_fp32, W_fp32, b_fp32)
if post_op == 'relu':
Y_fp32_ref[Y_fp32_ref < 0.0] = 0.0
elif post_op == 'leaky_relu':
Y_fp32_ref = F.leaky_relu(Y_fp32_ref, **post_op_kwargs)
Y_q_ref2 = torch.quantize_per_tensor(
Y_fp32_ref, Y_scale, Y_zp, dtype)
# Assert equal
np.testing.assert_array_almost_equal(
Y_q_ref2.int_repr().numpy(), Y_q.int_repr().numpy(), decimal=decimal_val)
"""Tests the correctness of the quantized linear op."""
@override_qengines
def test_qlinear(self):
batch_size_list = [1, 4]
input_channels_list = [16, 32]
output_channels_list = [4, 8]
use_bias_list = [True, False]
use_multi_dim_input_list = [True, False]
use_channelwise_list = [True, False]
post_op = 'none'
cases = itertools.product(batch_size_list, input_channels_list, output_channels_list,
use_bias_list, use_multi_dim_input_list, use_channelwise_list)
for batch_size, input_channels, output_channels, use_bias, \
use_multi_dim_input, use_channelwise in cases:
self._test_qlinear_impl(batch_size, input_channels, output_channels,
use_bias, post_op, use_multi_dim_input, use_channelwise)
"""Tests the correctness of the quantized linear_relu op."""
@override_qengines
def test_qlinear_relu(self):
batch_size_list = [1, 4]
input_channels_list = [16, 32]
output_channels_list = [4, 8]
use_bias_list = [True, False]
use_multi_dim_input_list = [True, False]
use_channelwise_list = [True, False]
post_op = 'relu'
cases = itertools.product(batch_size_list, input_channels_list, output_channels_list,
use_bias_list, use_multi_dim_input_list, use_channelwise_list)
for batch_size, input_channels, output_channels, use_bias, \
use_multi_dim_input, use_channelwise in cases:
self._test_qlinear_impl(batch_size, input_channels, output_channels,
use_bias, post_op, use_multi_dim_input, use_channelwise)
@given(batch_size=st.integers(1, 4),
input_channels=st.integers(16, 32),
output_channels=st.integers(4, 8),
use_bias=st.booleans(),
use_relu=st.booleans(),
use_multi_dim_input=st.booleans(),
use_channelwise=st.booleans())
@skipIfNoFBGEMM
def test_qlinear_with_input_q_dq_qweight_dq_output_fp32(
self, batch_size, input_channels, output_channels, use_bias,
use_relu, use_multi_dim_input, use_channelwise):
decimal_val = 4
dtypes = [torch.quint8]
for dtype in dtypes:
# No support for channelwise in xnnpack (int8)
# ONEDNN does not support qint8
if dtype == torch.qint8 and (use_channelwise or qengine_is_onednn()):
return
nptype = np_dtype[dtype]
qlinear_prepack = torch.ops.quantized.linear_prepack
if use_relu:
qlinear = torch.ops.quantized.linear_with_input_q_dq_qweight_dq_relu_output_fp32
else:
qlinear = torch.ops.quantized.linear_with_input_q_dq_qweight_dq_output_fp32
if use_multi_dim_input:
batch_size *= 3 # Test the multi-dim input tensor
X_scale = 1.5
X_zp = 5
X_value_min = -128 if dtype == torch.qint8 else 0
X_value_max = 127 if dtype == torch.qint8 else 255
X_q0 = np.round(
np.random.rand(batch_size, input_channels) *
(X_value_max - X_value_min)
+ X_value_min
).astype(nptype)
W_scales = np.random.rand(output_channels)
# xnnpack forces W_zp to 0 when using symmetric quantization
# ONEDNN only supports symmetric quantization of weight
if dtype == torch.qint8 or qengine_is_onednn():
W_zps = np.zeros(output_channels).astype(int)
else:
W_zps = np.round(np.random.rand(output_channels) * 100 - 50).astype(int)
# when using symmetric quantization
# special restriction for xnnpack fully connected op weight
# [-127, 127] instead of [-128, 127]
W_value_min = -127 if dtype == torch.qint8 else -128
W_value_max = 127
W_q0 = np.round(
np.random.rand(output_channels, input_channels)
* (W_value_max - W_value_min)
+ W_value_min
).astype(np.int8) # weight is always int8_t
b_value_min = -10
b_value_max = 10
b_q0 = np.round(
np.random.rand(output_channels) *
(b_value_max - b_value_min) + b_value_min
).astype(np.int32) if use_bias else None
if torch.backends.quantized.engine in ('x86', 'fbgemm', 'onednn'):
avoid_vpmaddubsw_overflow_linear(
batch_size,
input_channels,
output_channels,
X_q0,
X_value_min,
X_value_max,
W_q0,
W_value_min,
W_value_max,
)
X = torch.from_numpy(_dequantize(
X_q0, X_scale, X_zp)).to(dtype=torch.float)
X_q = torch.quantize_per_tensor(
X, scale=X_scale, zero_point=X_zp, dtype=dtype)
if use_channelwise:
W = torch.from_numpy(_dequantize(W_q0, W_scales.reshape(
(-1, 1)), W_zps.reshape((-1, 1)))).to(dtype=torch.float)
W_q = torch.quantize_per_channel(W, scales=torch.from_numpy(W_scales),
zero_points=torch.from_numpy(W_zps), axis=0, dtype=torch.qint8)
b = torch.from_numpy(_dequantize(
b_q0, X_scale * W_scales, 0)).to(dtype=torch.float) if use_bias else None
b_q = torch.quantize_per_channel(b, scales=torch.from_numpy(X_scale * W_scales),
zero_points=torch.zeros(output_channels, dtype=torch.long),
axis=0, dtype=torch.qint32) if use_bias else None
else:
W = torch.from_numpy(_dequantize(
W_q0, W_scales[0], W_zps[0])).to(dtype=torch.float)
W_q = torch.quantize_per_tensor(W, scale=W_scales[0], zero_point=(
W_zps[0].astype(int).item()), dtype=torch.qint8)
b = torch.from_numpy(_dequantize(
b_q0, X_scale * (W_scales[0].item()), 0)).to(dtype=torch.float) if use_bias else None
b_q = torch.quantize_per_tensor(
b, scale=X_scale * (W_scales[0].item()), zero_point=0, dtype=torch.qint32) if use_bias else None
# Compare X_scale * W_scale * input_channels * X_value_max * W_value_max with
# Y_scale * 255 (max for uint8).
Y_scale = 125.1234
Y_zp = 5
# Weight prepacking operator for quantized Linear
float_bias = b if use_bias else None
W_prepack = qlinear_prepack(W_q, float_bias)
if use_multi_dim_input:
X = X.view(3, int(batch_size / 3), input_channels)
X_q = X_q.view(3, int(batch_size / 3), input_channels)
# Quantized Linear operator with prepacked weight
Y_q_dq = qlinear(X, X_scale, X_zp, W_prepack)
# Test both per-tensor and per-channel quantization
# Reference quantized result from PyTorch Linear operator
W_fp32 = W_q.dequantize().to(dtype=torch.float)
X_fp32 = X_q.dequantize().to(dtype=torch.float)
b_fp32 = b_q.dequantize().to(dtype=torch.float) if use_bias else None
Y_fp32_ref = F.linear(X_fp32, W_fp32, b_fp32)
if use_relu:
Y_fp32_ref[Y_fp32_ref < 0.0] = 0.0
decimal_val = 1
np.testing.assert_array_almost_equal(Y_fp32_ref.numpy(), Y_q_dq.numpy(), decimal=decimal_val)
@given(batch_size=st.integers(1, 4),
# in cudnn v. 8.4.0, there is a limitation that input channels
# should be a multiple of 4 for int8 tensors. in cudnn v.8.3.3
# this should be a multiple of 16
input_channels=st.sampled_from([4, 8, 12, 16, 32]),
# constraints on output channels appear to be relax, as it seems we can use any positive integer here
# except 1. It is not clear why 1 will not work. TODO: check with Yang
output_channels=st.integers(2, 36),
use_bias=st.booleans(),
use_relu=st.booleans(),
use_multi_dim_input=st.booleans(),
use_channelwise=st.sampled_from([False])) # channelwise currently not supported for qlinear cudnn
@skipIfNoFBGEMM
@unittest.skipIf(not TEST_CUDNN, "cudnn is not enabled.")
@unittest.skipIf(TEST_CUDNN and torch.backends.cudnn.version() == 90100, "expected failure on cuDNN 9.1.0")
@unittest.skipIf(not SM80OrLater, "requires sm80 or later.")
@unittest.skipIf(TEST_ROCM, "not supported on rocm.")
# TODO: check with yang regarding CUDNN flags
@unittest.skip("not currently working and feature isn't used")
def test_qlinear_cudnn(self, batch_size, input_channels, output_channels, use_bias,
use_relu, use_multi_dim_input, use_channelwise):
qlinear_prepack = torch.ops.quantized.linear_prepack
if use_relu:
qlinear_op = torch.ops.quantized.linear_relu
else:
qlinear_op = torch.ops.quantized.linear
X_scale = 1.5
X_zp = 0
X_value_min = -128
X_value_max = 127
X_q0 = np.round(
np.random.rand(batch_size, input_channels) *
(X_value_max - X_value_min)
+ X_value_min).astype(np.int8)
W_scale = 2.5
W_zp = 0
W_value_min = -128
W_value_max = 127
W_q0 = np.round(
np.random.rand(output_channels, input_channels)
* (W_value_max - W_value_min)
+ W_value_min
).astype(np.int8)
b_value_min = -10
b_value_max = 10
b_q0 = np.round(
np.random.rand(output_channels) *
(b_value_max - b_value_min) + b_value_min
).astype(np.int32) if use_bias else None
if use_bias:
b_value_min = -10
b_value_max = 10
b_q0 = np.round(
np.random.rand(output_channels) *
(b_value_max - b_value_min) + b_value_min
).astype(np.int32)
else:
bias = None
avoid_vpmaddubsw_overflow_linear(
batch_size,
input_channels,
output_channels,
X_q0,
X_value_min,
X_value_max,
W_q0,
W_value_min,
W_value_max,
)
quant_dtype = torch.qint8
X = torch.from_numpy(_dequantize(
X_q0, X_scale, X_zp)).to(dtype=torch.float).to(device="cuda")
X_q = torch.quantize_per_tensor(
X, scale=X_scale, zero_point=X_zp, dtype=quant_dtype)
W = torch.from_numpy(_dequantize(
W_q0, W_scale, W_zp)).to(dtype=torch.float).to(device="cuda")
W_q = torch.quantize_per_tensor(W, scale=W_scale, zero_point=W_zp, dtype=quant_dtype)
b = torch.from_numpy(_dequantize(
b_q0, X_scale * (W_zp), 0)).to(dtype=torch.float).to(device="cuda") if use_bias else None
b_q = torch.quantize_per_tensor(
b, scale=X_scale * W_scale, zero_point=0, dtype=quant_dtype) if use_bias else None
Y_scale = 0.5
Y_zp = 0
# Weight prepacking operator for quantized Linear
float_bias = b if use_bias else None
W_prepack = qlinear_prepack(W_q, float_bias if use_bias else None)
# Quantized Linear operator with prepacked weight
Y_q = qlinear_op(X_q, W_prepack, Y_scale, Y_zp).to(device="cpu")
Y_q_ref = qlinear_ref(X_q0, X_scale, X_zp, W_q0,
W_scale, W_zp, b_q0, Y_scale, Y_zp, dtype=np.int8)
if use_relu:
Y_q_ref[Y_q_ref < Y_zp] = Y_zp
decimal_val = 0
np.testing.assert_array_almost_equal(Y_q_ref, Y_q.int_repr().numpy(), decimal=decimal_val)
"""Tests the correctness of the quantized::linear_unpack op."""
@given(W=hu.tensor(shapes=hu.array_shapes(2, 2,),
qparams=hu.qparams(dtypes=torch.qint8)),
use_channelwise=st.booleans())
@override_qengines
def test_qlinear_unpack(self, W, use_channelwise):
W, (W_scale, W_zp, torch_type) = W
if use_channelwise:
output_channels = W.shape[0]
W_scales = torch.rand(output_channels).to(torch.double)
W_zps = torch.round(torch.rand(output_channels)
* 100 - 50).to(torch.int64)
qlinear_prepack = torch.ops.quantized.linear_prepack
qlinear_unpack = torch.ops.quantized.linear_unpack
# ONEDNN only supports symmetric quantization of weight
if qengine_is_onednn():
if use_channelwise:
W_zps = torch.zeros(output_channels).to(torch.int64)
else:
W_zp = 0
W = torch.from_numpy(W)
if use_channelwise:
W_q = torch.quantize_per_channel(
W, W_scales, W_zps, 0, dtype=torch_type)
else:
W_q = torch.quantize_per_tensor(W, scale=W_scale, zero_point=W_zp,
dtype=torch_type)
# Weight prepacking operator for quantized Linear
W_prepack = qlinear_prepack(W_q)
# Weight unpack operator for quantized Linear (Used for serialization)
W_q_origin = qlinear_unpack(W_prepack)[0]
# Assert equal
np.testing.assert_equal(W_q.int_repr(), W_q_origin.int_repr().numpy())
if use_channelwise:
np.testing.assert_array_almost_equal(np.float32(W_q.q_per_channel_scales().numpy()),
np.float32(
W_q_origin.q_per_channel_scales().numpy()),
decimal=4)
np.testing.assert_equal(W_q.q_per_channel_zero_points(
).numpy(), W_q_origin.q_per_channel_zero_points().numpy())
else:
np.testing.assert_equal(np.float32(
W_q.q_scale()), np.float32(W_q_origin.q_scale()))
np.testing.assert_equal(
W_q.q_zero_point(), W_q_origin.q_zero_point())
"""Tests the correctness of the _quantized::wrapped_quantized_linear op."""
@skipIfNoFBGEMM
@given(
m=st.integers(2, 6),
k=st.integers(2, 6),
n=st.integers(2, 6),
)
def test_wrapped_quantized_linear(self, m, n, k):
input = torch.randn(m, k, dtype=torch.float32)
input_scale = torch.tensor(0.1)
input_zero_point = torch.tensor(0)
weight = torch.randn(n, k, dtype=torch.float32)
weight_scale = torch.tensor(0.1)
weight_zero_point = torch.tensor(0)
bias = torch.randn(n, dtype=torch.float32)
output_scale = torch.tensor(0.1)
output_zero_point = torch.tensor(0)
out_channel = n
ret = torch.ops._quantized.wrapped_quantized_linear(
input,
input_scale,
input_zero_point,
weight,
weight_scale,
weight_zero_point,
bias,
output_scale,
output_zero_point,
out_channel,
)
qinput = torch.quantize_per_tensor(input, input_scale, input_zero_point, torch.quint8)
qweight = torch.quantize_per_tensor(weight, weight_scale, weight_zero_point, torch.qint8)
qlinear_prepack = torch.ops.quantized.linear_prepack(qweight, bias)
qlinear = torch.ops.quantized.linear(qinput, qlinear_prepack, output_scale, output_zero_point)
ret_ref = qlinear.dequantize()
self.assertEqual(ret, ret_ref)
"""Tests the correctness of the _quantized::_wrapped_linear_prepack and
_quantized::_wrapped_quantized_linear_prepacked ops."""
@skipIfNoFBGEMM
@given(
m=st.integers(2, 6),
k=st.integers(2, 6),
n=st.integers(2, 6),
)
def test_wrapped_quantized_linear_prepacked(self, m, n, k):
input = torch.randn(m, k, dtype=torch.float32)
input_scale = torch.tensor(0.1)
input_zero_point = torch.tensor(0)
weight = torch.randn(n, k, dtype=torch.float32)
weight_scale = torch.tensor(0.1)
weight_zero_point = torch.tensor(0)
bias = torch.randn(n, dtype=torch.float32)
output_scale = torch.tensor(0.1)
output_zero_point = torch.tensor(0)
out_channel = n
ret_1 = torch.ops._quantized._wrapped_linear_prepack(
weight,
weight_scale,
weight_zero_point,
bias
)
ret_2 = torch.ops._quantized._wrapped_quantized_linear_prepacked(
input,
input_scale,
input_zero_point,
ret_1,
output_scale,
output_zero_point,
out_channel
)
qinput = torch.quantize_per_tensor(input, input_scale, input_zero_point, torch.quint8)
qweight = torch.quantize_per_tensor(weight, weight_scale, weight_zero_point, torch.qint8)
qlinear_prepack = torch.ops.quantized.linear_prepack(qweight, bias)
qlinear = torch.ops.quantized.linear(qinput, qlinear_prepack, output_scale, output_zero_point)
ret_ref = qlinear.dequantize()
self.assertEqual(ret_2, ret_ref)
"""Tests the correctness of the quantized::linear_unpack after freeing original tensor op."""
@skipIfNoQNNPACK
@given(W=hu.tensor(shapes=hu.array_shapes(2, 2,),
qparams=hu.qparams(dtypes=torch.qint8)))
@override_qengines
def test_qlinear_qnnpack_free_memory_and_unpack(self, W):
assert qengine_is_qnnpack
W, (W_scale, W_zp, torch_type) = W
qlinear_prepack = torch.ops.quantized.linear_prepack
qlinear_unpack = torch.ops.quantized.linear_unpack
W = torch.from_numpy(W)
# ONEDNN only supports symmetric quantization of weight
if qengine_is_onednn():
W_zp = 0
W_q = torch.quantize_per_tensor(W, scale=W_scale, zero_point=W_zp, dtype=torch_type)
# Weight prepacking operator for quantized Linear
W_prepack = qlinear_prepack(W_q)
dummy_input = torch.randn((1, W.shape[1]))
# Make sure we free original tensor by running matrix multiplication in backend.
torch.ops.quantized.linear_dynamic(dummy_input, W_prepack)
torch.ops.quantized.linear_dynamic(dummy_input, W_prepack)
# At this step, original tensor should be recovered from a data_ptr
W_q_origin = qlinear_unpack(W_prepack)[0]
# Assert equal
np.testing.assert_equal(W_q.int_repr(), W_q_origin.int_repr().numpy())
np.testing.assert_equal(np.float32(
W_q.q_scale()), np.float32(W_q_origin.q_scale()))
np.testing.assert_equal(
W_q.q_zero_point(), W_q_origin.q_zero_point())
@skipIfNoONEDNN
def test_qlinear_leaky_relu(self):
with override_quantized_engine('onednn'):
batch_size_list = [1, 4]
input_channels_list = [16, 32]
output_channels_list = [4, 8]
use_bias_list = [True, False]
use_multi_dim_input_list = [True, False]
use_channelwise_list = [True, False]
negative_slopes_list = [0.01, 0.05]
post_op = 'leaky_relu'
cases = itertools.product(batch_size_list, input_channels_list, output_channels_list,
use_bias_list, use_multi_dim_input_list,
use_channelwise_list, negative_slopes_list)
for batch_size, input_channels, output_channels, use_bias, \
use_multi_dim_input, use_channelwise, neg_slope in cases:
self._test_qlinear_impl(batch_size, input_channels, output_channels,
use_bias, post_op, use_multi_dim_input,
use_channelwise, negative_slope=neg_slope)
@skipIfNoONEDNN
def test_qlinear_tanh(self):
with override_quantized_engine('onednn'):
batch_size_list = [1, 4]
input_channels_list = [16, 32]
output_channels_list = [4, 8]
use_bias_list = [True, False]
use_multi_dim_input_list = [True, False]
use_channelwise_list = [True, False]
post_op = 'tanh'
cases = itertools.product(batch_size_list, input_channels_list,
output_channels_list, use_bias_list,
use_multi_dim_input_list, use_channelwise_list)
for batch_size, input_channels, output_channels, use_bias, \
use_multi_dim_input, use_channelwise in cases:
self._test_qlinear_impl(batch_size, input_channels, output_channels,
use_bias, post_op, use_multi_dim_input,
use_channelwise)
def _test_qlinear_pt2e_helper(
self,
qlinear_op,
post_op="none",
unary_post_op_args=(),
post_op_algorithms=("none",),
test_fast_path=False,
):
if test_fast_path:
import os
os.environ["ONEDNN_CACHE_CONTEXT_UNSAFE"] = "1"
qlinear_prepack = torch.ops.onednn.qlinear_prepack
linear_op = F.linear
in_channels_list = [4, 8]
out_channels_list = [16, 32]
batch_size = 1
use_bias_list = [True, False]
weight_quant_per_channel_list = [True, False]
output_dtype_list = [None, torch.float32, torch.bfloat16]
x_scale, x_zp = 1.2, 1
w_scale, w_zp = 0.8, 0
y_scale, y_zp = 4.7, 2
input_dim_list = [2, 3]
cases = itertools.product(
in_channels_list, out_channels_list, use_bias_list,
weight_quant_per_channel_list, output_dtype_list, post_op_algorithms, input_dim_list)
with override_quantized_engine('onednn'):
for ic, oc, use_bias, weight_quant_per_channel, output_dtype, post_op_algo, input_dim in cases:
used_y_scale = y_scale
used_y_zp = y_zp
fp32_out = output_dtype == torch.float32
bfloat16_out = output_dtype == torch.bfloat16
if fp32_out or bfloat16_out:
used_y_scale, used_y_zp = 1.0, 0
x2_scale, x2_zp = 1.0, 0
else:
x2_scale, x2_zp = 2.3, 5
x = torch.rand(batch_size, (ic + 1), ic) * 10 if input_dim == 3 else torch.rand(batch_size, ic) * 10
w = torch.rand(oc, ic) * 10
qx = torch.quantize_per_tensor(x, x_scale, x_zp, torch.quint8)
if weight_quant_per_channel:
w_scales = torch.Tensor([w_scale] * oc)
w_zps = torch.zeros(oc).to(dtype=torch.int)
qw = torch.quantize_per_channel(w, w_scales, w_zps, 0, torch.qint8)
else:
w_scales = torch.Tensor([w_scale])
w_zps = torch.Tensor([w_zp]).to(dtype=torch.int)
qw = torch.quantize_per_tensor(w, w_scale, w_zp, torch.qint8)
if use_bias:
b = torch.rand(oc) * 10
else:
b = None
x_ref = qx.dequantize()
w_ref = qw.dequantize()
y_ref = linear_op(x_ref, w_ref, b)
# compute with CPU tensors
qx_cpu = qx.int_repr()
qw_cpu = qw.int_repr()
qw_packed = qlinear_prepack(qw_cpu, x.shape)
num_iter = 2 if test_fast_path else 1 # rerun to use cache
if post_op in ("none", "relu", "gelu"):
for _ in range(num_iter):
qy_cpu = qlinear_op(
qx_cpu, x_scale, x_zp, qw_packed, w_scales, w_zps,
b, used_y_scale, used_y_zp, output_dtype,
post_op, unary_post_op_args, post_op_algo
)
if post_op == "relu":
y_ref = F.relu(y_ref)
elif post_op == "gelu":
y_ref = F.gelu(y_ref, approximate=post_op_algo)
qy_ref = torch.quantize_per_tensor(y_ref, used_y_scale, used_y_zp, torch.quint8)
elif post_op in ("sum", "sum_relu"):
x2_int8 = torch.randint(0, 4, y_ref.size())
x2 = x2_scale * ((x2_int8 - x2_zp).float())
qx2 = torch.quantize_per_tensor(
x2, scale=x2_scale, zero_point=x2_zp, dtype=torch.quint8
)
unary_post_op = "relu" if post_op == "sum_relu" else "none"
binary_alpha = 1.0 # we only support alpha=1.0 now
accum = qx2.int_repr() if output_dtype is None else qx2.dequantize()
if bfloat16_out:
accum = accum.bfloat16()
for _ in range(num_iter):
# clone accum otherwise it gets accumulated multiple times
qy_cpu = qlinear_op(
qx_cpu, x_scale, x_zp, qw_packed, w_scales, w_zps,
accum.clone(), b, used_y_scale, used_y_zp, output_dtype,
x2_scale, x2_zp, "sum", binary_alpha,
unary_post_op, unary_post_op_args, post_op_algo
)
y_ref = y_ref + x2 * binary_alpha
if unary_post_op == "relu":
y_ref = F.relu(y_ref)
qy_ref = torch.quantize_per_tensor(y_ref, used_y_scale, used_y_zp, torch.quint8)
elif post_op in ("add", "add_relu"):
used_y_scale, used_y_zp = 1.0, 0
if output_dtype is not None:
# Only support int8 output
continue
x2 = torch.randn(y_ref.size()) * 10
unary_post_op = "relu" if post_op == "add_relu" else "none"
binary_alpha = 1.0 # we only support alpha=1.0 now
for _ in range(num_iter):
qy_cpu = qlinear_op(
qx_cpu, x_scale, x_zp, qw_packed, w_scales, w_zps,
x2, b, used_y_scale, used_y_zp, output_dtype,
1.0, 0, "add", binary_alpha,
unary_post_op, unary_post_op_args, post_op_algo
)
y_ref = y_ref + x2 * binary_alpha
if unary_post_op == "relu":
y_ref = F.relu(y_ref)
qy_ref = torch.quantize_per_tensor(y_ref, used_y_scale, used_y_zp, torch.quint8)
# Compare results
if fp32_out or bfloat16_out:
qy_cpu = torch.quantize_per_tensor(
qy_cpu.to(torch.float32),
used_y_scale,
used_y_zp, dtype=torch.quint8
).int_repr()
self.assertEqual(x.dim(), qy_cpu.dim())
np.testing.assert_array_almost_equal(
qy_ref.int_repr().cpu().numpy(),
qy_cpu.cpu().numpy(),
decimal=0,
err_msg=f"""X: {x}, W: {w}, b: {b},
x_s: {x_scale}, x_zp: {x_zp},
w_s: {w_scale}, w_zp: {w_zp},
y_s: {y_scale}, y_zp: {y_zp}""",
)
if test_fast_path:
del os.environ["ONEDNN_CACHE_CONTEXT_UNSAFE"]
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qlinear_pt2e(self):
qlinear = torch.ops.onednn.qlinear_pointwise
self._test_qlinear_pt2e_helper(qlinear, "none")
self._test_qlinear_pt2e_helper(qlinear, "none", test_fast_path=True)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qlinear_relu_pt2e(self):
qlinear = torch.ops.onednn.qlinear_pointwise
self._test_qlinear_pt2e_helper(qlinear, "relu")
self._test_qlinear_pt2e_helper(qlinear, "relu", test_fast_path=True)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qlinear_gelu_pt2e(self):
qlinear = torch.ops.onednn.qlinear_pointwise
post_op_algorithms = ['none', 'tanh']
self._test_qlinear_pt2e_helper(qlinear, "gelu", post_op_algorithms=post_op_algorithms)
self._test_qlinear_pt2e_helper(qlinear, "gelu", post_op_algorithms=post_op_algorithms, test_fast_path=True)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qlinear_sum_pt2e(self):
qlinear = torch.ops.onednn.qlinear_pointwise.binary
self._test_qlinear_pt2e_helper(qlinear, "sum")
self._test_qlinear_pt2e_helper(qlinear, "sum", test_fast_path=True)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qlinear_sum_relu_pt2e(self):
qlinear = torch.ops.onednn.qlinear_pointwise.binary
self._test_qlinear_pt2e_helper(qlinear, "sum_relu")
self._test_qlinear_pt2e_helper(qlinear, "sum_relu", test_fast_path=True)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qlinear_add_pt2e(self):
qlinear = torch.ops.onednn.qlinear_pointwise.binary
self._test_qlinear_pt2e_helper(qlinear, "add")
self._test_qlinear_pt2e_helper(qlinear, "add", test_fast_path=True)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qlinear_add_relu_pt2e(self):
qlinear = torch.ops.onednn.qlinear_pointwise.binary
self._test_qlinear_pt2e_helper(qlinear, "add_relu")
self._test_qlinear_pt2e_helper(qlinear, "add_relu", test_fast_path=True)
def _test_qlinear_fp8_helper(
self,
qlinear_op,
post_op="none",
unary_post_op_args=(),
post_op_algorithms=("none",),
):
qlinear_prepack = torch.ops.onednn.qlinear_prepack
linear_op = F.linear
in_channels_list = [4, 8]
out_channels_list = [16, 32]
batch_size = 1
use_bias_list = [True, False]
weight_quant_per_channel_list = [True, False]
output_dtype_list = [None, torch.float32, torch.bfloat16]
y_scale, y_zp = 0.3, 0
input_dim_list = [2, 3]
cases = itertools.product(
in_channels_list, out_channels_list, use_bias_list,
weight_quant_per_channel_list, output_dtype_list, post_op_algorithms, input_dim_list)
with override_quantized_engine('onednn'):
for ic, oc, use_bias, weight_quant_per_channel, output_dtype, post_op_algo, input_dim in cases:
used_y_scale = y_scale
used_y_zp = y_zp
fp32_out = output_dtype == torch.float32
bfloat16_out = output_dtype == torch.bfloat16
if fp32_out or bfloat16_out:
used_y_scale = 1.0
x2_scale, x2_zp = 1.0, 0
else:
x2_scale, x2_zp = 0.3, 0
x = torch.rand(batch_size, (ic + 1), ic) * 10 if input_dim == 3 else torch.rand(batch_size, ic) * 10
w = torch.rand(oc, ic) * 10
qx, x_scale = _quantize_fp8e4m3(x, channelwise=False)
qw, w_scales = _quantize_fp8e4m3(w, channelwise=weight_quant_per_channel)
if use_bias:
b = torch.rand(oc) * 10
if bfloat16_out:
b = b.to(torch.bfloat16)
else:
b = None
# compute reference result
x_ref = _dequantize_fp8e4m3(qx, x_scale)
w_ref = _dequantize_fp8e4m3(qw, w_scales)
if b is not None:
y_ref = linear_op(x_ref, w_ref, b.to(torch.float))
else:
y_ref = linear_op(x_ref, w_ref)
# compute fp8 linear
qw_packed = qlinear_prepack(qw, x.shape)
x_zp = 0
w_zps = torch.zeros_like(w_scales, dtype=torch.int)
if post_op in ("none", "relu", "gelu"):
qy = qlinear_op(
qx, x_scale, x_zp, qw_packed, w_scales, w_zps,
b, used_y_scale, used_y_zp, output_dtype,
post_op, unary_post_op_args, post_op_algo
)
if post_op == "relu":
y_ref = F.relu(y_ref)
elif post_op == "gelu":
y_ref = F.gelu(y_ref, approximate=post_op_algo)
elif post_op in ("sum", "sum_relu"):
x2 = torch.rand_like(y_ref)
x2_q, x2_scale = _quantize_fp8e4m3(x2, channelwise=False)
x2_dq = _dequantize_fp8e4m3(x2_q, x2_scale)
unary_post_op = "relu" if post_op == "sum_relu" else "none"
binary_alpha = 1.0 # we only support alpha=1.0 now
# if output_dtype is fp32 or bf16, accumulate on x2
# if output_dtype is None (fp8), accumulate on x2_dq
accum = x2_q if output_dtype is None else x2
accum_ref = x2_dq if output_dtype is None else x2.clone()
x2_scale = x2_scale if output_dtype is None else 1.0
if bfloat16_out:
accum = accum.bfloat16()
accum_ref = accum_ref.bfloat16()
qy = qlinear_op(
qx, x_scale, x_zp, qw_packed, w_scales, w_zps,
accum, b, used_y_scale, used_y_zp, output_dtype,
x2_scale, x2_zp, "sum", binary_alpha,
unary_post_op, unary_post_op_args, post_op_algo
)
y_ref = y_ref + accum_ref * binary_alpha
if unary_post_op == "relu":
y_ref = F.relu(y_ref)
elif post_op in ("add", "add_relu"):
if output_dtype is not None:
# Only support fp8 output
continue
x2 = torch.rand_like(y_ref)
unary_post_op = "relu" if post_op == "add_relu" else "none"
binary_alpha = 1.0 # we only support alpha=1.0 now
qy = qlinear_op(
qx, x_scale, x_zp, qw_packed, w_scales, w_zps,
x2, b, used_y_scale, used_y_zp, output_dtype,
1.0, 0, "add", binary_alpha,
unary_post_op, unary_post_op_args, post_op_algo
)
y_ref = y_ref + x2 * binary_alpha
if unary_post_op == "relu":
y_ref = F.relu(y_ref)
# Compare results
if output_dtype is None:
y_ref = _quantize_fp8e4m3(y_ref, False, used_y_scale)[0]
else:
y_ref = y_ref.to(output_dtype)
self.assertEqual(x.dim(), qy.dim())
self.assertEqual(y_ref.float(), qy.float())
assert not torch.isnan(qy).any()
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qlinear_fp8(self):
    """FP8 qlinear with no fused post-op: delegate to the shared helper."""
    self._test_qlinear_fp8_helper(torch.ops.onednn.qlinear_pointwise, "none")
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qlinear_relu_fp8(self):
    """FP8 qlinear fused with a ReLU post-op."""
    self._test_qlinear_fp8_helper(torch.ops.onednn.qlinear_pointwise, "relu")
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qlinear_gelu_fp8(self):
    """FP8 qlinear fused with GELU, covering both approximation algorithms."""
    algos = ['none', 'tanh']
    self._test_qlinear_fp8_helper(
        torch.ops.onednn.qlinear_pointwise, "gelu", post_op_algorithms=algos
    )
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qlinear_sum_fp8(self):
    """FP8 qlinear with an in-place binary 'sum' (accumulate) post-op."""
    self._test_qlinear_fp8_helper(
        torch.ops.onednn.qlinear_pointwise.binary, "sum"
    )
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qlinear_sum_relu_fp8(self):
    """FP8 qlinear with binary 'sum' followed by a fused ReLU."""
    self._test_qlinear_fp8_helper(
        torch.ops.onednn.qlinear_pointwise.binary, "sum_relu"
    )
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qlinear_add_fp8(self):
    """FP8 qlinear with an out-of-place binary 'add' post-op."""
    self._test_qlinear_fp8_helper(
        torch.ops.onednn.qlinear_pointwise.binary, "add"
    )
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qlinear_add_relu_fp8(self):
    """FP8 qlinear with binary 'add' followed by a fused ReLU."""
    self._test_qlinear_fp8_helper(
        torch.ops.onednn.qlinear_pointwise.binary, "add_relu"
    )
@unittest.skipIf(IS_MACOS, "Known test failure on Mac.")
| TestQuantizedLinear |
python | kamyu104__LeetCode-Solutions | Python/create-sorted-array-through-instructions.py | {
"start": 33,
"end": 504
} | class ____(object): # 0-indexed.
def __init__(self, n):
    """Allocate a Fenwick (binary indexed) tree over n elements.

    Slot 0 of the internal array is a dummy root, hence n + 1 entries,
    all starting at zero.
    """
    self.__bit = [0 for _ in range(n + 1)]
def add(self, i, val):
    """Add val to the element at 0-indexed position i in O(log n)."""
    tree = self.__bit
    pos = i + 1  # shift past the dummy root at slot 0
    while pos < len(tree):
        tree[pos] += val
        pos += pos & -pos  # climb to the next node covering this index
def query(self, i):
    """Return the prefix sum of elements 0..i (inclusive) in O(log n)."""
    tree = self.__bit
    total = 0
    pos = i + 1  # shift past the dummy root at slot 0
    while pos > 0:
        total += tree[pos]
        pos -= pos & -pos  # drop the lowest set bit to reach the parent range
    return total
| BIT |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.