| language (string, 1 class) | repo (string, 346 classes) | path (string, 6-201 chars) | class_span (dict) | source (string, 21-2.38M chars) | target (string, 1-96 chars) |
|---|---|---|---|---|---|
python | django__django | django/db/models/deletion.py | {
"start": 2354,
"end": 3406
} | class ____:
def __init__(self, operation, name, forced_collector=None):
self.operation = operation
self.forced_collector = forced_collector
self.__name__ = name
__call__ = DO_NOTHING
def on_delete_sql(self, schema_editor):
return schema_editor.connection.ops.fk_on_delete_sql(self.operation)
def __str__(self):
return self.__name__
DB_CASCADE = DatabaseOnDelete("CASCADE", "DB_CASCADE", CASCADE)
DB_SET_DEFAULT = DatabaseOnDelete("SET DEFAULT", "DB_SET_DEFAULT")
DB_SET_NULL = DatabaseOnDelete("SET NULL", "DB_SET_NULL")
SKIP_COLLECTION = frozenset([DO_NOTHING, DB_CASCADE, DB_SET_DEFAULT, DB_SET_NULL])
def get_candidate_relations_to_delete(opts):
# The candidate relations are the ones that come from N-1 and 1-1
# relations. N-N (i.e., many-to-many) relations aren't candidates for
# deletion.
return (
f
for f in opts.get_fields(include_hidden=True)
if f.auto_created and not f.concrete and (f.one_to_one or f.one_to_many)
)
| DatabaseOnDelete |
python | bottlepy__bottle | bottle.py | {
"start": 142330,
"end": 143008
} | class ____(AsyncioServerAdapter):
""" Asynchronous HTTP client/server framework for asyncio
https://pypi.python.org/pypi/aiohttp/
https://pypi.org/project/aiohttp-wsgi/
"""
def get_event_loop(self):
import asyncio
return asyncio.new_event_loop()
def run(self, handler):
import asyncio
from aiohttp_wsgi.wsgi import serve
self.loop = self.get_event_loop()
asyncio.set_event_loop(self.loop)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: self.loop.stop())
serve(handler, host=self.host, port=self.port)
| AiohttpServer |
python | python-poetry__poetry | tests/helpers.py | {
"start": 5341,
"end": 6285
} | class ____(Locker):
# class name begins 'Test': tell pytest that it does not contain testcases.
__test__ = False
def __init__(self, lock: Path, pyproject_data: dict[str, Any]) -> None:
super().__init__(lock, pyproject_data)
self._locked = False
self._write = False
def write(self, write: bool = True) -> None:
self._write = write
def is_locked(self) -> bool:
return self._locked
def locked(self, is_locked: bool = True) -> TestLocker:
self._locked = is_locked
return self
def mock_lock_data(self, data: dict[str, Any]) -> None:
self.locked()
self._lock_data = data
def is_fresh(self) -> bool:
return True
def _write_lock_data(self, data: TOMLDocument) -> None:
if self._write:
super()._write_lock_data(data)
self._locked = True
return
self._lock_data = data
| TestLocker |
python | huggingface__transformers | src/transformers/pipelines/image_to_image.py | {
"start": 1171,
"end": 5261
} | class ____(Pipeline):
"""
Image to Image pipeline using any `AutoModelForImageToImage`. This pipeline generates an image based on a previous
image input.
Example:
```python
>>> from PIL import Image
>>> import httpx
>>> import io
>>> from transformers import pipeline
>>> upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
>>> img = Image.open(io.BytesIO(httpx.get("http://images.cocodataset.org/val2017/000000039769.jpg").content))
>>> img = img.resize((64, 64))
>>> upscaled_img = upscaler(img)
>>> img.size
(64, 64)
>>> upscaled_img.size
(144, 144)
```
This image to image pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"image-to-image"`.
See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=image-to-image).
"""
_load_processor = False
_load_image_processor = True
_load_feature_extractor = False
_load_tokenizer = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
requires_backends(self, "vision")
self.check_model_type(MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES)
def _sanitize_parameters(self, **kwargs):
preprocess_params = {}
postprocess_params = {}
forward_params = {}
if "timeout" in kwargs:
preprocess_params["timeout"] = kwargs["timeout"]
return preprocess_params, forward_params, postprocess_params
@overload
def __call__(self, images: Union[str, "Image.Image"], **kwargs: Any) -> "Image.Image": ...
@overload
def __call__(self, images: list[str] | list["Image.Image"], **kwargs: Any) -> list["Image.Image"]: ...
def __call__(
self, images: Union[str, list[str], "Image.Image", list["Image.Image"]], **kwargs: Any
) -> Union["Image.Image", list["Image.Image"]]:
"""
Transform the image(s) passed as inputs.
Args:
images (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`):
The pipeline handles three types of images:
- A string containing a http link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images, which must then be passed as a string.
Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL
images.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and
the call may block forever.
Return:
An image (Image.Image) or a list of images (list["Image.Image"]) containing result(s). If the input is a
single image, the return will be also a single image, if the input is a list of several images, it will
return a list of transformed images.
"""
return super().__call__(images, **kwargs)
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def preprocess(self, image, timeout=None):
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors="pt")
inputs = inputs.to(self.dtype)
return inputs
def postprocess(self, model_outputs):
images = []
if "reconstruction" in model_outputs:
outputs = model_outputs.reconstruction
for output in outputs:
output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
output = np.moveaxis(output, source=0, destination=-1)
output = (output * 255.0).round().astype(np.uint8) # float32 to uint8
images.append(Image.fromarray(output))
return images if len(images) > 1 else images[0]
| ImageToImagePipeline |
python | ray-project__ray | python/ray/_private/runtime_env/plugin.py | {
"start": 3191,
"end": 3509
} | class ____:
def __init__(
self,
name: str,
class_instance: RuntimeEnvPlugin,
priority: int,
uri_cache: URICache,
):
self.name = name
self.class_instance = class_instance
self.priority = priority
self.uri_cache = uri_cache
| PluginSetupContext |
python | huggingface__transformers | src/transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py | {
"start": 10056,
"end": 12276
} | class ____(Qwen3VLConfig):
r"""
This is the configuration class to store the configuration of a [`Qwen3VLMoeModel`]. It is used to instantiate a
Qwen3-VL-MOE model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of
Qwen3-VL-30B-A3B-Instruct [Qwen/Qwen3-VL-30B-A3B-Instruct](https://huggingface.co/Qwen/Qwen3-VL-30B-A3B-Instruct).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen3VLMoeTextConfig`):
The config object or dictionary of the text backbone.
vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen3VLMoeVisionConfig`):
The config object or dictionary of the vision backbone.
image_token_id (`int`, *optional*, defaults to 151655):
The image token index to encode the image prompt.
video_token_id (`int`, *optional*, defaults to 151656):
The video token index to encode the image prompt.
vision_start_token_id (`int`, *optional*, defaults to 151652):
The start token index to encode the image prompt.
vision_end_token_id (`int`, *optional*, defaults to 151653):
The end token index to encode the image prompt.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie the word embeddings.
```python
>>> from transformers import Qwen3VLMoeForConditionalGeneration, Qwen3VLMoeConfig
>>> # Initializing a Qwen3-VL-MOE style configuration
>>> configuration = Qwen3VLMoeConfig()
>>> # Initializing a model from the Qwen3-VL-30B-A3B style configuration
>>> model = Qwen3VLMoeForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "qwen3_vl_moe"
sub_configs = {"vision_config": Qwen3VLMoeVisionConfig, "text_config": Qwen3VLMoeTextConfig}
| Qwen3VLMoeConfig |
python | tornadoweb__tornado | tornado/test/httpclient_test.py | {
"start": 1848,
"end": 2105
} | class ____(RequestHandler):
@gen.coroutine
def get(self):
self.write("asdf")
self.flush()
# Wait a bit to ensure the chunks are sent and received separately.
yield gen.sleep(0.01)
self.write("qwer")
| ChunkHandler |
python | ansible__ansible | lib/ansible/plugins/filter/__init__.py | {
"start": 222,
"end": 528
} | class ____(AnsibleJinja2Plugin):
@property
def plugin_type(self) -> str:
return "filter"
def _no_options(self, *args, **kwargs) -> t.NoReturn:
raise NotImplementedError("Jinja2 filter plugins do not support option functions, they use direct arguments instead.")
| AnsibleJinja2Filter |
python | PrefectHQ__prefect | tests/test_tasks.py | {
"start": 149317,
"end": 155915
} | class ____:
def test_noniterable_hook_raises(self):
def running_hook():
pass
with pytest.raises(
TypeError,
match=re.escape(
"Expected iterable for 'on_running'; got function instead. Please"
" provide a list of hooks to 'on_running':\n\n"
"@task(on_running=[hook1, hook2])\ndef my_task():\n\tpass"
),
):
@task(on_running=running_hook)
def task1():
pass
def test_noncallable_hook_raises(self):
with pytest.raises(
TypeError,
match=re.escape(
"Expected callables in 'on_running'; got str instead. Please provide"
" a list of hooks to 'on_running':\n\n"
"@task(on_running=[hook1, hook2])\ndef my_task():\n\tpass"
),
):
@task(on_running=["test"])
def task1():
pass
def test_callable_noncallable_hook_raises(self):
def running_hook():
pass
with pytest.raises(
TypeError,
match=re.escape(
"Expected callables in 'on_running'; got str instead. Please provide"
" a list of hooks to 'on_running':\n\n"
"@task(on_running=[hook1, hook2])\ndef my_task():\n\tpass"
),
):
@task(on_running=[running_hook, "test"])
def task2():
pass
def test_decorated_on_running_hooks_run_on_running(self):
my_mock = MagicMock()
@task
def my_task():
pass
@my_task.on_running
def running1(task, task_run, state):
my_mock("running1")
@my_task.on_running
def running2(task, task_run, state):
my_mock("running2")
@flow
def my_flow():
return my_task(return_state=True)
state = my_flow()
assert state.type == StateType.COMPLETED
assert my_mock.call_args_list == [call("running1"), call("running2")]
def test_on_running_hooks_run_on_running(self):
my_mock = MagicMock()
def running1(task, task_run, state):
my_mock("running1")
def running2(task, task_run, state):
my_mock("running2")
@task(on_running=[running1, running2])
def my_task():
pass
@flow
def my_flow():
return my_task(return_state=True)
state = my_flow()
assert state.type == StateType.COMPLETED
assert my_mock.call_args_list == [call("running1"), call("running2")]
def test_on_running_hooks_run_on_both_completed_and_failed(self):
my_mock = MagicMock()
def running1(task, task_run, state):
my_mock("running")
@task(on_running=[running1])
def successful_task():
pass
@task(on_running=[running1])
def failing_task():
raise Exception("oops")
@flow
def my_flow():
successful_task()
try:
failing_task()
except Exception:
pass
my_flow()
assert my_mock.call_args_list == [call("running"), call("running")]
def test_other_running_hooks_run_if_a_hook_fails(self):
my_mock = MagicMock()
def running1(task, task_run, state):
my_mock("running1")
def exception_hook(task, task_run, state):
raise Exception("bad hook")
def running2(task, task_run, state):
my_mock("running2")
@task(on_running=[running1, exception_hook, running2])
def my_task():
pass
@flow
def my_flow():
return my_task(return_state=True)
state = my_flow()
assert state.type == StateType.COMPLETED
assert my_mock.call_args_list == [call("running1"), call("running2")]
@pytest.mark.parametrize(
"hook1, hook2",
[
(create_hook, create_hook),
(create_hook, create_async_hook),
(create_async_hook, create_hook),
(create_async_hook, create_async_hook),
],
)
def test_on_running_hooks_work_with_sync_and_async(self, hook1, hook2):
my_mock = MagicMock()
hook1_with_mock = hook1(my_mock)
hook2_with_mock = hook2(my_mock)
@task(on_running=[hook1_with_mock, hook2_with_mock])
def my_task():
pass
@flow
def my_flow():
return my_task(return_state=True)
state = my_flow()
assert state.type == StateType.COMPLETED
assert my_mock.call_args_list == [call(), call()]
def test_on_running_hooks_fire_on_retry_with_delay(self):
"""Test that on_running hooks fire on initial run AND on retry with delay."""
my_mock = MagicMock()
def running_hook(task, task_run, state):
my_mock("running")
@task(on_running=[running_hook], retries=1, retry_delay_seconds=0.1)
def my_task():
# Fail on first run, succeed on retry
if my_mock.call_count < 2:
raise ValueError("failing")
return "success"
@flow
def my_flow():
return my_task(return_state=True)
state = my_flow()
assert state.type == StateType.COMPLETED
# Hook should fire twice: once on initial run, once on retry
assert my_mock.call_args_list == [call("running"), call("running")]
def test_on_running_hooks_fire_on_retry_without_delay(self):
"""Test that on_running hooks fire on initial run AND on retry without delay."""
my_mock = MagicMock()
def running_hook(task, task_run, state):
my_mock("running")
@task(on_running=[running_hook], retries=1)
def my_task():
# Fail on first run, succeed on retry
if my_mock.call_count < 2:
raise ValueError("failing")
return "success"
@flow
def my_flow():
return my_task(return_state=True)
state = my_flow()
assert state.type == StateType.COMPLETED
# Hook should fire twice: once on initial run, once on retry
assert my_mock.call_args_list == [call("running"), call("running")]
async def test_task_condition_fn_raises_when_not_a_callable(self):
with pytest.raises(TypeError):
@task(retry_condition_fn="not a callable")
def my_task(): ...
| TestTaskHooksOnRunning |
python | ray-project__ray | release/train_tests/benchmark/config.py | {
"start": 497,
"end": 809
} | class ____(TaskConfig):
TASK_NAME: ClassVar[str] = "image_classification"
class ImageFormat(enum.Enum):
JPEG = "jpeg"
PARQUET = "parquet"
image_classification_local_dataset: bool = False
image_classification_data_format: ImageFormat = ImageFormat.PARQUET
| ImageClassificationConfig |
python | allegroai__clearml | examples/reporting/hyper_parameters.py | {
"start": 1103,
"end": 1158
} | class ____(Enum):
A = 'a'
B = 'b'
| StringEnumClass |
python | ray-project__ray | rllib/env/tests/test_multi_agent_env.py | {
"start": 949,
"end": 2930
} | class ____(MultiAgentEnv):
"""Env of N independent agents, each of which exits after 25 steps."""
metadata = {
"render.modes": ["rgb_array"],
}
render_mode = "rgb_array"
def __init__(self, num):
super().__init__()
self.envs = [MockEnv(25) for _ in range(num)]
self.agents = list(range(num))
self.terminateds = set()
self.truncateds = set()
self.observation_space = gym.spaces.Discrete(2)
self.action_space = gym.spaces.Discrete(2)
self.resetted = False
def reset(self, *, seed=None, options=None):
# Call super's `reset()` method to set the np_random with the value of `seed`.
# Note: This call to super does NOT return anything.
super().reset(seed=seed)
self.resetted = True
self.terminateds = set()
self.truncateds = set()
reset_results = [a.reset() for a in self.envs]
return (
{i: oi[0] for i, oi in enumerate(reset_results)},
{i: oi[1] for i, oi in enumerate(reset_results)},
)
def step(self, action_dict):
obs, rew, terminated, truncated, info = {}, {}, {}, {}, {}
for i, action in action_dict.items():
obs[i], rew[i], terminated[i], truncated[i], info[i] = self.envs[i].step(
action
)
if terminated[i]:
self.terminateds.add(i)
if truncated[i]:
self.truncateds.add(i)
terminated["__all__"] = len(self.terminateds) == len(self.envs)
truncated["__all__"] = len(self.truncateds) == len(self.envs)
return obs, rew, terminated, truncated, info
def render(self):
# Just generate a random image here for demonstration purposes.
# Also see `gym/envs/classic_control/cartpole.py` for
# an example on how to use a Viewer object.
return np.random.randint(0, 256, size=(200, 300, 3), dtype=np.uint8)
| BasicMultiAgent |
python | jazzband__django-oauth-toolkit | oauth2_provider/exceptions.py | {
"start": 1560,
"end": 1676
} | class ____(OIDCError):
error = "logout_denied"
description = "Logout has been refused by the user."
| LogoutDenied |
python | huggingface__transformers | src/transformers/models/udop/modeling_udop.py | {
"start": 1995,
"end": 9480
} | class ____(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only
the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Attention mask used in the model's forward pass to avoid performing attention on padding token indices.
Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the
self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks)
that can be used (see `past_key_values` input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
used to compute the weighted average in the cross-attention heads.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
attention_mask: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
def get_visual_bbox(image_size=224, patch_size=16):
image_feature_pool_shape = [image_size // patch_size, image_size // patch_size]
visual_bbox_x = torch.arange(0, 1.0 * (image_feature_pool_shape[1] + 1), 1.0)
visual_bbox_x /= image_feature_pool_shape[1]
visual_bbox_y = torch.arange(0, 1.0 * (image_feature_pool_shape[0] + 1), 1.0)
visual_bbox_y /= image_feature_pool_shape[0]
visual_bbox_input = torch.stack(
[
visual_bbox_x[:-1].repeat(image_feature_pool_shape[0], 1),
visual_bbox_y[:-1].repeat(image_feature_pool_shape[1], 1).transpose(0, 1),
visual_bbox_x[1:].repeat(image_feature_pool_shape[0], 1),
visual_bbox_y[1:].repeat(image_feature_pool_shape[1], 1).transpose(0, 1),
],
dim=-1,
)
visual_bbox_input = visual_bbox_input.view(-1, 4)
return visual_bbox_input
def pad_sequence(seq, target_len, pad_value=0):
if isinstance(seq, torch.Tensor):
n = seq.shape[0]
else:
n = len(seq)
seq = torch.tensor(seq)
m = target_len - n
if m > 0:
ret = torch.stack([pad_value] * m).to(seq)
seq = torch.cat([seq, ret], dim=0)
return seq[:target_len]
def combine_image_text_embeddings(
image_embeddings,
inputs_embeds,
bbox,
visual_bbox,
attention_mask=None,
num_patches=14,
max_len=0,
image_size=224,
patch_size=16,
):
"""
Combine the image and text embeddings for the input to the encoder/decoder of UDOP.
First, the image embeddings are created by checking for each visual patch if it is inside the bounding box of a
token. If it is, the visual patch is combined with the token embedding. Then, the visual bounding boxes are combined
with the text bounding boxes. Finally, the visual bounding boxes are combined with the text attention mask.
"""
sequence_length = num_patches
ocr_points_x = torch.clip(
torch.floor((bbox[:, :, 0] + bbox[:, :, 2]) / 2.0 * sequence_length).long(), 0, sequence_length - 1
)
ocr_points_y = (
torch.clip(torch.floor((bbox[:, :, 1] + bbox[:, :, 3]) / 2.0 * sequence_length).long(), 0, sequence_length - 1)
* sequence_length
)
ocr_points = ocr_points_x + ocr_points_y
# make sure bounding boxes are of type float to calculate means
bbox = bbox.to(torch.float64)
target_seg = (bbox.mean(-1) == 0.0) | (bbox.mean(-1) == 1.0)
repeated_vision_embeds = torch.gather(
image_embeddings, 1, ocr_points.unsqueeze(-1).repeat(1, 1, image_embeddings.size(-1))
)
repeated_vision_embeds[target_seg] = 0.0
inputs_embeds += repeated_vision_embeds
patch_inds = torch.full_like(image_embeddings[:, :, 0], True).bool()
ind = torch.cat(
[
torch.arange(len(ocr_points))[:, None].repeat(1, ocr_points.size(-1))[:, :, None].to(ocr_points),
ocr_points[:, :, None],
],
dim=-1,
)
ind = ind.flatten(0, 1)
rows, cols = zip(*ind)
patch_inds[rows, cols] = False
input_vision_patches = [image_embeddings[i][patch_inds[i]] for i in range(len(patch_inds))]
if visual_bbox is None:
visual_bbox = get_visual_bbox(image_size=image_size, patch_size=patch_size)
visual_bbox = visual_bbox.unsqueeze(0).repeat(image_embeddings.size(0), 1, 1)
visual_bbox = visual_bbox.to(image_embeddings.device)
visual_bbox = [visual_bbox[i][patch_inds[i]] for i in range(len(patch_inds))]
if attention_mask is not None:
visual_attention_mask = [torch.tensor([1] * len(item)).to(attention_mask) for item in visual_bbox]
if max_len == 0:
max_len = image_embeddings.size(1)
else:
max_len = max_len - inputs_embeds.size(1)
inputs_vision_patches = torch.stack(
[pad_sequence(item, max_len, torch.zeros_like(image_embeddings[0, 0])) for item in input_vision_patches]
)
visual_bbox = torch.stack([pad_sequence(item, max_len, torch.zeros_like(bbox[0, 0])) for item in visual_bbox])
if attention_mask is not None:
visual_attention_mask = torch.stack(
[pad_sequence(item, max_len, torch.zeros_like(attention_mask[0, 0])) for item in visual_attention_mask]
)
inputs_embeds = torch.cat([inputs_embeds, inputs_vision_patches], 1)
bbox = torch.cat([bbox, visual_bbox], 1)
if attention_mask is not None:
attention_mask = torch.cat([attention_mask, visual_attention_mask], 1)
return inputs_embeds, bbox, attention_mask
| BaseModelOutputWithAttentionMask |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 138191,
"end": 139421
} | class ____(Set):
"""
A helper class, useful to compare two lists without reference to the order
of elements.
FrozenMultiset represents a hashable set that allows duplicate elements.
"""
def __init__(self, iterable):
self._collection = frozenset(Counter(iterable).items())
def __contains__(self, y):
"""
>>> (0, 1) in _FrozenMultiset([(0, 1), (2,), (0, 1)])
True
"""
return any(y == x for x, _ in self._collection)
def __iter__(self):
"""
>>> sorted(_FrozenMultiset([(0, 1), (2,), (0, 1)]))
[(0, 1), (0, 1), (2,)]
"""
return (x for x, c in self._collection for _ in range(c))
def __len__(self):
"""
>>> len(_FrozenMultiset([(0, 1), (2,), (0, 1)]))
3
"""
return sum(c for x, c in self._collection)
def has_duplicates(self):
"""
>>> _FrozenMultiset([(0, 1), (2,), (0, 1)]).has_duplicates()
True
"""
return any(c != 1 for _, c in self._collection)
def __hash__(self):
return hash(self._collection)
def __repr__(self):
        return f'FrozenSet([{", ".join(repr(x) for x in iter(self))}])'
| _FrozenMultiset |
python | django__django | tests/cache/tests.py | {
"start": 10495,
"end": 46129
} | class ____:
# A common set of tests to apply to all cache backends
factory = RequestFactory()
# Some clients raise custom exceptions when .incr() or .decr() are called
# with a non-integer value.
incr_decr_type_error = TypeError
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_default_used_when_none_is_set(self):
"""If None is cached, get() returns it instead of the default."""
cache.set("key_default_none", None)
self.assertIsNone(cache.get("key_default_none", default="default"))
def test_add(self):
# A key can be added to a cache
self.assertIs(cache.add("addkey1", "value"), True)
self.assertIs(cache.add("addkey1", "newvalue"), False)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set("somekey", "value")
# should not be set in the prefixed cache
self.assertIs(caches["prefix"].has_key("somekey"), False)
caches["prefix"].set("somekey", "value2")
self.assertEqual(cache.get("somekey"), "value")
self.assertEqual(caches["prefix"].get("somekey"), "value2")
def test_non_existent(self):
"""Nonexistent cache keys return as None/default."""
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set_many({"a": "a", "b": "b", "c": "c", "d": "d"})
self.assertEqual(
cache.get_many(["a", "c", "d"]), {"a": "a", "c": "c", "d": "d"}
)
self.assertEqual(cache.get_many(["a", "b", "e"]), {"a": "a", "b": "b"})
self.assertEqual(cache.get_many(iter(["a", "b", "e"])), {"a": "a", "b": "b"})
cache.set_many({"x": None, "y": 1})
self.assertEqual(cache.get_many(["x", "y"]), {"x": None, "y": 1})
def test_delete(self):
# Cache keys can be deleted
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertIs(cache.delete("key1"), True)
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_delete_nonexistent(self):
self.assertIs(cache.delete("nonexistent_key"), False)
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertIs(cache.has_key("hello1"), True)
self.assertIs(cache.has_key("goodbye1"), False)
cache.set("no_expiry", "here", None)
self.assertIs(cache.has_key("no_expiry"), True)
cache.set("null", None)
self.assertIs(cache.has_key("null"), True)
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
cache.set("null", None)
self.assertIn("null", cache)
def test_incr(self):
# Cache values can be incremented
cache.set("answer", 41)
self.assertEqual(cache.incr("answer"), 42)
self.assertEqual(cache.get("answer"), 42)
self.assertEqual(cache.incr("answer", 10), 52)
self.assertEqual(cache.get("answer"), 52)
self.assertEqual(cache.incr("answer", -10), 42)
with self.assertRaises(ValueError):
cache.incr("does_not_exist")
with self.assertRaises(ValueError):
cache.incr("does_not_exist", -1)
cache.set("null", None)
with self.assertRaises(self.incr_decr_type_error):
cache.incr("null")
def test_decr(self):
# Cache values can be decremented
cache.set("answer", 43)
self.assertEqual(cache.decr("answer"), 42)
self.assertEqual(cache.get("answer"), 42)
self.assertEqual(cache.decr("answer", 10), 32)
self.assertEqual(cache.get("answer"), 32)
self.assertEqual(cache.decr("answer", -10), 42)
with self.assertRaises(ValueError):
cache.decr("does_not_exist")
with self.assertRaises(ValueError):
cache.incr("does_not_exist", -1)
cache.set("null", None)
with self.assertRaises(self.incr_decr_type_error):
cache.decr("null")
def test_close(self):
self.assertTrue(hasattr(cache, "close"))
cache.close()
def test_data_types(self):
# Many different data types can be cached
tests = {
"string": "this is a string",
"int": 42,
"bool": True,
"list": [1, 2, 3, 4],
"tuple": (1, 2, 3, 4),
"dict": {"A": 1, "B": 2},
"function": f,
"class": C,
}
for key, value in tests.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertEqual(cache.get(key), value)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set("question", my_poll)
cached_poll = cache.get("question")
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache
# write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.defer("question")
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set("deferred_queryset", defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.defer("question")
self.assertEqual(defer_qs.count(), 1)
cache.set("deferred_queryset", defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get("deferred_queryset")
# We only want the default expensive calculation run on creation and
# set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set("expire1", "very quickly", 1)
cache.set("expire2", "very quickly", 1)
cache.set("expire3", "very quickly", 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
self.assertIs(cache.add("expire2", "newvalue"), True)
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertIs(cache.has_key("expire3"), False)
@retry()
def test_touch(self):
# cache.touch() updates the timeout.
cache.set("expire1", "very quickly", timeout=1)
self.assertIs(cache.touch("expire1", timeout=4), True)
time.sleep(2)
self.assertIs(cache.has_key("expire1"), True)
time.sleep(3)
self.assertIs(cache.has_key("expire1"), False)
# cache.touch() works without the timeout argument.
cache.set("expire1", "very quickly", timeout=1)
self.assertIs(cache.touch("expire1"), True)
time.sleep(2)
self.assertIs(cache.has_key("expire1"), True)
self.assertIs(cache.touch("nonexistent"), False)
def test_unicode(self):
# Unicode values can be cached
stuff = {
"ascii": "ascii_value",
"unicode_ascii": "Iñtërnâtiônàlizætiøn1",
"Iñtërnâtiônàlizætiøn": "Iñtërnâtiônàlizætiøn2",
"ascii2": {"x": 1},
}
# Test `set`
for key, value in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for key, value in stuff.items():
with self.subTest(key=key):
self.assertIs(cache.delete(key), True)
self.assertIs(cache.add(key, value), True)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for key, value in stuff.items():
self.assertIs(cache.delete(key), True)
cache.set_many(stuff)
for key, value in stuff.items():
with self.subTest(key=key):
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = "value_to_be_compressed"
compressed_value = compress(value.encode())
# Test set
cache.set("binary1", compressed_value)
compressed_result = cache.get("binary1")
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
self.assertIs(cache.add("binary1-add", compressed_value), True)
compressed_result = cache.get("binary1-add")
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({"binary1-set_many": compressed_value})
compressed_result = cache.get("binary1-set_many")
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_returns_empty_list_on_success(self):
"""set_many() returns an empty list when all keys are inserted."""
failing_keys = cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(failing_keys, [])
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_set_many_empty_data(self):
self.assertEqual(cache.set_many({}), [])
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set_many({"key1": "spam", "key2": "eggs", "key3": "ham"})
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_delete_many_no_keys(self):
self.assertIsNone(cache.delete_many([]))
def test_clear(self):
# The cache can be emptied using clear
cache.set_many({"key1": "spam", "key2": "eggs"})
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
Follow memcached's convention where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399).
"""
cache.set("key1", "eggs", 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get("key1"), "eggs")
self.assertIs(cache.add("key2", "ham", 60 * 60 * 24 * 30 + 1), True)
self.assertEqual(cache.get("key2"), "ham")
cache.set_many(
{"key3": "sausage", "key4": "lobster bisque"}, 60 * 60 * 24 * 30 + 1
)
self.assertEqual(cache.get("key3"), "sausage")
self.assertEqual(cache.get("key4"), "lobster bisque")
@retry()
def test_forever_timeout(self):
"""
Passing in None into timeout results in a value that is cached forever
"""
cache.set("key1", "eggs", None)
self.assertEqual(cache.get("key1"), "eggs")
self.assertIs(cache.add("key2", "ham", None), True)
self.assertEqual(cache.get("key2"), "ham")
self.assertIs(cache.add("key1", "new eggs", None), False)
self.assertEqual(cache.get("key1"), "eggs")
cache.set_many({"key3": "sausage", "key4": "lobster bisque"}, None)
self.assertEqual(cache.get("key3"), "sausage")
self.assertEqual(cache.get("key4"), "lobster bisque")
cache.set("key5", "belgian fries", timeout=1)
self.assertIs(cache.touch("key5", timeout=None), True)
time.sleep(2)
self.assertEqual(cache.get("key5"), "belgian fries")
def test_zero_timeout(self):
"""
Passing in zero into timeout results in a value that is not cached
"""
cache.set("key1", "eggs", 0)
self.assertIsNone(cache.get("key1"))
self.assertIs(cache.add("key2", "ham", 0), True)
self.assertIsNone(cache.get("key2"))
cache.set_many({"key3": "sausage", "key4": "lobster bisque"}, 0)
self.assertIsNone(cache.get("key3"))
self.assertIsNone(cache.get("key4"))
cache.set("key5", "belgian fries", timeout=5)
self.assertIs(cache.touch("key5", timeout=0), True)
self.assertIsNone(cache.get("key5"))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache_name, initial_count, final_count):
try:
cull_cache = caches[cull_cache_name]
except InvalidCacheBackendError:
self.skipTest("Culling isn't implemented.")
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set("cull%d" % i, "value", 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key("cull%d" % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test("cull", 50, 29)
def test_zero_cull(self):
self._perform_cull_test("zero_cull", 50, 19)
def test_cull_delete_when_store_empty(self):
try:
cull_cache = caches["cull"]
except InvalidCacheBackendError:
self.skipTest("Culling isn't implemented.")
old_max_entries = cull_cache._max_entries
# Force _cull to delete on first cached record.
cull_cache._max_entries = -1
try:
cull_cache.set("force_cull_delete", "value", 1000)
self.assertIs(cull_cache.has_key("force_cull_delete"), True)
finally:
cull_cache._max_entries = old_max_entries
def _perform_invalid_key_test(self, key, expected_warning, key_func=None):
"""
All the builtin backends should warn (except memcached that should
error) on keys that would be refused by memcached. This encourages
portable caching code without making it too difficult to use production
backends with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = key_func or func
tests = [
("add", [key, 1]),
("get", [key]),
("set", [key, 1]),
("incr", [key]),
("decr", [key]),
("touch", [key]),
("delete", [key]),
("get_many", [[key, "b"]]),
("set_many", [{key: 1, "b": 2}]),
("delete_many", [[key, "b"]]),
]
try:
for operation, args in tests:
with self.subTest(operation=operation):
with self.assertWarns(CacheKeyWarning) as cm:
getattr(cache, operation)(*args)
self.assertEqual(str(cm.warning), expected_warning)
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = "key with spaces and 清"
self._perform_invalid_key_test(key, KEY_ERRORS_WITH_MEMCACHED_MSG % key)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ("a" * 250) + "清"
expected_warning = (
"Cache key will cause errors if used with memcached: "
"%r (longer than %s)" % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_invalid_with_version_key_length(self):
# Custom make_key() that adds a version to the key and exceeds the
# limit.
def key_func(key, *args):
return key + ":1"
key = "a" * 249
expected_warning = (
"Cache key will cause errors if used with memcached: "
"%r (longer than %s)" % (key_func(key), 250)
)
self._perform_invalid_key_test(key, expected_warning, key_func=key_func)
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set("answer1", 42)
self.assertEqual(cache.get("answer1"), 42)
self.assertEqual(cache.get("answer1", version=1), 42)
self.assertIsNone(cache.get("answer1", version=2))
self.assertIsNone(caches["v2"].get("answer1"))
self.assertEqual(caches["v2"].get("answer1", version=1), 42)
self.assertIsNone(caches["v2"].get("answer1", version=2))
# set, default version = 1, but manually override version = 2
cache.set("answer2", 42, version=2)
self.assertIsNone(cache.get("answer2"))
self.assertIsNone(cache.get("answer2", version=1))
self.assertEqual(cache.get("answer2", version=2), 42)
self.assertEqual(caches["v2"].get("answer2"), 42)
self.assertIsNone(caches["v2"].get("answer2", version=1))
self.assertEqual(caches["v2"].get("answer2", version=2), 42)
# v2 set, using default version = 2
caches["v2"].set("answer3", 42)
self.assertIsNone(cache.get("answer3"))
self.assertIsNone(cache.get("answer3", version=1))
self.assertEqual(cache.get("answer3", version=2), 42)
self.assertEqual(caches["v2"].get("answer3"), 42)
self.assertIsNone(caches["v2"].get("answer3", version=1))
self.assertEqual(caches["v2"].get("answer3", version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches["v2"].set("answer4", 42, version=1)
self.assertEqual(cache.get("answer4"), 42)
self.assertEqual(cache.get("answer4", version=1), 42)
self.assertIsNone(cache.get("answer4", version=2))
self.assertIsNone(caches["v2"].get("answer4"))
self.assertEqual(caches["v2"].get("answer4", version=1), 42)
self.assertIsNone(caches["v2"].get("answer4", version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
self.assertIs(cache.add("answer1", 42, version=2), True)
self.assertIsNone(cache.get("answer1", version=1))
self.assertEqual(cache.get("answer1", version=2), 42)
self.assertIs(cache.add("answer1", 37, version=2), False)
self.assertIsNone(cache.get("answer1", version=1))
self.assertEqual(cache.get("answer1", version=2), 42)
self.assertIs(cache.add("answer1", 37, version=1), True)
self.assertEqual(cache.get("answer1", version=1), 37)
self.assertEqual(cache.get("answer1", version=2), 42)
# v2 add, using default version = 2
self.assertIs(caches["v2"].add("answer2", 42), True)
self.assertIsNone(cache.get("answer2", version=1))
self.assertEqual(cache.get("answer2", version=2), 42)
self.assertIs(caches["v2"].add("answer2", 37), False)
self.assertIsNone(cache.get("answer2", version=1))
self.assertEqual(cache.get("answer2", version=2), 42)
self.assertIs(caches["v2"].add("answer2", 37, version=1), True)
self.assertEqual(cache.get("answer2", version=1), 37)
self.assertEqual(cache.get("answer2", version=2), 42)
# v2 add, default version = 2, but manually override version = 1
self.assertIs(caches["v2"].add("answer3", 42, version=1), True)
self.assertEqual(cache.get("answer3", version=1), 42)
self.assertIsNone(cache.get("answer3", version=2))
self.assertIs(caches["v2"].add("answer3", 37, version=1), False)
self.assertEqual(cache.get("answer3", version=1), 42)
self.assertIsNone(cache.get("answer3", version=2))
self.assertIs(caches["v2"].add("answer3", 37), True)
self.assertEqual(cache.get("answer3", version=1), 42)
self.assertEqual(cache.get("answer3", version=2), 37)
def test_cache_versioning_has_key(self):
cache.set("answer1", 42)
# has_key
self.assertIs(cache.has_key("answer1"), True)
self.assertIs(cache.has_key("answer1", version=1), True)
self.assertIs(cache.has_key("answer1", version=2), False)
self.assertIs(caches["v2"].has_key("answer1"), False)
self.assertIs(caches["v2"].has_key("answer1", version=1), True)
self.assertIs(caches["v2"].has_key("answer1", version=2), False)
def test_cache_versioning_delete(self):
cache.set("answer1", 37, version=1)
cache.set("answer1", 42, version=2)
self.assertIs(cache.delete("answer1"), True)
self.assertIsNone(cache.get("answer1", version=1))
self.assertEqual(cache.get("answer1", version=2), 42)
cache.set("answer2", 37, version=1)
cache.set("answer2", 42, version=2)
self.assertIs(cache.delete("answer2", version=2), True)
self.assertEqual(cache.get("answer2", version=1), 37)
self.assertIsNone(cache.get("answer2", version=2))
cache.set("answer3", 37, version=1)
cache.set("answer3", 42, version=2)
self.assertIs(caches["v2"].delete("answer3"), True)
self.assertEqual(cache.get("answer3", version=1), 37)
self.assertIsNone(cache.get("answer3", version=2))
cache.set("answer4", 37, version=1)
cache.set("answer4", 42, version=2)
self.assertIs(caches["v2"].delete("answer4", version=1), True)
self.assertIsNone(cache.get("answer4", version=1))
self.assertEqual(cache.get("answer4", version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set("answer1", 37, version=1)
cache.set("answer1", 42, version=2)
self.assertEqual(cache.incr("answer1"), 38)
self.assertEqual(cache.get("answer1", version=1), 38)
self.assertEqual(cache.get("answer1", version=2), 42)
self.assertEqual(cache.decr("answer1"), 37)
self.assertEqual(cache.get("answer1", version=1), 37)
self.assertEqual(cache.get("answer1", version=2), 42)
cache.set("answer2", 37, version=1)
cache.set("answer2", 42, version=2)
self.assertEqual(cache.incr("answer2", version=2), 43)
self.assertEqual(cache.get("answer2", version=1), 37)
self.assertEqual(cache.get("answer2", version=2), 43)
self.assertEqual(cache.decr("answer2", version=2), 42)
self.assertEqual(cache.get("answer2", version=1), 37)
self.assertEqual(cache.get("answer2", version=2), 42)
cache.set("answer3", 37, version=1)
cache.set("answer3", 42, version=2)
self.assertEqual(caches["v2"].incr("answer3"), 43)
self.assertEqual(cache.get("answer3", version=1), 37)
self.assertEqual(cache.get("answer3", version=2), 43)
self.assertEqual(caches["v2"].decr("answer3"), 42)
self.assertEqual(cache.get("answer3", version=1), 37)
self.assertEqual(cache.get("answer3", version=2), 42)
cache.set("answer4", 37, version=1)
cache.set("answer4", 42, version=2)
self.assertEqual(caches["v2"].incr("answer4", version=1), 38)
self.assertEqual(cache.get("answer4", version=1), 38)
self.assertEqual(cache.get("answer4", version=2), 42)
self.assertEqual(caches["v2"].decr("answer4", version=1), 37)
self.assertEqual(cache.get("answer4", version=1), 37)
self.assertEqual(cache.get("answer4", version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({"ford1": 37, "arthur1": 42})
self.assertEqual(
cache.get_many(["ford1", "arthur1"]), {"ford1": 37, "arthur1": 42}
)
self.assertEqual(
cache.get_many(["ford1", "arthur1"], version=1),
{"ford1": 37, "arthur1": 42},
)
self.assertEqual(cache.get_many(["ford1", "arthur1"], version=2), {})
self.assertEqual(caches["v2"].get_many(["ford1", "arthur1"]), {})
self.assertEqual(
caches["v2"].get_many(["ford1", "arthur1"], version=1),
{"ford1": 37, "arthur1": 42},
)
self.assertEqual(caches["v2"].get_many(["ford1", "arthur1"], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({"ford2": 37, "arthur2": 42}, version=2)
self.assertEqual(cache.get_many(["ford2", "arthur2"]), {})
self.assertEqual(cache.get_many(["ford2", "arthur2"], version=1), {})
self.assertEqual(
cache.get_many(["ford2", "arthur2"], version=2),
{"ford2": 37, "arthur2": 42},
)
self.assertEqual(
caches["v2"].get_many(["ford2", "arthur2"]), {"ford2": 37, "arthur2": 42}
)
self.assertEqual(caches["v2"].get_many(["ford2", "arthur2"], version=1), {})
self.assertEqual(
caches["v2"].get_many(["ford2", "arthur2"], version=2),
{"ford2": 37, "arthur2": 42},
)
# v2 set, using default version = 2
caches["v2"].set_many({"ford3": 37, "arthur3": 42})
self.assertEqual(cache.get_many(["ford3", "arthur3"]), {})
self.assertEqual(cache.get_many(["ford3", "arthur3"], version=1), {})
self.assertEqual(
cache.get_many(["ford3", "arthur3"], version=2),
{"ford3": 37, "arthur3": 42},
)
self.assertEqual(
caches["v2"].get_many(["ford3", "arthur3"]), {"ford3": 37, "arthur3": 42}
)
self.assertEqual(caches["v2"].get_many(["ford3", "arthur3"], version=1), {})
self.assertEqual(
caches["v2"].get_many(["ford3", "arthur3"], version=2),
{"ford3": 37, "arthur3": 42},
)
# v2 set, default version = 2, but manually override version = 1
caches["v2"].set_many({"ford4": 37, "arthur4": 42}, version=1)
self.assertEqual(
cache.get_many(["ford4", "arthur4"]), {"ford4": 37, "arthur4": 42}
)
self.assertEqual(
cache.get_many(["ford4", "arthur4"], version=1),
{"ford4": 37, "arthur4": 42},
)
self.assertEqual(cache.get_many(["ford4", "arthur4"], version=2), {})
self.assertEqual(caches["v2"].get_many(["ford4", "arthur4"]), {})
self.assertEqual(
caches["v2"].get_many(["ford4", "arthur4"], version=1),
{"ford4": 37, "arthur4": 42},
)
self.assertEqual(caches["v2"].get_many(["ford4", "arthur4"], version=2), {})
def test_incr_version(self):
cache.set("answer", 42, version=2)
self.assertIsNone(cache.get("answer"))
self.assertIsNone(cache.get("answer", version=1))
self.assertEqual(cache.get("answer", version=2), 42)
self.assertIsNone(cache.get("answer", version=3))
self.assertEqual(cache.incr_version("answer", version=2), 3)
self.assertIsNone(cache.get("answer"))
self.assertIsNone(cache.get("answer", version=1))
self.assertIsNone(cache.get("answer", version=2))
self.assertEqual(cache.get("answer", version=3), 42)
caches["v2"].set("answer2", 42)
self.assertEqual(caches["v2"].get("answer2"), 42)
self.assertIsNone(caches["v2"].get("answer2", version=1))
self.assertEqual(caches["v2"].get("answer2", version=2), 42)
self.assertIsNone(caches["v2"].get("answer2", version=3))
self.assertEqual(caches["v2"].incr_version("answer2"), 3)
self.assertIsNone(caches["v2"].get("answer2"))
self.assertIsNone(caches["v2"].get("answer2", version=1))
self.assertIsNone(caches["v2"].get("answer2", version=2))
self.assertEqual(caches["v2"].get("answer2", version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version("does_not_exist")
cache.set("null", None)
self.assertEqual(cache.incr_version("null"), 2)
def test_decr_version(self):
cache.set("answer", 42, version=2)
self.assertIsNone(cache.get("answer"))
self.assertIsNone(cache.get("answer", version=1))
self.assertEqual(cache.get("answer", version=2), 42)
self.assertEqual(cache.decr_version("answer", version=2), 1)
self.assertEqual(cache.get("answer"), 42)
self.assertEqual(cache.get("answer", version=1), 42)
self.assertIsNone(cache.get("answer", version=2))
caches["v2"].set("answer2", 42)
self.assertEqual(caches["v2"].get("answer2"), 42)
self.assertIsNone(caches["v2"].get("answer2", version=1))
self.assertEqual(caches["v2"].get("answer2", version=2), 42)
self.assertEqual(caches["v2"].decr_version("answer2"), 1)
self.assertIsNone(caches["v2"].get("answer2"))
self.assertEqual(caches["v2"].get("answer2", version=1), 42)
self.assertIsNone(caches["v2"].get("answer2", version=2))
with self.assertRaises(ValueError):
cache.decr_version("does_not_exist", version=2)
cache.set("null", None, version=2)
self.assertEqual(cache.decr_version("null", version=2), 1)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set("answer1", 42)
self.assertEqual(cache.get("answer1"), 42)
self.assertIsNone(caches["custom_key"].get("answer1"))
self.assertIsNone(caches["custom_key2"].get("answer1"))
caches["custom_key"].set("answer2", 42)
self.assertIsNone(cache.get("answer2"))
self.assertEqual(caches["custom_key"].get("answer2"), 42)
self.assertEqual(caches["custom_key2"].get("answer2"), 42)
@override_settings(CACHE_MIDDLEWARE_ALIAS=DEFAULT_CACHE_ALIAS)
def test_cache_write_unpicklable_object(self):
fetch_middleware = FetchFromCacheMiddleware(empty_response)
request = self.factory.get("/cache/test")
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(
request
)
self.assertIsNone(get_cache_data)
content = "Testing cookie serialization."
def get_response(req):
response = HttpResponse(content)
response.set_cookie("foo", "bar")
return response
update_middleware = UpdateCacheMiddleware(get_response)
response = update_middleware(request)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
UpdateCacheMiddleware(lambda req: get_cache_data)(request)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add("unpicklable", Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set("unpicklable", Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get("projector"))
self.assertEqual(cache.get_or_set("projector", 42), 42)
self.assertEqual(cache.get("projector"), 42)
self.assertIsNone(cache.get_or_set("null", None))
# Previous get_or_set() stores None in the cache.
self.assertIsNone(cache.get("null", "default"))
def test_get_or_set_callable(self):
def my_callable():
return "value"
self.assertEqual(cache.get_or_set("mykey", my_callable), "value")
self.assertEqual(cache.get_or_set("mykey", my_callable()), "value")
self.assertIsNone(cache.get_or_set("null", lambda: None))
# Previous get_or_set() stores None in the cache.
self.assertIsNone(cache.get("null", "default"))
def test_get_or_set_version(self):
msg = "get_or_set() missing 1 required positional argument: 'default'"
self.assertEqual(cache.get_or_set("brian", 1979, version=2), 1979)
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set("brian")
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set("brian", version=1)
self.assertIsNone(cache.get("brian", version=1))
self.assertEqual(cache.get_or_set("brian", 42, version=1), 42)
self.assertEqual(cache.get_or_set("brian", 1979, version=2), 1979)
self.assertIsNone(cache.get("brian", version=3))
def test_get_or_set_racing(self):
with mock.patch(
"%s.%s" % (settings.CACHES["default"]["BACKEND"], "add")
) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set("key", "default"), "default")
@override_settings(
CACHES=caches_setting_for_tests(
BACKEND="django.core.cache.backends.db.DatabaseCache",
# Spaces are used in the table name to ensure quoting/escaping is
# working
LOCATION="test cache table",
)
)
| BaseCacheTests |
python | doocs__leetcode | solution/2900-2999/2944.Minimum Number of Coins for Fruits/Solution.py | {
"start": 0,
"end": 294
} | class ____:
def minimumCoins(self, prices: List[int]) -> int:
@cache
def dfs(i: int) -> int:
if i * 2 >= len(prices):
return prices[i - 1]
return prices[i - 1] + min(dfs(j) for j in range(i + 1, i * 2 + 2))
return dfs(1)
| Solution |
python | getsentry__sentry | src/sentry/mail/forms/member_team.py | {
"start": 327,
"end": 3214
} | class ____(forms.Form, Generic[T]):
targetType = forms.ChoiceField()
targetIdentifier = forms.CharField(
required=False, help_text="Only required if 'Member' or 'Team' is selected"
)
teamValue: T
memberValue: T
targetTypeEnum: type[T]
def __init__(self, project: Project, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.project = project
def clean_targetIdentifier(self) -> int | None:
targetIdentifier = self.cleaned_data.get("targetIdentifier")
# XXX: Clean up some bad data in the database
if targetIdentifier == "None":
targetIdentifier = None
if targetIdentifier:
try:
targetIdentifier = int(targetIdentifier)
except ValueError:
raise forms.ValidationError("targetIdentifier must be an integer")
return targetIdentifier
def clean(self) -> None:
super().clean()
cleaned_data = self.cleaned_data
try:
targetType = self.targetTypeEnum(cleaned_data.get("targetType"))
except ValueError:
msg = forms.ValidationError("Invalid targetType specified")
self.add_error("targetType", msg)
return
targetIdentifier = cleaned_data.get("targetIdentifier")
self.cleaned_data["targetType"] = targetType.value
if targetType != self.teamValue and targetType != self.memberValue:
return
if not targetIdentifier:
msg = forms.ValidationError("You need to specify a Team or Member.")
self.add_error("targetIdentifier", msg)
return
if (
targetType == self.teamValue
and not Project.objects.filter(
teams__id=int(targetIdentifier), id=self.project.id
).exists()
):
msg = forms.ValidationError("This team is not part of the project.")
self.add_error("targetIdentifier", msg)
return
if targetType == self.memberValue:
is_active_team_member = OrganizationMemberTeam.objects.filter(
is_active=True,
organizationmember__user_id=int(targetIdentifier),
organizationmember__teams__projectteam__project_id=self.project.id,
).exists()
if is_active_team_member:
is_active_team_member = bool(
user_service.get_many(
filter=dict(user_ids=[int(targetIdentifier)], is_active=True)
)
)
if not is_active_team_member:
msg = forms.ValidationError("This user is not part of the project.")
self.add_error("targetIdentifier", msg)
return
self.cleaned_data["targetIdentifier"] = targetIdentifier
| MemberTeamForm |
python | django__django | django/core/files/uploadhandler.py | {
"start": 1245,
"end": 1397
} | class ____(UploadFileException):
"""
This exception is raised by an upload handler that wants to skip a given
file.
"""
pass
| SkipFile |
python | numba__numba | numba/testing/main.py | {
"start": 22728,
"end": 23362
} | class ____(runner.TextTestResult):
"""
A TestResult able to inject results from other results.
"""
def add_results(self, result):
"""
Add the results from the other *result* to this result.
"""
self.stream.write(result.stream.getvalue())
self.stream.flush()
self.testsRun += result.testsRun
self.failures.extend(result.failures)
self.errors.extend(result.errors)
self.skipped.extend(result.skipped)
self.expectedFailures.extend(result.expectedFailures)
self.unexpectedSuccesses.extend(result.unexpectedSuccesses)
| ParallelTestResult |
python | openai__openai-python | src/openai/resources/realtime/realtime.py | {
"start": 7629,
"end": 8062
} | class ____:
def __init__(self, realtime: Realtime) -> None:
self._realtime = realtime
@cached_property
def client_secrets(self) -> ClientSecretsWithStreamingResponse:
return ClientSecretsWithStreamingResponse(self._realtime.client_secrets)
@cached_property
def calls(self) -> CallsWithStreamingResponse:
return CallsWithStreamingResponse(self._realtime.calls)
| RealtimeWithStreamingResponse |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/transfers/facebook_ads_to_gcs.py | {
"start": 1566,
"end": 10362
} | class ____(BaseOperator):
"""
Fetch from Facebook Ads API.
This converts and saves the data as a temporary JSON file, and uploads the
JSON to Google Cloud Storage.
.. seealso::
For more information on the Facebook Ads API, take a look at the API docs:
https://developers.facebook.com/docs/marketing-apis/
.. seealso::
For more information on the Facebook Ads Python SDK, take a look at the docs:
https://github.com/facebook/facebook-python-business-sdk
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:FacebookAdsReportToGcsOperator`
:param bucket_name: The GCS bucket to upload to
:param object_name: GCS path to save the object. Must be the full file path (ex. `path/to/file.txt`)
:param gcp_conn_id: Airflow Google Cloud connection ID
:param facebook_conn_id: Airflow Facebook Ads connection ID
    :param api_version: The version of the Facebook API. Defaults to None; if None,
        the Facebook Business SDK default version is used.
:param fields: List of fields that is obtained from Facebook. Found in AdsInsights.Field class.
https://developers.facebook.com/docs/marketing-api/insights/parameters/v6.0
:param parameters: Parameters that determine the query for Facebook
https://developers.facebook.com/docs/marketing-api/insights/parameters/v6.0
:param gzip: Option to compress local file or file data for upload
:param upload_as_account: Option to export file with account_id
        This parameter only works if the account ID is set as an array in the Facebook connection.
        If set to True, each account's report is exported to a separate file prefixed with its account_id.
        If set to False, a single file is exported for all account ids.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"facebook_conn_id",
"bucket_name",
"object_name",
"impersonation_chain",
"parameters",
)
def __init__(
self,
*,
bucket_name: str,
object_name: str,
fields: list[str],
parameters: dict[str, Any] | None = None,
gzip: bool = False,
upload_as_account: bool = False,
api_version: str | None = None,
gcp_conn_id: str = "google_cloud_default",
facebook_conn_id: str = "facebook_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.object_name = object_name
self.gcp_conn_id = gcp_conn_id
self.facebook_conn_id = facebook_conn_id
self.api_version = api_version
self.fields = fields
self.parameters = parameters
self.gzip = gzip
self.upload_as_account = upload_as_account
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
service = FacebookAdsReportingHook(
facebook_conn_id=self.facebook_conn_id, api_version=self.api_version
)
bulk_report = service.bulk_facebook_report(params=self.parameters, fields=self.fields)
if isinstance(bulk_report, list):
converted_rows_with_action = self._generate_rows_with_action(False)
converted_rows_with_action = self._prepare_rows_for_upload(
rows=bulk_report, converted_rows_with_action=converted_rows_with_action, account_id=None
)
elif isinstance(bulk_report, dict):
converted_rows_with_action = self._generate_rows_with_action(True)
for account_id in bulk_report.keys():
rows = bulk_report.get(account_id, [])
if rows:
converted_rows_with_action = self._prepare_rows_for_upload(
rows=rows,
converted_rows_with_action=converted_rows_with_action,
account_id=account_id,
)
else:
self.log.warning("account_id: %s returned empty report", account_id)
else:
message = (
"Facebook Ads Hook returned different type than expected. Expected return types should be "
f"List or Dict. Actual return type of the Hook: {type(bulk_report)}"
)
raise AirflowException(message)
total_row_count = self._decide_and_flush(converted_rows_with_action=converted_rows_with_action)
self.log.info("Facebook Returned %s data points in total: ", total_row_count)
def _generate_rows_with_action(self, type_check: bool):
if type_check and self.upload_as_account:
return {FlushAction.EXPORT_EVERY_ACCOUNT: []}
return {FlushAction.EXPORT_ONCE: []}
def _prepare_rows_for_upload(
self,
rows: list[AdsInsights],
converted_rows_with_action: dict[FlushAction, list],
account_id: str | None,
):
converted_rows = [dict(row) for row in rows]
if account_id is not None and self.upload_as_account:
converted_rows_with_action[FlushAction.EXPORT_EVERY_ACCOUNT].append(
{"account_id": account_id, "converted_rows": converted_rows}
)
self.log.info(
"Facebook Returned %s data points for account_id: %s", len(converted_rows), account_id
)
else:
converted_rows_with_action[FlushAction.EXPORT_ONCE].extend(converted_rows)
self.log.info("Facebook Returned %s data points ", len(converted_rows))
return converted_rows_with_action
def _decide_and_flush(self, converted_rows_with_action: dict[FlushAction, list]):
total_data_count = 0
once_action = converted_rows_with_action.get(FlushAction.EXPORT_ONCE)
if once_action is not None:
self._flush_rows(
converted_rows=once_action,
object_name=self.object_name,
)
total_data_count += len(once_action)
else:
every_account_action = converted_rows_with_action.get(FlushAction.EXPORT_EVERY_ACCOUNT)
if every_account_action:
for converted_rows in every_account_action:
self._flush_rows(
converted_rows=converted_rows.get("converted_rows"),
object_name=self._transform_object_name_with_account_id(
account_id=converted_rows.get("account_id")
),
)
total_data_count += len(converted_rows.get("converted_rows"))
else:
message = (
"FlushAction not found in the data. Please check the FlushAction in "
f"the operator. Converted Rows with Action: {converted_rows_with_action}"
)
raise AirflowException(message)
return total_data_count
def _flush_rows(self, converted_rows: list[Any] | None, object_name: str):
if converted_rows:
headers = self.fields
with tempfile.NamedTemporaryFile("w", suffix=".csv") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=headers)
writer.writeheader()
writer.writerows(converted_rows)
csvfile.flush()
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.upload(
bucket_name=self.bucket_name,
object_name=object_name,
filename=csvfile.name,
gzip=self.gzip,
)
self.log.info("%s uploaded to GCS", csvfile.name)
def _transform_object_name_with_account_id(self, account_id: str):
directory_parts = self.object_name.split("/")
directory_parts[-1] = f"{account_id}_{directory_parts[-1]}"
return "/".join(directory_parts)
| FacebookAdsReportToGcsOperator |
python | django__django | django/core/signing.py | {
"start": 1920,
"end": 3292
} | class ____(BadSignature):
"""Signature timestamp is older than required max_age."""
pass
def b62_encode(s):
if s == 0:
return "0"
sign = "-" if s < 0 else ""
s = abs(s)
encoded = ""
while s > 0:
s, remainder = divmod(s, 62)
encoded = BASE62_ALPHABET[remainder] + encoded
return sign + encoded
def b62_decode(s):
if s == "0":
return 0
sign = 1
if s[0] == "-":
s = s[1:]
sign = -1
decoded = 0
for digit in s:
decoded = decoded * 62 + BASE62_ALPHABET.index(digit)
return sign * decoded
def b64_encode(s):
return base64.urlsafe_b64encode(s).strip(b"=")
def b64_decode(s):
pad = b"=" * (-len(s) % 4)
return base64.urlsafe_b64decode(s + pad)
def base64_hmac(salt, value, key, algorithm="sha1"):
return b64_encode(
salted_hmac(salt, value, key, algorithm=algorithm).digest()
).decode()
def _cookie_signer_key(key):
# SECRET_KEYS items may be str or bytes.
return b"django.http.cookies" + force_bytes(key)
def get_cookie_signer(salt="django.core.signing.get_cookie_signer"):
Signer = import_string(settings.SIGNING_BACKEND)
return Signer(
key=_cookie_signer_key(settings.SECRET_KEY),
fallback_keys=map(_cookie_signer_key, settings.SECRET_KEY_FALLBACKS),
salt=salt,
)
| SignatureExpired |
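The base-62 helpers in the entry above are easy to sanity-check with a round-trip. A minimal sketch, assuming BASE62_ALPHABET is the digits + uppercase + lowercase ordering that django.core.signing defines outside the shown span:

import string

# Assumed alphabet; the constant is defined outside the span shown above.
BASE62_ALPHABET = string.digits + string.ascii_uppercase + string.ascii_lowercase

# b62_encode / b62_decode from the entry above are assumed in scope.
assert b62_encode(0) == "0"
assert b62_encode(61) == BASE62_ALPHABET[-1]  # single highest digit, "z"
for n in (0, 1, 61, 62, -1979, 10**12):
    assert b62_decode(b62_encode(n)) == n     # encode/decode round-trips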
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 15861,
"end": 17002
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
pause_status: Optional[Literal["PAUSED", "UNPAUSED"]] = Field(
None,
description="Indicate whether this schedule is paused or not.",
examples=["PAUSED"],
)
quartz_cron_expression: str = Field(
...,
description=(
"A Cron expression using Quartz syntax that describes the schedule for a"
" job. See [Cron"
" Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)"
" for details. This field is required."
),
examples=["20 30 * * * ?"],
)
timezone_id: str = Field(
...,
description=(
"A Java timezone ID. The schedule for a job is resolved with respect to"
" this timezone. See [Java"
" TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html)"
" for details. This field is required."
),
examples=["Europe/London"],
)
| CronSchedule |
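A construction sketch for the pydantic model above, assuming it is importable from prefect_databricks.models.jobs (the path column of the row) and that pydantic v2 validation applies to the required and Literal fields shown:

from prefect_databricks.models.jobs import CronSchedule  # import path taken from the row above

schedule = CronSchedule(
    quartz_cron_expression="20 30 * * * ?",  # required; Quartz syntax
    timezone_id="Europe/London",             # required; Java timezone id
    pause_status="PAUSED",                   # optional; must be "PAUSED" or "UNPAUSED"
)
print(schedule.model_dump(exclude_none=True))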
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/s3.py | {
"start": 3287,
"end": 5212
} | class ____(AwsBaseOperator[S3Hook]):
"""
This operator deletes an S3 bucket.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3DeleteBucketOperator`
:param bucket_name: This is bucket name you want to delete
:param force_delete: Forcibly delete all objects in the bucket before deleting the bucket
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
template_fields: Sequence[str] = aws_template_fields("bucket_name")
aws_hook_class = S3Hook
def __init__(
self,
bucket_name: str,
force_delete: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.force_delete = force_delete
def execute(self, context: Context):
if self.hook.check_for_bucket(self.bucket_name):
self.hook.delete_bucket(bucket_name=self.bucket_name, force_delete=self.force_delete)
self.log.info("Deleted bucket with name: %s", self.bucket_name)
else:
self.log.info("Bucket with name: %s doesn't exist", self.bucket_name)
| S3DeleteBucketOperator |
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 19502,
"end": 20265
} | class ____(SQLiteDecimalToFloatMixin, GeomOutputGeoFunc):
def __init__(self, expression, *args, **extra):
nargs = len(args)
expressions = [expression]
if nargs in (1, 2):
expressions.extend(
[self._handle_param(arg, "", NUMERIC_TYPES) for arg in args]
)
elif nargs == 4:
# Reverse origin and size param ordering
expressions += [
*(self._handle_param(arg, "", NUMERIC_TYPES) for arg in args[2:]),
*(self._handle_param(arg, "", NUMERIC_TYPES) for arg in args[0:2]),
]
else:
raise ValueError("Must provide 1, 2, or 4 arguments to `SnapToGrid`.")
super().__init__(*expressions, **extra)
| SnapToGrid |
python | huggingface__transformers | src/transformers/models/detr/modeling_detr.py | {
"start": 76276,
"end": 77807
} | class ____(nn.Module):
"""This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""
def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True, std=None):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5
def forward(self, q, k, mask: Optional[Tensor] = None):
q = self.q_linear(q)
k = nn.functional.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias)
queries_per_head = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads)
keys_per_head = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1])
weights = torch.einsum("bqnc,bnchw->bqnhw", queries_per_head * self.normalize_fact, keys_per_head)
if mask is not None:
weights = weights.masked_fill(mask.unsqueeze(1).unsqueeze(1), torch.finfo(weights.dtype).min)
weights = nn.functional.softmax(weights.flatten(2), dim=-1).view(weights.size())
weights = self.dropout(weights)
return weights
__all__ = [
"DetrForObjectDetection",
"DetrForSegmentation",
"DetrModel",
"DetrPreTrainedModel",
]
| DetrMHAttentionMap |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-localai/llama_index/llms/localai/base.py | {
"start": 1063,
"end": 4466
} | class ____(OpenAI):
"""
LocalAI LLM class.
Examples:
`pip install llama-index-llms-localai`
```python
from llama_index.llms.localai import LocalAI
llm = LocalAI(api_base="http://localhost:8080/v1")
response = llm.complete("Hello!")
print(str(response))
```
"""
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model.",
gt=0,
)
globally_use_chat_completions: Optional[bool] = Field(
default=None,
description=(
"Set None (default) to per-invocation decide on using /chat/completions"
" vs /completions endpoints with query keyword arguments,"
" set False to universally use /completions endpoint,"
" set True to universally use /chat/completions endpoint."
),
)
def __init__(
self,
api_key: Optional[str] = LOCALAI_DEFAULTS["api_key"],
api_base: Optional[str] = LOCALAI_DEFAULTS["api_base"],
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
**kwargs: Any,
) -> None:
super().__init__(
api_key=api_key,
api_base=api_base,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
**kwargs,
)
warnings.warn(
(
f"{type(self).__name__} subclass is deprecated in favor of"
f" {OpenAILike.__name__} composition. The deprecation cycle"
" will complete sometime in late December 2023."
),
DeprecationWarning,
stacklevel=2,
)
@classmethod
def class_name(cls) -> str:
return "LocalAI"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=self.context_window,
num_output=self.max_tokens or -1,
is_chat_model=self._is_chat_model,
is_function_calling_model=is_function_calling_model(
model=self._get_model_name()
),
model_name=self.model,
)
def _update_max_tokens(self, all_kwargs: Dict[str, Any], prompt: str) -> None:
# This subclass only supports max_tokens via LocalAI(..., max_tokens=123)
del all_kwargs, prompt # Unused
# do nothing
@property
def _is_chat_model(self) -> bool:
if self.globally_use_chat_completions is not None:
return self.globally_use_chat_completions
raise NotImplementedError(
"Inferring of when to use /chat/completions is unsupported by"
f" {type(self).__name__}. Please either set 'globally_use_chat_completions'"
" arg during construction, or pass the arg 'use_chat_completions' in your"
" query, setting True for /chat/completions or False for /completions."
)
| LocalAI |
python | PrefectHQ__prefect | src/prefect/concurrency/v1/services.py | {
"start": 715,
"end": 2958
} | class ____(
FutureQueueService[Unpack[tuple[UUID, Optional[float]]], httpx.Response]
):
def __init__(self, concurrency_limit_names: frozenset[str]) -> None:
super().__init__(concurrency_limit_names)
self._client: PrefectClient
self.concurrency_limit_names: list[str] = sorted(list(concurrency_limit_names))
@asynccontextmanager
async def _lifespan(self) -> AsyncGenerator[None, None]:
async with get_client() as client:
self._client = client
yield
async def acquire(
self, task_run_id: UUID, timeout_seconds: Optional[float] = None
) -> httpx.Response:
with timeout_async(seconds=timeout_seconds):
while True:
try:
return await self._client.increment_v1_concurrency_slots(
task_run_id=task_run_id,
names=self.concurrency_limit_names,
)
except httpx.HTTPStatusError as exc:
if not exc.response.status_code == status.HTTP_423_LOCKED:
raise
retry_after = exc.response.headers.get("Retry-After")
if retry_after:
retry_after = float(retry_after)
await asyncio.sleep(retry_after)
else:
# We received a 423 but no Retry-After header. This
# should indicate that the server told us to abort
# because the concurrency limit is set to 0, i.e.
# effectively disabled.
try:
reason = exc.response.json()["detail"]
except (JSONDecodeError, KeyError):
logger.error(
"Failed to parse response from concurrency limit 423 Locked response: %s",
exc.response.content,
)
reason = "Concurrency limit is locked (server did not specify the reason)"
raise ConcurrencySlotAcquisitionServiceError(reason) from exc
| ConcurrencySlotAcquisitionService |
python | PrefectHQ__prefect | tests/utilities/test_callables.py | {
"start": 19529,
"end": 21882
} | class ____:
def test_flow_with_args_docstring(self):
def f(x):
"""Function f.
Args:
x: required argument x
"""
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"x": {"title": "x", "description": "required argument x", "position": 0}
},
"required": ["x"],
"definitions": {},
}
def test_flow_without_docstring(self):
def f(x):
pass
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {"x": {"title": "x", "position": 0}},
"required": ["x"],
"definitions": {},
}
def test_flow_without_args_docstring(self):
def f(x):
"""Function f."""
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {"x": {"title": "x", "position": 0}},
"required": ["x"],
"definitions": {},
}
def test_flow_with_complex_args_docstring(self):
def f(x, y):
"""Function f.
Second line of docstring.
Args:
x: required argument x
y (str): required typed argument y
with second line
Returns:
None: nothing
"""
schema = callables.parameter_schema(f)
assert schema.model_dump_for_openapi() == {
"title": "Parameters",
"type": "object",
"properties": {
"x": {
"title": "x",
"description": "required argument x",
"position": 0,
},
"y": {
"title": "y",
"description": "required typed argument y\nwith second line",
"position": 1,
},
},
"required": ["x", "y"],
"definitions": {},
}
| TestParseFlowDescriptionToSchema |
python | viewflow__viewflow | viewflow/utils.py | {
"start": 4558,
"end": 5308
} | class ____(Generic[T]):
"""
Descriptor class that creates a lazy singleton instance.
This descriptor can be used as a class attribute, and the first time the
attribute is accessed, it creates an instance of the class. Subsequent
accesses return the same instance, effectively making the class a singleton.
"""
def __init__(self) -> None: # noqa D102
self.instance: T | None = None
def __get__(
self,
instance: Any | None = None,
owner: type[T] | None = None,
) -> T:
if self.instance is None:
if owner is None:
raise ValueError("Owner class not provided")
self.instance = owner()
return self.instance
| LazySingletonDescriptor |
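A usage sketch for the descriptor above: attached as a class attribute, the first access builds one instance of the owner class and every later access returns that same object. The descriptor class from the entry, with its typing imports, is assumed in scope:

class Service:
    # first access constructs Service() once; later accesses reuse it
    current = LazySingletonDescriptor()  # descriptor from the entry above

a = Service.current
b = Service.current
assert a is b                  # one lazily created instance
assert isinstance(a, Service)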
python | huggingface__transformers | src/transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py | {
"start": 23445,
"end": 26800
} | class ____(nn.Module):
def __init__(self, config: Qwen3VLMoeVisionConfig) -> None:
super().__init__()
self.dim = config.hidden_size
self.num_heads = config.num_heads
self.head_dim = self.dim // self.num_heads
self.num_key_value_groups = 1 # needed for eager attention
self.qkv = nn.Linear(self.dim, self.dim * 3, bias=True)
self.proj = nn.Linear(self.dim, self.dim)
self.scaling = self.head_dim**-0.5
self.config = config
self.attention_dropout = 0.0
self.is_causal = False
def forward(
self,
hidden_states: torch.Tensor,
cu_seqlens: torch.Tensor,
rotary_pos_emb: Optional[torch.Tensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs,
) -> torch.Tensor:
seq_length = hidden_states.shape[0]
query_states, key_states, value_states = (
self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)
)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin)
query_states = query_states.transpose(0, 1).unsqueeze(0)
key_states = key_states.transpose(0, 1).unsqueeze(0)
value_states = value_states.transpose(0, 1).unsqueeze(0)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
if self.config._attn_implementation == "flash_attention_2":
# Flash Attention 2: Use cu_seqlens for variable length attention
max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
attn_output, _ = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask=None,
scaling=self.scaling,
dropout=0.0 if not self.training else self.attention_dropout,
cu_seq_lens_q=cu_seqlens,
cu_seq_lens_k=cu_seqlens,
max_length_q=max_seqlen,
max_length_k=max_seqlen,
is_causal=False,
**kwargs,
)
else:
# Other implementations: Process each chunk separately
lengths = cu_seqlens[1:] - cu_seqlens[:-1]
splits = [
torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states)
]
attn_outputs = [
attention_interface(
self,
q,
k,
v,
attention_mask=None,
scaling=self.scaling,
dropout=0.0 if not self.training else self.attention_dropout,
is_causal=False,
**kwargs,
)[0]
for q, k, v in zip(*splits)
]
attn_output = torch.cat(attn_outputs, dim=1)
attn_output = attn_output.reshape(seq_length, -1).contiguous()
attn_output = self.proj(attn_output)
return attn_output
| Qwen3VLMoeVisionAttention |
python | jschneier__django-storages | tests/test_dropbox.py | {
"start": 1644,
"end": 5839
} | class ____(TestCase):
def setUp(self, *args):
self.storage = dropbox.DropboxStorage("foo")
def test_no_access_token(self, *args):
with self.assertRaises(ImproperlyConfigured):
dropbox.DropboxStorage(None)
def test_setting_access_token(self):
with override_settings(DROPBOX_OAUTH2_TOKEN="abc"):
storage = dropbox.DropboxStorage()
self.assertEqual(storage.oauth2_access_token, "abc")
def test_refresh_token_app_key_no_app_secret(self, *args):
inputs = {
"oauth2_refresh_token": "foo",
"app_key": "bar",
}
with self.assertRaises(ImproperlyConfigured):
dropbox.DropboxStorage(**inputs)
def test_refresh_token_app_secret_no_app_key(self, *args):
inputs = {
"oauth2_refresh_token": "foo",
"app_secret": "bar",
}
with self.assertRaises(ImproperlyConfigured):
dropbox.DropboxStorage(**inputs)
def test_app_key_app_secret_no_refresh_token(self, *args):
inputs = {
"app_key": "foo",
"app_secret": "bar",
}
with self.assertRaises(ImproperlyConfigured):
dropbox.DropboxStorage(**inputs)
@mock.patch("dropbox.Dropbox.files_delete", return_value=FILE_METADATA_MOCK)
def test_delete(self, *args):
self.storage.delete("foo")
@mock.patch("dropbox.Dropbox.files_get_metadata", return_value=[FILE_METADATA_MOCK])
def test_exists(self, *args):
exists = self.storage.exists("foo")
self.assertTrue(exists)
@mock.patch("dropbox.Dropbox.files_get_metadata", return_value=[])
def test_not_exists(self, *args):
exists = self.storage.exists("bar")
self.assertFalse(exists)
@mock.patch("dropbox.Dropbox.files_list_folder", return_value=FILES_MOCK)
def test_listdir(self, *args):
dirs, files = self.storage.listdir("/")
dirs2, files2 = self.storage.listdir("")
self.assertEqual(dirs, dirs2)
        self.assertEqual(files, files2)
self.assertGreater(len(dirs), 0)
self.assertGreater(len(files), 0)
self.assertEqual(dirs[0], "bar")
self.assertEqual(files[0], "foo.txt")
@mock.patch("dropbox.Dropbox.files_get_metadata", return_value=FILE_METADATA_MOCK)
def test_size(self, *args):
size = self.storage.size("foo")
self.assertEqual(size, FILE_METADATA_MOCK.size)
def test_open(self, *args):
obj = self.storage._open("foo")
self.assertIsInstance(obj, File)
@mock.patch("dropbox.Dropbox.files_upload", return_value="foo")
@mock.patch("dropbox.Dropbox.files_get_metadata", return_value=None)
def test_save(self, files_upload, *args):
name = self.storage.save("foo", File(io.BytesIO(b"bar"), "foo"))
self.assertTrue(files_upload.called)
self.assertEqual(name, "foo")
@mock.patch("dropbox.Dropbox.files_upload")
@mock.patch("dropbox.Dropbox.files_upload_session_finish")
@mock.patch("dropbox.Dropbox.files_upload_session_append_v2")
@mock.patch(
"dropbox.Dropbox.files_upload_session_start",
return_value=mock.MagicMock(session_id="foo"),
)
def test_chunked_upload(self, start, append, finish, upload):
large_file = File(io.BytesIO(b"bar" * self.storage.CHUNK_SIZE), "foo")
self.storage._save("foo", large_file)
self.assertTrue(start.called)
self.assertTrue(append.called)
self.assertTrue(finish.called)
self.assertFalse(upload.called)
@mock.patch(
"dropbox.Dropbox.files_get_temporary_link", return_value=FILE_MEDIA_MOCK
)
def test_url(self, *args):
url = self.storage.url("foo")
self.assertEqual(url, FILE_MEDIA_MOCK.link)
def test_formats(self, *args):
self.storage = dropbox.DropboxStorage("foo")
files = self.storage._full_path("")
self.assertEqual(files, self.storage._full_path("/"))
self.assertEqual(files, self.storage._full_path("."))
self.assertEqual(files, self.storage._full_path(".."))
self.assertEqual(files, self.storage._full_path("../.."))
| DropboxTest |
python | doocs__leetcode | solution/1500-1599/1559.Detect Cycles in 2D Grid/Solution2.py | {
"start": 0,
"end": 850
} | class ____:
def containsCycle(self, grid: List[List[str]]) -> bool:
def dfs(x: int, y: int, px: int, py: int) -> bool:
vis[x][y] = True
for dx, dy in pairwise(dirs):
nx, ny = x + dx, y + dy
if 0 <= nx < m and 0 <= ny < n:
if grid[nx][ny] != grid[x][y] or (nx == px and ny == py):
continue
if vis[nx][ny] or dfs(nx, ny, x, y):
return True
return False
m, n = len(grid), len(grid[0])
vis = [[False] * n for _ in range(m)]
dirs = (-1, 0, 1, 0, -1)
for i in range(m):
for j in range(n):
if vis[i][j]:
continue
if dfs(i, j, -1, -1):
return True
return False
| Solution |
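A usage sketch for the row above (the class needs `pairwise` from itertools, Python 3.10+, and `List` from typing). The first grid contains a ring of 'a' cells around the border; the second has no cycle:

from itertools import pairwise  # used by the dfs direction pairs above (Python 3.10+)
from typing import List

solver = Solution()  # class from the row above, assumed in scope
ring = [["a", "a", "a", "a"],
        ["a", "b", "b", "a"],
        ["a", "b", "b", "a"],
        ["a", "a", "a", "a"]]
assert solver.containsCycle(ring) is True
assert solver.containsCycle([["a", "b", "b"],
                             ["b", "z", "b"],
                             ["b", "b", "a"]]) is False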
python | coleifer__peewee | examples/blog/app.py | {
"start": 1961,
"end": 4857
} | class ____(flask_db.Model):
title = CharField()
slug = CharField(unique=True)
content = TextField()
published = BooleanField(index=True)
timestamp = DateTimeField(default=datetime.datetime.now, index=True)
@property
def html_content(self):
"""
Generate HTML representation of the markdown-formatted blog entry,
and also convert any media URLs into rich media objects such as video
players or images.
"""
hilite = CodeHiliteExtension(linenums=False, css_class='highlight')
extras = ExtraExtension()
markdown_content = markdown(self.content, extensions=[hilite, extras])
oembed_content = parse_html(
markdown_content,
oembed_providers,
urlize_all=True,
maxwidth=app.config['SITE_WIDTH'])
return Markup(oembed_content)
def save(self, *args, **kwargs):
# Generate a URL-friendly representation of the entry's title.
if not self.slug:
self.slug = re.sub(r'[^\w]+', '-', self.title.lower()).strip('-')
ret = super(Entry, self).save(*args, **kwargs)
# Store search content.
self.update_search_index()
return ret
def update_search_index(self):
# Create a row in the FTSEntry table with the post content. This will
# allow us to use SQLite's awesome full-text search extension to
# search our entries.
exists = (FTSEntry
.select(FTSEntry.docid)
.where(FTSEntry.docid == self.id)
.exists())
content = '\n'.join((self.title, self.content))
if exists:
(FTSEntry
.update({FTSEntry.content: content})
.where(FTSEntry.docid == self.id)
.execute())
else:
FTSEntry.insert({
FTSEntry.docid: self.id,
FTSEntry.content: content}).execute()
@classmethod
def public(cls):
return Entry.select().where(Entry.published == True)
@classmethod
def drafts(cls):
return Entry.select().where(Entry.published == False)
@classmethod
def search(cls, query):
words = [word.strip() for word in query.split() if word.strip()]
if not words:
# Return an empty query.
return Entry.noop()
else:
search = ' '.join(words)
# Query the full-text search index for entries matching the given
# search query, then join the actual Entry data on the matching
# search result.
return (Entry
.select(Entry, FTSEntry.rank().alias('score'))
.join(FTSEntry, on=(Entry.id == FTSEntry.docid))
.where(
FTSEntry.match(search) &
(Entry.published == True))
.order_by(SQL('score')))
| Entry |
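The slug rule inside Entry.save() above is a single regex; a standalone sketch of just that step, assuming nothing else in the model affects it:

import re

def slugify(title: str) -> str:
    # mirrors the slug rule in Entry.save() above
    return re.sub(r"[^\w]+", "-", title.lower()).strip("-")

assert slugify("Hello, World!") == "hello-world"
assert slugify("  Peewee & Flask  ") == "peewee-flask"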
python | PyCQA__pylint | pylint/testutils/output_line.py | {
"start": 936,
"end": 3994
} | class ____(NamedTuple):
symbol: str
lineno: int
column: int
end_lineno: int | None
end_column: int | None
object: str
msg: str
confidence: str
@classmethod
def from_msg(cls, msg: Message, check_endline: bool = True) -> OutputLine:
"""Create an OutputLine from a Pylint Message."""
column = cls._get_column(msg.column)
end_line = cls._get_end_line_and_end_col(msg.end_line, check_endline)
end_column = cls._get_end_line_and_end_col(msg.end_column, check_endline)
return cls(
msg.symbol,
msg.line,
column,
end_line,
end_column,
msg.obj or "",
msg.msg.replace("\r\n", "\n"),
msg.confidence.name,
)
@staticmethod
def _get_column(column: str | int) -> int:
"""Handle column numbers."""
return int(column)
@staticmethod
def _get_end_line_and_end_col(value: _T, check_endline: bool) -> _T | None:
"""Used to make end_line and end_column None as indicated by our version
compared to `min_pyver_end_position`.
"""
if not check_endline:
return None # pragma: no cover
return value
@classmethod
def from_csv(
cls, row: Sequence[str] | str, check_endline: bool = True
) -> OutputLine:
"""Create an OutputLine from a comma separated list (the functional tests
expected output .txt files).
"""
if isinstance(row, str):
row = row.split(",")
try:
line = int(row[1])
column = cls._get_column(row[2])
end_line = cls._value_to_optional_int(
cls._get_end_line_and_end_col(row[3], check_endline)
)
end_column = cls._value_to_optional_int(
cls._get_end_line_and_end_col(row[4], check_endline)
)
# symbol, line, column, end_line, end_column, node, msg, confidences
assert len(row) == 8
return cls(
row[0], line, column, end_line, end_column, row[5], row[6], row[7]
)
except Exception: # pylint: disable=broad-except
# We need this to not fail for the update script to work.
return cls("", 0, 0, None, None, "", "", "")
def to_csv(self) -> tuple[str, str, str, str, str, str, str, str]:
"""Convert an OutputLine to a tuple of string to be written by a
csv-writer.
"""
return (
str(self.symbol),
str(self.lineno),
str(self.column),
str(self.end_lineno),
str(self.end_column),
str(self.object),
str(self.msg),
str(self.confidence),
)
@staticmethod
def _value_to_optional_int(value: str | None) -> int | None:
"""Checks if a (stringified) value should be None or a Python integer."""
if value == "None" or not value:
return None
return int(value)
| OutputLine |
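A round-trip sketch for the entry above, with OutputLine assumed in scope; from_csv parses the numeric columns and to_csv stringifies them back, so the pair round-trips an 8-column row:

# OutputLine from the entry above is assumed in scope.
row = ["missing-docstring", "5", "0", "5", "10", "MyClass", "Missing docstring", "HIGH"]
line = OutputLine.from_csv(row)
assert line.lineno == 5 and line.end_column == 10  # numeric columns are parsed
assert OutputLine.from_csv(line.to_csv()) == line  # to_csv/from_csv round-trip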
python | ansible__ansible | test/lib/ansible_test/_internal/cli/parsers/host_config_parsers.py | {
"start": 4329,
"end": 6260
} | class ____(PairParser):
"""Composite argument parser for a POSIX remote host."""
def __init__(self, controller: bool) -> None:
self.controller = controller
def create_namespace(self) -> t.Any:
"""Create and return a namespace."""
return PosixRemoteConfig()
def get_left_parser(self, state: ParserState) -> Parser:
"""Return the parser for the left side."""
return NamespaceWrappedParser('name', PlatformParser(list(filter_completion(remote_completion(), controller_only=self.controller))))
def get_right_parser(self, choice: t.Any) -> Parser:
"""Return the parser for the right side."""
return PosixRemoteKeyValueParser(choice, self.controller)
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
value: PosixRemoteConfig = super().parse(state)
if not value.python and not get_remote_pythons(value.name, self.controller, True):
raise ParserError(f'Python version required for remote: {value.name}')
return value
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
default = get_fallback_remote_controller()
content = '\n'.join([f' {name} ({", ".join(get_remote_pythons(name, self.controller, False))})'
for name, item in filter_completion(remote_completion(), controller_only=self.controller).items()])
content += '\n'.join([
'',
' {platform}/{version} # python must be specified for unknown systems',
])
state.sections[f'{"controller" if self.controller else "target"} remote systems and supported python versions (choose one):'] = content
return f'{{system}}[,{PosixRemoteKeyValueParser(default, self.controller).document(state)}]'
| PosixRemoteParser |
python | huggingface__transformers | src/transformers/models/pix2struct/modeling_pix2struct.py | {
"start": 10188,
"end": 11881
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Pix2StructConfig) -> None:
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = Pix2StructVisionAttention(config)
self.mlp = Pix2StructVisionMlp(config)
self.pre_mlp_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pre_attention_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
residual = hidden_states
# in Pix2StructVision, layernorm is applied before self-attention
hidden_states = self.pre_attention_layer_norm(hidden_states)
self_attention_outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
# first residual connection
hidden_states = attention_output + residual
# in Pix2StructVision, layernorm is also applied after self-attention
layer_output = self.pre_mlp_layer_norm(hidden_states)
layer_output = self.mlp(layer_output) + hidden_states # second residual connection
outputs = (layer_output,) + outputs
return outputs
| Pix2StructVisionLayer |
python | spack__spack | lib/spack/spack/cmd/create.py | {
"start": 18808,
"end": 19168
} | class ____(PerlmakePackageTemplate):
"""Provides appropriate overrides for Perl extensions
that come with a Build.PL instead of a Makefile.PL"""
dependencies = """\
depends_on("perl-module-build", type="build")
# FIXME: Add additional dependencies if required:
# depends_on("perl-foo", type=("build", "run"))"""
| PerlbuildPackageTemplate |
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/_etcd_stub.py | {
"start": 1555,
"end": 1998
} | class ____:
def __init__(self, *args: Any, **kwargs: Any) -> None:
raise EtcdStubError
def read(self, key: str) -> None:
raise EtcdStubError
def write(
self, key: str, value: Any, ttl: int | None = None, **kwargs: Any
) -> None:
raise EtcdStubError
def test_and_set(
self, key: str, value: Any, prev_value: Any, ttl: int | None = None
) -> None:
raise EtcdStubError
| Client |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/transfers/test_mongo_to_s3.py | {
"start": 1786,
"end": 6484
} | class ____:
def setup_method(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
self.mock_operator = MongoToS3Operator(
task_id=TASK_ID,
mongo_conn_id=MONGO_CONN_ID,
aws_conn_id=AWS_CONN_ID,
mongo_collection=MONGO_COLLECTION,
mongo_query=MONGO_QUERY,
s3_bucket=S3_BUCKET,
s3_key=S3_KEY,
dag=self.dag,
compression=COMPRESSION,
)
def test_init(self):
assert self.mock_operator.task_id == TASK_ID
assert self.mock_operator.mongo_conn_id == MONGO_CONN_ID
assert self.mock_operator.aws_conn_id == AWS_CONN_ID
assert self.mock_operator.mongo_collection == MONGO_COLLECTION
assert self.mock_operator.mongo_query == MONGO_QUERY
assert self.mock_operator.s3_bucket == S3_BUCKET
assert self.mock_operator.s3_key == S3_KEY
assert self.mock_operator.compression == COMPRESSION
def test_template_field_overrides(self):
assert self.mock_operator.template_fields == (
"s3_bucket",
"s3_key",
"mongo_query",
"mongo_collection",
)
@pytest.mark.db_test
def test_render_template(self, session, clean_dags_dagruns_and_dagbundles, testing_dag_bundle):
if AIRFLOW_V_3_0_PLUS:
from airflow.models.dag_version import DagVersion
sync_dag_to_db(self.dag)
dag_version = DagVersion.get_latest_version(self.mock_operator.dag_id)
ti = TaskInstance(self.mock_operator, dag_version_id=dag_version.id)
dag_run = DagRun(
dag_id=self.mock_operator.dag_id,
logical_date=DEFAULT_DATE,
run_id="test",
run_type=DagRunType.MANUAL,
state=DagRunState.RUNNING,
)
else:
dag_run = DagRun(
dag_id=self.mock_operator.dag_id,
execution_date=DEFAULT_DATE,
run_id="test",
run_type=DagRunType.MANUAL,
state=DagRunState.RUNNING,
)
ti = TaskInstance(task=self.mock_operator)
ti.dag_run = dag_run
session.add(ti)
session.commit()
ti.render_templates()
expected_rendered_template = {"$lt": "2017-01-01T00:00:00+00:00Z"}
assert expected_rendered_template == getattr(self.mock_operator, "mongo_query")
@mock.patch("airflow.providers.amazon.aws.transfers.mongo_to_s3.MongoHook")
@mock.patch("airflow.providers.amazon.aws.transfers.mongo_to_s3.S3Hook")
def test_execute(self, mock_s3_hook, mock_mongo_hook):
operator = self.mock_operator
mock_mongo_hook.return_value.find.return_value = iter(MOCK_MONGO_RETURN)
mock_s3_hook.return_value.load_string.return_value = True
operator.execute(None)
mock_mongo_hook.return_value.find.assert_called_once_with(
mongo_collection=MONGO_COLLECTION,
query=MONGO_QUERY,
find_one=False,
mongo_db=None,
projection=None,
)
op_stringify = self.mock_operator._stringify
op_transform = self.mock_operator.transform
s3_doc_str = op_stringify(op_transform(MOCK_MONGO_RETURN))
mock_s3_hook.return_value.load_string.assert_called_once_with(
string_data=s3_doc_str, key=S3_KEY, bucket_name=S3_BUCKET, replace=False, compression=COMPRESSION
)
@mock.patch("airflow.providers.amazon.aws.transfers.mongo_to_s3.MongoHook")
@mock.patch("airflow.providers.amazon.aws.transfers.mongo_to_s3.S3Hook")
def test_execute_compress(self, mock_s3_hook, mock_mongo_hook):
operator = self.mock_operator
self.mock_operator.compression = "gzip"
mock_mongo_hook.return_value.find.return_value = iter(MOCK_MONGO_RETURN)
mock_s3_hook.return_value.load_string.return_value = True
operator.execute(None)
mock_mongo_hook.return_value.find.assert_called_once_with(
mongo_collection=MONGO_COLLECTION,
query=MONGO_QUERY,
find_one=False,
mongo_db=None,
projection=None,
)
op_stringify = self.mock_operator._stringify
op_transform = self.mock_operator.transform
s3_doc_str = op_stringify(op_transform(MOCK_MONGO_RETURN))
mock_s3_hook.return_value.load_string.assert_called_once_with(
string_data=s3_doc_str, key=S3_KEY, bucket_name=S3_BUCKET, replace=False, compression="gzip"
)
| TestMongoToS3Operator |
python | numpy__numpy | numpy/_core/tests/test_numerictypes.py | {
"start": 4775,
"end": 4913
} | class ____(CreateZeros):
"""Check the creation of heterogeneous arrays zero-valued (nested)"""
_descr = Ndescr
| TestCreateZerosNested |
python | huggingface__transformers | src/transformers/models/fnet/modeling_fnet.py | {
"start": 15748,
"end": 19902
} | class ____(FNetPreTrainedModel):
"""
The model can behave as an encoder, following the architecture described in [FNet: Mixing Tokens with Fourier
Transforms](https://huggingface.co/papers/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
"""
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = FNetEmbeddings(config)
self.encoder = FNetEncoder(config)
self.pooler = FNetPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if (
self.config.use_tpu_fourier_optimizations
and seq_length <= 4096
and self.config.tpu_short_seq_length != seq_length
):
raise ValueError(
"The `tpu_short_seq_length` in FNetConfig should be set equal to the sequence length being passed to"
" the model when using TPU optimizations."
)
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
encoder_outputs = self.encoder(
embedding_output,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooler_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooler_output,
hidden_states=encoder_outputs.hidden_states,
)
@auto_docstring(
custom_intro="""
FNet Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
sentence prediction (classification)` head.
"""
)
| FNetModel |
python | chroma-core__chroma | chromadb/auth/__init__.py | {
"start": 868,
"end": 1611
} | class ____:
"""
UserIdentity represents the identity of a user. In general, not all fields
will be populated, and the fields that are populated will depend on the
authentication provider.
The idea is that the AuthenticationProvider is responsible for populating
_all_ information known about the user, and the AuthorizationProvider is
responsible for making decisions based on that information.
"""
user_id: str
tenant: Optional[str] = None
databases: Optional[List[str]] = None
# This can be used for any additional auth context which needs to be
# propagated from the authentication provider to the authorization
# provider.
attributes: Optional[Dict[str, Any]] = None
| UserIdentity |
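A construction sketch for the entry above, assuming UserIdentity is a dataclass (the decorator sits outside the shown span) importable from chromadb.auth; all field values are illustrative:

from chromadb.auth import UserIdentity  # import path taken from the row above

identity = UserIdentity(
    user_id="user-123",                  # always populated
    tenant="acme",                       # provider-dependent
    databases=["prod", "staging"],       # provider-dependent
    attributes={"token_scope": "read"},  # extra auth context; hypothetical key
)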
python | walkccc__LeetCode | solutions/74. Search a 2D Matrix/74.py | {
"start": 0,
"end": 425
} | class ____:
def searchMatrix(self, matrix: list[list[int]], target: int) -> bool:
if not matrix:
return False
m = len(matrix)
n = len(matrix[0])
l = 0
r = m * n
while l < r:
mid = (l + r) // 2
i = mid // n
j = mid % n
if matrix[i][j] == target:
return True
if matrix[i][j] < target:
l = mid + 1
else:
r = mid
return False
| Solution |
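A usage sketch for the row above: the method treats the matrix as one flat sorted array of m * n values and binary-searches it, with the class assumed in scope:

solver = Solution()  # class from the row above, assumed in scope
matrix = [[1, 3, 5, 7],
          [10, 11, 16, 20],
          [23, 30, 34, 60]]
assert solver.searchMatrix(matrix, 3) is True    # found in row 0
assert solver.searchMatrix(matrix, 13) is False  # falls between 11 and 16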
python | davidhalter__jedi | jedi/plugins/stdlib.py | {
"start": 25216,
"end": 29997
} | class ____(ValueWrapper, FunctionMixin):
def __init__(self, func, original_function):
super().__init__(func)
self._original_function = original_function
@property
def name(self):
return self._original_function.name
def get_signature_functions(self):
return [self]
@argument_clinic('*args, /', want_value=True, want_arguments=True)
def _operator_itemgetter(args_value_set, value, arguments):
return ValueSet([
ItemGetterCallable(instance, args_value_set)
for instance in value.py__call__(arguments)
])
def _create_string_input_function(func):
@argument_clinic('string, /', want_value=True, want_arguments=True)
def wrapper(strings, value, arguments):
def iterate():
for value in strings:
s = get_str_or_none(value)
if s is not None:
s = func(s)
yield compiled.create_simple_object(value.inference_state, s)
values = ValueSet(iterate())
if values:
return values
return value.py__call__(arguments)
return wrapper
@argument_clinic('*args, /', want_callback=True)
def _os_path_join(args_set, callback):
if len(args_set) == 1:
string = ''
sequence, = args_set
is_first = True
for lazy_value in sequence.py__iter__():
string_values = lazy_value.infer()
if len(string_values) != 1:
break
s = get_str_or_none(next(iter(string_values)))
if s is None:
break
if not is_first:
string += os.path.sep
string += s
is_first = False
else:
return ValueSet([compiled.create_simple_object(sequence.inference_state, string)])
return callback()
_implemented = {
'builtins': {
'getattr': builtins_getattr,
'type': builtins_type,
'super': builtins_super,
'reversed': builtins_reversed,
'isinstance': builtins_isinstance,
'next': builtins_next,
'iter': builtins_iter,
'staticmethod': builtins_staticmethod,
'classmethod': builtins_classmethod,
'property': builtins_property,
},
'copy': {
'copy': _return_first_param,
'deepcopy': _return_first_param,
},
'json': {
'load': lambda value, arguments, callback: NO_VALUES,
'loads': lambda value, arguments, callback: NO_VALUES,
},
'collections': {
'namedtuple': collections_namedtuple,
},
'functools': {
'partial': functools_partial,
'partialmethod': functools_partialmethod,
'wraps': _functools_wraps,
},
'_weakref': {
'proxy': _return_first_param,
},
'random': {
'choice': _random_choice,
},
'operator': {
'itemgetter': _operator_itemgetter,
},
'abc': {
# Not sure if this is necessary, but it's used a lot in typeshed and
# it's for now easier to just pass the function.
'abstractmethod': _return_first_param,
},
'typing': {
# The _alias function just leads to some annoying type inference.
# Therefore, just make it return nothing, which leads to the stubs
# being used instead. This only matters for 3.7+.
'_alias': lambda value, arguments, callback: NO_VALUES,
# runtime_checkable doesn't really change anything and is just
# adding logs for infering stuff, so we can safely ignore it.
'runtime_checkable': lambda value, arguments, callback: NO_VALUES,
# Python 3.11+
'dataclass_transform': _dataclass_transform,
},
'typing_extensions': {
# Python <3.11
'dataclass_transform': _dataclass_transform,
},
'dataclasses': {
# For now this works at least better than Jedi trying to understand it.
'dataclass': _dataclass
},
'os.path': {
'dirname': _create_string_input_function(os.path.dirname),
'abspath': _create_string_input_function(os.path.abspath),
'relpath': _create_string_input_function(os.path.relpath),
'join': _os_path_join,
}
}
def get_metaclass_filters(func):
def wrapper(cls, metaclasses, is_instance):
for metaclass in metaclasses:
if metaclass.py__name__() == 'EnumMeta' \
and metaclass.get_root_context().py__name__() == 'enum':
filter_ = ParserTreeFilter(parent_context=cls.as_context())
return [DictFilter({
name.string_name: EnumInstance(cls, name).name
for name in filter_.values()
})]
return func(cls, metaclasses, is_instance)
return wrapper
| Wrapped |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/tasks.py | {
"start": 1500,
"end": 26340
} | class ____(GoogleBaseHook):
"""
Hook for Google Cloud Tasks APIs.
Cloud Tasks allows developers to manage the execution of background work in their applications.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
**kwargs,
)
self._client: CloudTasksClient | None = None
def get_conn(self) -> CloudTasksClient:
"""
Provide a client for interacting with the Google Cloud Tasks API.
:return: Google Cloud Tasks API Client
"""
if self._client is None:
self._client = CloudTasksClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._client
@GoogleBaseHook.fallback_to_default_project_id
def create_queue(
self,
location: str,
task_queue: dict | Queue,
project_id: str = PROVIDE_PROJECT_ID,
queue_name: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Queue:
"""
Create a queue in Cloud Tasks.
:param location: The location name in which the queue will be created.
:param task_queue: The task queue to create.
            The queue's name cannot be the same as that of an existing queue.
If a dict is provided, it must be of the same form as the protobuf message Queue.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if queue_name:
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
if isinstance(task_queue, Queue):
task_queue.name = full_queue_name
elif isinstance(task_queue, dict):
task_queue["name"] = full_queue_name
else:
raise AirflowException("Unable to set queue_name.")
full_location_path = f"projects/{project_id}/locations/{location}"
return client.create_queue(
request={"parent": full_location_path, "queue": task_queue},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def update_queue(
self,
task_queue: Queue,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
queue_name: str | None = None,
update_mask: FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Queue:
"""
Update a queue in Cloud Tasks.
:param task_queue: The task queue to update.
This method creates the queue if it does not exist and updates the queue if
it does exist. The queue's name must be specified.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: (Optional) The location name in which the queue will be updated.
If provided, it will be used to construct the full queue path.
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
        :param update_mask: A mask used to specify which fields of the queue are being updated.
If empty, then all fields will be updated.
If a dict is provided, it must be of the same form as the protobuf message.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if queue_name and location:
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
if isinstance(task_queue, Queue):
task_queue.name = full_queue_name
elif isinstance(task_queue, dict):
task_queue["name"] = full_queue_name
else:
raise AirflowException("Unable to set queue_name.")
return client.update_queue(
request={"queue": task_queue, "update_mask": update_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Queue:
"""
Get a queue from Cloud Tasks.
:param location: The location name in which the queue was created.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.get_queue(
request={"name": full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def list_queues(
self,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
results_filter: str | None = None,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> list[Queue]:
"""
List queues from Cloud Tasks.
:param location: The location name in which the queues were created.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param results_filter: (Optional) Filter used to specify a subset of queues.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_location_path = f"projects/{project_id}/locations/{location}"
queues = client.list_queues(
request={"parent": full_location_path, "filter": results_filter, "page_size": page_size},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(queues)
@GoogleBaseHook.fallback_to_default_project_id
def delete_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete a queue from Cloud Tasks, even if it has tasks in it.
:param location: The location name in which the queue will be deleted.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
client.delete_queue(
request={"name": full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def purge_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Queue:
"""
Purges a queue by deleting all of its tasks from Cloud Tasks.
:param location: The location name in which the queue will be purged.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.purge_queue(
request={"name": full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def pause_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Queue:
"""
Pauses a queue in Cloud Tasks.
:param location: The location name in which the queue will be paused.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.pause_queue(
request={"name": full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def resume_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Queue:
"""
Resumes a queue in Cloud Tasks.
:param location: The location name in which the queue will be resumed.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.resume_queue(
request={"name": full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_task(
self,
location: str,
queue_name: str,
task: dict | Task,
project_id: str = PROVIDE_PROJECT_ID,
task_name: str | None = None,
response_view: Task.View | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Task:
"""
Create a task in Cloud Tasks.
:param location: The location name in which the task will be created.
:param queue_name: The queue's name.
:param task: The task to add.
If a dict is provided, it must be of the same form as the protobuf message Task.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param task_name: (Optional) The task's name.
If provided, it will be used to construct the full task path.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if task_name:
full_task_name = (
f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
)
if isinstance(task, Task):
task.name = full_task_name
elif isinstance(task, dict):
task["name"] = full_task_name
else:
raise AirflowException("Unable to set task_name.")
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.create_task(
request={"parent": full_queue_name, "task": task, "response_view": response_view},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str = PROVIDE_PROJECT_ID,
response_view: Task.View | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Task:
"""
Get a task from Cloud Tasks.
:param location: The location name in which the task was created.
:param queue_name: The queue's name.
:param task_name: The task's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_task_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
return client.get_task(
request={"name": full_task_name, "response_view": response_view},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def list_tasks(
self,
location: str,
queue_name: str,
project_id: str,
response_view: Task.View | None = None,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> list[Task]:
"""
List the tasks in Cloud Tasks.
:param location: The location name in which the tasks were created.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
tasks = client.list_tasks(
request={"parent": full_queue_name, "response_view": response_view, "page_size": page_size},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(tasks)
@GoogleBaseHook.fallback_to_default_project_id
def delete_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete a task from Cloud Tasks.
:param location: The location name in which the task will be deleted.
:param queue_name: The queue's name.
:param task_name: The task's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_task_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
client.delete_task(
request={"name": full_task_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def run_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str,
response_view: Task.View | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Task:
"""
Force run a task in Cloud Tasks.
:param location: The location name in which the task was created.
:param queue_name: The queue's name.
:param task_name: The task's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_task_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
return client.run_task(
request={"name": full_task_name, "response_view": response_view},
retry=retry,
timeout=timeout,
metadata=metadata,
)
| CloudTasksHook |
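A minimal usage sketch for the hook above. The connection id, project, location, and resource names are illustrative assumptions; only the method signatures come from the source, which shows the hook assembling the full `projects/{project_id}/locations/{location}/queues/{queue_name}` path from the short names.

```python
from google.cloud.tasks_v2.types import Queue

# Hypothetical values throughout; the hook builds the full resource paths itself.
hook = CloudTasksHook(gcp_conn_id="google_cloud_default")

queue = hook.create_queue(
    location="us-central1",
    task_queue=Queue(),          # or a dict in protobuf message form
    project_id="my-project",
    queue_name="my-queue",
)

task = hook.create_task(
    location="us-central1",
    queue_name="my-queue",
    task={"http_request": {"http_method": "POST", "url": "https://example.com/handler"}},
    project_id="my-project",
    task_name="my-task",
)
```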
python | getsentry__sentry | src/sentry/integrations/api/serializers/rest_framework/data_forwarder.py | {
"start": 9796,
"end": 12410
} | class ____(Serializer):
data_forwarder_id = serializers.IntegerField()
project = ProjectField(scope="project:write", id_allowed=True)
overrides = serializers.JSONField(default=dict)
is_enabled = serializers.BooleanField(default=True)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._validated_data_forwarder: DataForwarder | None = None
def validate_data_forwarder_id(self, value: int) -> int:
organization = self.context.get("organization")
if not organization:
raise ValidationError("Organization context is required")
try:
data_forwarder = DataForwarder.objects.get(id=value, organization=organization)
self._validated_data_forwarder = data_forwarder
except DataForwarder.DoesNotExist:
raise ValidationError("DataForwarder with this ID does not exist")
return value
def validate(self, attrs: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
project = attrs.get("project")
data_forwarder = self._validated_data_forwarder
if data_forwarder is None:
raise ValidationError("DataForwarder validation failed")
elif project is None:
raise ValidationError("Project validation failed")
elif data_forwarder.organization_id != project.organization_id:
raise ValidationError("DataForwarder and Project must belong to the same organization.")
existing = DataForwarderProject.objects.filter(
data_forwarder_id=data_forwarder.id,
project_id=project.id,
)
if self.instance:
existing = existing.exclude(id=self.instance.id)
if existing.exists():
raise ValidationError(
"A DataForwarderProject already exists for this data forwarder and project combination."
)
return attrs
def create(self, validated_data: MutableMapping[str, Any]) -> DataForwarderProject:
project = validated_data.pop("project")
validated_data["project_id"] = project.id
return DataForwarderProject.objects.create(**validated_data)
def update(
self, instance: DataForwarderProject, validated_data: MutableMapping[str, Any]
) -> DataForwarderProject:
project = validated_data.pop("project", None)
if project:
validated_data["project_id"] = project.id
for attr, value in validated_data.items():
setattr(instance, attr, value)
instance.save()
return instance
| DataForwarderProjectSerializer |
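A hedged sketch of how this serializer would be driven from an endpoint. The `organization` object and all payload values are assumptions; the field names and the required `organization` context key come from the class above.

```python
# `organization` is a hypothetical Organization instance available in the view.
serializer = DataForwarderProjectSerializer(
    data={
        "data_forwarder_id": 123,   # must belong to `organization`
        "project": 456,             # ProjectField with id_allowed=True accepts an id
        "overrides": {},
        "is_enabled": True,
    },
    context={"organization": organization},
)
if serializer.is_valid():
    data_forwarder_project = serializer.save()
else:
    errors = serializer.errors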
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 7507,
"end": 7720
} | class ____(EllipticCurve):
name = "sect409k1"
key_size = 407
group_order = 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE5F83B2D4EA20400EC4557D5ED3E3E7CA5B4B5C83B8E01E5FCF # noqa: E501
| SECT409K1 |
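A sketch of selecting this curve through the public `ec` API. Note that binary (`SECT*`) curves are deprecated in recent cryptography releases, so availability depends on the installed version and the OpenSSL build.

```python
from cryptography.hazmat.primitives.asymmetric import ec

# May emit a deprecation warning or fail on builds without binary-curve support.
private_key = ec.generate_private_key(ec.SECT409K1())
public_key = private_key.public_key()
```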
python | dask__distributed | distributed/multi_lock.py | {
"start": 4844,
"end": 8044
} | class ____:
"""Distributed Centralized Lock
Parameters
----------
names
Names of the locks to acquire. Choosing the same name allows two
disconnected processes to coordinate a lock.
client
Client to use for communication with the scheduler. If not given, the
default global client will be used.
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout=1) # doctest: +SKIP
>>> # do things with protected resource 'x' and 'y'
>>> lock.release() # doctest: +SKIP
"""
def __init__(self, names: list[str] | None = None, client: Client | None = None):
try:
self.client = client or Client.current()
except ValueError:
# Initialise new client
self.client = get_worker().client
self.names = names or []
self.id = uuid.uuid4().hex
self._locked = False
def acquire(self, blocking=True, timeout=None, num_locks=None):
"""Acquire the lock
Parameters
----------
blocking : bool, optional
If false, don't wait on the lock in the scheduler at all.
timeout : string or number or timedelta, optional
Seconds to wait on the lock in the scheduler. This does not
include local coroutine time, network transfer time, etc..
It is forbidden to specify a timeout when blocking is false.
Instead of number of seconds, it is also possible to specify
a timedelta in string format, e.g. "200ms".
num_locks : int, optional
Number of locks needed. If None, all locks are needed
Examples
--------
>>> lock = MultiLock(['x', 'y']) # doctest: +SKIP
>>> lock.acquire(timeout="1s") # doctest: +SKIP
Returns
-------
True or False whether or not it successfully acquired the lock
"""
timeout = parse_timedelta(timeout)
if not blocking:
if timeout is not None:
raise ValueError("can't specify a timeout for a non-blocking call")
timeout = 0
result = self.client.sync(
self.client.scheduler.multi_lock_acquire,
locks=self.names,
id=self.id,
timeout=timeout,
num_locks=num_locks or len(self.names),
)
self._locked = True
return result
def release(self):
"""Release the lock if already acquired"""
if not self.locked():
raise ValueError("Lock is not yet acquired")
ret = self.client.sync(self.client.scheduler.multi_lock_release, id=self.id)
self._locked = False
return ret
def locked(self):
return self._locked
def __enter__(self):
self.acquire()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.release()
async def __aenter__(self):
await self.acquire()
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.release()
def __reduce__(self):
return (type(self), (self.names,))
| MultiLock |
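A small sketch of the two acquisition styles supported above; the lock names and timeout are illustrative.

```python
lock = MultiLock(names=["resource-x", "resource-y"])

# Context-manager path: __enter__ blocks until all named locks are held.
with lock:
    ...  # work safely with 'resource-x' and 'resource-y'

# Explicit path: bounded wait, then release in a finally block.
if lock.acquire(timeout="5s", num_locks=2):
    try:
        ...  # critical section
    finally:
        lock.release()
```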
python | huggingface__transformers | tests/models/deformable_detr/test_modeling_deformable_detr.py | {
"start": 1448,
"end": 7118
} | class ____:
def __init__(
self,
parent,
batch_size=8,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=8,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
num_queries=12,
num_channels=3,
image_size=196,
n_targets=8,
num_labels=91,
num_feature_levels=4,
encoder_n_points=2,
decoder_n_points=6,
):
self.parent = parent
self.batch_size = batch_size
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.num_queries = num_queries
self.num_channels = num_channels
self.image_size = image_size
self.n_targets = n_targets
self.num_labels = num_labels
self.num_feature_levels = num_feature_levels
self.encoder_n_points = encoder_n_points
self.decoder_n_points = decoder_n_points
# we also set the expected seq length for both encoder and decoder
self.encoder_seq_length = (
math.ceil(self.image_size / 8) ** 2
+ math.ceil(self.image_size / 16) ** 2
+ math.ceil(self.image_size / 32) ** 2
+ math.ceil(self.image_size / 64) ** 2
)
self.decoder_seq_length = self.num_queries
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device)
labels = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
labels = []
for i in range(self.batch_size):
target = {}
target["class_labels"] = torch.randint(
high=self.num_labels, size=(self.n_targets,), device=torch_device
)
target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
target["masks"] = torch.rand(self.n_targets, self.image_size, self.image_size, device=torch_device)
labels.append(target)
config = self.get_config()
return config, pixel_values, pixel_mask, labels
def get_config(self):
resnet_config = ResNetConfig(
num_channels=3,
embeddings_size=10,
hidden_sizes=[10, 20, 30, 40],
depths=[1, 1, 2, 1],
hidden_act="relu",
num_labels=3,
out_features=["stage2", "stage3", "stage4"],
out_indices=[2, 3, 4],
)
return DeformableDetrConfig(
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
num_queries=self.num_queries,
num_labels=self.num_labels,
num_feature_levels=self.num_feature_levels,
encoder_n_points=self.encoder_n_points,
decoder_n_points=self.decoder_n_points,
use_timm_backbone=False,
backbone=None,
backbone_config=resnet_config,
use_pretrained_backbone=False,
)
def prepare_config_and_inputs_for_common(self):
config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def create_and_check_deformable_detr_model(self, config, pixel_values, pixel_mask, labels):
model = DeformableDetrModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_size))
def create_and_check_deformable_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels):
model = DeformableDetrForObjectDetection(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
@require_torch
| DeformableDetrModelTester |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_glue_catalog.py | {
"start": 1411,
"end": 7145
} | class ____:
def setup_method(self, method):
self.client = boto3.client("glue", region_name="us-east-1")
self.hook = GlueCatalogHook(region_name="us-east-1")
def test_get_conn_returns_a_boto3_connection(self):
hook = GlueCatalogHook(region_name="us-east-1")
assert hook.get_conn() is not None
def test_conn_id(self):
hook = GlueCatalogHook(aws_conn_id="my_aws_conn_id", region_name="us-east-1")
assert hook.aws_conn_id == "my_aws_conn_id"
def test_region(self):
hook = GlueCatalogHook(region_name="us-west-2")
assert hook.region_name == "us-west-2"
@mock.patch.object(GlueCatalogHook, "get_conn")
def test_get_partitions_empty(self, mock_get_conn):
response = set()
mock_get_conn.get_paginator.paginate.return_value = response
hook = GlueCatalogHook(region_name="us-east-1")
assert hook.get_partitions("db", "tbl") == set()
@mock.patch.object(GlueCatalogHook, "get_conn")
def test_get_partitions(self, mock_get_conn):
response = [{"Partitions": [{"Values": ["2015-01-01"]}]}]
mock_paginator = mock.Mock()
mock_paginator.paginate.return_value = response
mock_conn = mock.Mock()
mock_conn.get_paginator.return_value = mock_paginator
mock_get_conn.return_value = mock_conn
hook = GlueCatalogHook(region_name="us-east-1")
result = hook.get_partitions("db", "tbl", expression="foo=bar", page_size=2, max_items=3)
assert result == {("2015-01-01",)}
mock_conn.get_paginator.assert_called_once_with("get_partitions")
mock_paginator.paginate.assert_called_once_with(
DatabaseName="db",
TableName="tbl",
Expression="foo=bar",
PaginationConfig={"PageSize": 2, "MaxItems": 3},
)
@mock.patch.object(GlueCatalogHook, "get_partitions")
def test_check_for_partition(self, mock_get_partitions):
mock_get_partitions.return_value = {("2018-01-01",)}
hook = GlueCatalogHook(region_name="us-east-1")
assert hook.check_for_partition("db", "tbl", "expr")
mock_get_partitions.assert_called_once_with("db", "tbl", "expr", max_items=1)
@mock.patch.object(GlueCatalogHook, "get_partitions")
def test_check_for_partition_false(self, mock_get_partitions):
mock_get_partitions.return_value = set()
hook = GlueCatalogHook(region_name="us-east-1")
assert not hook.check_for_partition("db", "tbl", "expr")
def test_get_table_exists(self):
self.client.create_database(DatabaseInput={"Name": DB_NAME})
self.client.create_table(DatabaseName=DB_NAME, TableInput=TABLE_INPUT)
result = self.hook.get_table(DB_NAME, TABLE_NAME)
assert result["Name"] == TABLE_INPUT["Name"]
assert result["StorageDescriptor"]["Location"] == TABLE_INPUT["StorageDescriptor"]["Location"]
def test_get_table_not_exists(self):
self.client.create_database(DatabaseInput={"Name": DB_NAME})
self.client.create_table(DatabaseName=DB_NAME, TableInput=TABLE_INPUT)
with pytest.raises(botocore.exceptions.ClientError) as err_ctx:
self.hook.get_table(DB_NAME, "dummy_table")
assert err_ctx.value.response["Error"]["Code"] == "EntityNotFoundException"
def test_get_table_location(self):
self.client.create_database(DatabaseInput={"Name": DB_NAME})
self.client.create_table(DatabaseName=DB_NAME, TableInput=TABLE_INPUT)
result = self.hook.get_table_location(DB_NAME, TABLE_NAME)
assert result == TABLE_INPUT["StorageDescriptor"]["Location"]
def test_get_partition(self):
self.client.create_database(DatabaseInput={"Name": DB_NAME})
self.client.create_table(DatabaseName=DB_NAME, TableInput=TABLE_INPUT)
self.client.create_partition(
DatabaseName=DB_NAME, TableName=TABLE_NAME, PartitionInput=PARTITION_INPUT
)
result = self.hook.get_partition(DB_NAME, TABLE_NAME, PARTITION_INPUT["Values"])
assert result["Values"] == PARTITION_INPUT["Values"]
assert result["DatabaseName"] == DB_NAME
assert result["TableName"] == TABLE_INPUT["Name"]
@mock.patch.object(GlueCatalogHook, "get_conn")
def test_get_partition_with_client_error(self, mocked_connection):
mocked_client = mock.Mock()
mocked_client.get_partition.side_effect = ClientError({}, "get_partition")
mocked_connection.return_value = mocked_client
with pytest.raises(AirflowException):
self.hook.get_partition(DB_NAME, TABLE_NAME, PARTITION_INPUT["Values"])
mocked_client.get_partition.assert_called_once_with(
DatabaseName=DB_NAME, TableName=TABLE_NAME, PartitionValues=PARTITION_INPUT["Values"]
)
def test_create_partition(self):
self.client.create_database(DatabaseInput={"Name": DB_NAME})
self.client.create_table(DatabaseName=DB_NAME, TableInput=TABLE_INPUT)
result = self.hook.create_partition(DB_NAME, TABLE_NAME, PARTITION_INPUT)
assert result
@mock.patch.object(GlueCatalogHook, "get_conn")
def test_create_partition_with_client_error(self, mocked_connection):
mocked_client = mock.Mock()
mocked_client.create_partition.side_effect = ClientError({}, "create_partition")
mocked_connection.return_value = mocked_client
with pytest.raises(AirflowException):
self.hook.create_partition(DB_NAME, TABLE_NAME, PARTITION_INPUT)
mocked_client.create_partition.assert_called_once_with(
DatabaseName=DB_NAME, TableName=TABLE_NAME, PartitionInput=PARTITION_INPUT
)
| TestGlueCatalogHook |
python | cython__cython | Demos/benchmarks/bm_richards_cclass.py | {
"start": 6916,
"end": 7866
} | class ____(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,p,w,s,r)
def fn(self, pkt: Packet | None, r: WorkerTaskRec):
w = r
if pkt is None:
return self.waitTask()
if w.destination == I_HANDLERA:
dest = I_HANDLERB
else:
dest = I_HANDLERA
w.destination = dest
pkt.ident = dest
pkt.datum = 0
for i in BUFSIZE_RANGE: # range(BUFSIZE)
w.count += 1
if w.count > 26:
w.count = 1
pkt.data[i] = A + w.count - 1
return self.qpkt(pkt)
import time
def schedule():
t: Task = taskWorkArea.taskList
while t is not None:
pkt = None
if tracing:
print("tcb =", t.ident)
if t.isTaskHoldingOrWaiting():
t = t.link
else:
if tracing: trace(chr(ord("0")+t.ident))
t = t.runTask()
| WorkTask |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-google-sheets/destination_google_sheets/client.py | {
"start": 613,
"end": 2539
} | class ____:
logger = AirbyteLogger()
def __init__(self, config: Dict):
self.config = config
self.retries = 100 # max number of backoff retries
def authorize(self) -> pygsheets_client:
credentials = self.config.get("credentials", {})
auth_type = credentials.get("auth_type")
if auth_type == "service":
service_account_info = credentials.get("service_account_info")
if isinstance(service_account_info, str):
service_account_info = json.loads(service_account_info)
auth_creds = service_account.Credentials.from_service_account_info(service_account_info, scopes=SCOPES)
else:
input_creds = self.config.get("credentials")
auth_creds = client_account.Credentials.from_authorized_user_info(info=input_creds)
try:
client = pygsheets.authorize(custom_credentials=auth_creds)
except Exception as e:
self.logger.error(f"Failed to authorize with service account. Error: {str(e)}")
self.logger.error(f"Error type: {type(e)}")
raise
# Increase max number of retries if Rate Limit is reached. Error: <HttpError 429>
client.drive.retries = self.retries # for google drive api
client.sheet.retries = self.retries # for google sheets api
# Only check token expiration for OAuth authentication
if auth_type != "service":
if client.oauth.expired:
self.logger.info("Auth session is expired. Refreshing...")
client.oauth.refresh(Request())
if not client.oauth.expired:
self.logger.info("Successfully refreshed auth session")
else:
self.logger.fatal("The token is expired and could not be refreshed, please check the credentials are still valid!")
return client
| GoogleSheetsClient |
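A sketch of the two credential shapes `authorize()` accepts. The exact `auth_type` value for the OAuth branch and all key material are placeholders; the code above only special-cases `"service"` and treats everything else as an authorized-user flow.

```python
service_config = {
    "credentials": {
        "auth_type": "service",
        # Either a dict or a JSON string is accepted for service accounts.
        "service_account_info": '{"type": "service_account", "project_id": "..."}',
    }
}
sheets = GoogleSheetsClient(service_config).authorize()

oauth_config = {
    "credentials": {
        "auth_type": "oauth2.0",  # any non-"service" value takes the OAuth branch
        "client_id": "...",
        "client_secret": "...",
        "refresh_token": "...",
    }
}
sheets = GoogleSheetsClient(oauth_config).authorize()
```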
python | django__django | tests/test_utils/tests.py | {
"start": 83536,
"end": 85509
} | class ____(SimpleTestCase):
databases = {"default"}
def test_allowed_database_queries(self):
Car.objects.first()
def test_allowed_database_chunked_cursor_queries(self):
next(Car.objects.iterator(), None)
def test_allowed_threaded_database_queries(self):
connections_dict = {}
def thread_func():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections["default"]
next(Car.objects.iterator(), None)
# Allow thread sharing so the connection can be closed by the main
# thread.
connection.inc_thread_sharing()
connections_dict[id(connection)] = connection
try:
t = threading.Thread(target=thread_func)
t.start()
t.join()
finally:
# Finish by closing the connections opened by the other threads
# (the connection opened in the main thread will automatically be
# closed on teardown).
for conn in connections_dict.values():
if conn is not connection and conn.allow_thread_sharing:
conn.validate_thread_sharing()
conn._close()
conn.dec_thread_sharing()
def test_allowed_database_copy_queries(self):
new_connection = connection.copy("dynamic_connection")
try:
with new_connection.cursor() as cursor:
sql = f"SELECT 1{new_connection.features.bare_select_suffix}"
cursor.execute(sql)
self.assertEqual(cursor.fetchone()[0], 1)
finally:
new_connection.validate_thread_sharing()
new_connection._close()
if hasattr(new_connection, "close_pool"):
new_connection.close_pool()
| AllowedDatabaseQueriesTests |
python | getsentry__sentry | src/sentry/api/serializers/release_details_types.py | {
"start": 66,
"end": 140
} | class ____(TypedDict, total=False):
description: str
| VersionInfoOptional |
python | getsentry__sentry | tests/sentry/relocation/api/endpoints/artifacts/test_details.py | {
"start": 1953,
"end": 6491
} | class ____(GetRelocationArtifactDetailsTest):
def setUp(self) -> None:
super().setUp()
dir = f"runs/{self.relocation.uuid}"
self.relocation_storage = get_relocation_storage()
# These files are unencrypted, so just save the file name as the content for testing
# purposes.
self.relocation_storage.save(
f"{dir}/somedir/file.json", StringIO(f'"{dir}/somedir/file.json"')
)
# `.tar` files should be encrypted.
with TemporaryDirectory() as tmp_dir:
(priv_key_pem, pub_key_pem) = generate_rsa_key_pair()
tmp_priv_key_path = Path(tmp_dir).joinpath("key")
self.priv_key_pem = priv_key_pem
with open(tmp_priv_key_path, "wb") as f:
f.write(priv_key_pem)
tmp_pub_key_path = Path(tmp_dir).joinpath("key.pub")
self.pub_key_pem = pub_key_pem
with open(tmp_pub_key_path, "wb") as f:
f.write(pub_key_pem)
with open(tmp_pub_key_path, "rb") as p:
self.tarball = create_encrypted_export_tarball(
f"{dir}/encrypted/file.tar", LocalFileEncryptor(p)
).getvalue()
self.relocation_storage.save(f"{dir}/encrypted/file.tar", BytesIO(self.tarball))
def mock_kms_client(self, fake_kms_client: mock.Mock) -> None:
unwrapped = unwrap_encrypted_export_tarball(BytesIO(self.tarball))
plaintext_dek = LocalFileDecryptor.from_bytes(
self.priv_key_pem
).decrypt_data_encryption_key(unwrapped)
fake_kms_client.return_value.asymmetric_decrypt.return_value = SimpleNamespace(
plaintext=plaintext_dek,
plaintext_crc32c=crc32c(plaintext_dek),
)
fake_kms_client.return_value.get_public_key.return_value = SimpleNamespace(
pem=self.pub_key_pem.decode("utf-8")
)
@patch("sentry.backup.crypto.KeyManagementServiceClient")
def test_good_unencrypted_with_superuser(self, fake_kms_client: mock.Mock) -> None:
self.mock_kms_client(fake_kms_client)
self.add_user_permission(self.superuser, RELOCATION_ADMIN_PERMISSION)
self.login_as(user=self.superuser, superuser=True)
response = self.get_success_response(str(self.relocation.uuid), "somedir", "file.json")
assert fake_kms_client.return_value.asymmetric_decrypt.call_count == 0
assert (
response.data["contents"] == f'"runs/{self.relocation.uuid}/somedir/file.json"'.encode()
)
@patch("sentry.backup.crypto.KeyManagementServiceClient")
def test_good_encrypted_with_superuser(self, fake_kms_client: mock.Mock) -> None:
self.mock_kms_client(fake_kms_client)
self.add_user_permission(self.superuser, RELOCATION_ADMIN_PERMISSION)
self.login_as(user=self.superuser, superuser=True)
response = self.get_success_response(str(self.relocation.uuid), "encrypted", "file.tar")
assert fake_kms_client.return_value.asymmetric_decrypt.call_count == 1
assert str(response.data["contents"]) == f'"runs/{self.relocation.uuid}/encrypted/file.tar"'
@override_options({"staff.ga-rollout": True})
@patch("sentry.backup.crypto.KeyManagementServiceClient")
def test_good_unencrypted_with_staff(self, fake_kms_client: mock.Mock) -> None:
self.mock_kms_client(fake_kms_client)
self.add_user_permission(self.staff_user, RELOCATION_ADMIN_PERMISSION)
self.login_as(user=self.staff_user, staff=True)
response = self.get_success_response(str(self.relocation.uuid), "somedir", "file.json")
assert fake_kms_client.return_value.asymmetric_decrypt.call_count == 0
assert (
response.data["contents"] == f'"runs/{self.relocation.uuid}/somedir/file.json"'.encode()
)
@override_options({"staff.ga-rollout": True})
@patch("sentry.backup.crypto.KeyManagementServiceClient")
def test_good_encrypted_with_staff(self, fake_kms_client: mock.Mock) -> None:
self.mock_kms_client(fake_kms_client)
self.add_user_permission(self.staff_user, RELOCATION_ADMIN_PERMISSION)
self.login_as(user=self.staff_user, staff=True)
response = self.get_success_response(str(self.relocation.uuid), "encrypted", "file.tar")
assert fake_kms_client.return_value.asymmetric_decrypt.call_count == 1
assert str(response.data["contents"]) == f'"runs/{self.relocation.uuid}/encrypted/file.tar"'
| GetRelocationArtifactDetailsGoodTest |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 40512,
"end": 41283
} | class ____(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
action: Annotated[
Literal["update"], Field(description="The action to be performed on the entities.", title="Action")
]
entities: Annotated[
list[VariableBody], Field(description="A list of entities to be updated.", title="Entities")
]
update_mask: Annotated[
list[str] | None,
Field(
description="A list of field names to update for each entity.Only these fields will be applied from the request body to the database model.Any extra fields provided will be ignored.",
title="Update Mask",
),
] = None
action_on_non_existence: BulkActionNotOnExistence | None = "fail"
| BulkUpdateActionVariableBody |
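A construction sketch for this model. The `VariableBody` fields shown (`key`/`value`) are assumptions about that referenced model, and `"skip"` is assumed to be a valid `BulkActionNotOnExistence` value alongside the `"fail"` default.

```python
body = BulkUpdateActionVariableBody(
    action="update",
    entities=[VariableBody(key="my_var", value="new-value")],  # hypothetical fields
    update_mask=["value"],           # only `value` is applied to the DB model
    action_on_non_existence="skip",  # assumed enum member; default is "fail"
)
```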
python | huggingface__transformers | src/transformers/models/idefics3/modeling_idefics3.py | {
"start": 17799,
"end": 18370
} | class ____(PreTrainedModel):
config: Idefics3Config
base_model_prefix = "model"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_no_split_modules = ["Idefics3VisionAttention", "Idefics3DecoderLayer"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
@auto_docstring(
custom_intro="""
The Idefics3 Vision Transformer Model outputting raw image embedding.
"""
)
| Idefics3PreTrainedModel |
python | ray-project__ray | python/ray/data/_internal/logical/rules/limit_pushdown.py | {
"start": 427,
"end": 8102
} | class ____(Rule):
"""Rule for pushing down the limit operator.
When a limit operator is present, we apply the limit on the
most upstream operator that supports it. We are conservative and only
push through operators that we know for certain do not modify row counts:
- Project operations (column selection)
- MapRows operations (row-wise transformations that preserve row count)
- Union operations (limits are prepended to each branch)
We stop at:
- Any operator that can modify the number of output rows (Sort, Shuffle, Aggregate, Read etc.)
For per-block limiting, we also set per-block limits on Read operators to optimize
I/O while keeping the Limit operator for exact row count control.
In addition, we also fuse consecutive Limit operators into a single
Limit operator, i.e. `Limit[n] -> Limit[m]` becomes `Limit[min(n, m)]`.
"""
def apply(self, plan: LogicalPlan) -> LogicalPlan:
# The DAG's root is the most downstream operator.
def transform(node: LogicalOperator) -> LogicalOperator:
if isinstance(node, Limit):
# First, try to fuse with upstream Limit if possible (reuse fusion logic)
upstream_op = node.input_dependency
if isinstance(upstream_op, Limit):
# Fuse consecutive Limits: Limit[n] -> Limit[m] becomes Limit[min(n,m)]
new_limit = min(node._limit, upstream_op._limit)
return Limit(upstream_op.input_dependency, new_limit)
# If no fusion, apply pushdown logic
if isinstance(upstream_op, Union):
return self._push_limit_into_union(node)
else:
return self._push_limit_down(node)
return node
optimized_dag = plan.dag._apply_transform(transform)
return LogicalPlan(dag=optimized_dag, context=plan.context)
def _apply_limit_pushdown(self, op: LogicalOperator) -> LogicalOperator:
"""Push down Limit operators in the given operator DAG.
This implementation uses ``LogicalOperator._apply_transform`` to
post-order-traverse the DAG and rewrite each ``Limit`` node via
:py:meth:`_push_limit_down`.
"""
def transform(node: LogicalOperator) -> LogicalOperator:
if isinstance(node, Limit):
if isinstance(node.input_dependency, Union):
return self._push_limit_into_union(node)
return self._push_limit_down(node)
return node
# ``_apply_transform`` returns the (potentially new) root of the DAG.
return op._apply_transform(transform)
def _push_limit_into_union(self, limit_op: Limit) -> Limit:
"""Push `limit_op` INTO every branch of its upstream Union
and preserve the global limit.
Existing topology:
child₁ , child₂ , … -> Union -> Limit
New topology:
child₁ -> Limit ->│
│
child₂ -> Limit ->┤ Union ──► Limit (original)
│
… -> Limit ->│
"""
union_op = limit_op.input_dependency
assert isinstance(union_op, Union)
# 1. Detach the original Union from its children.
original_children = list(union_op.input_dependencies)
for child in original_children:
if union_op in child._output_dependencies:
child._output_dependencies.remove(union_op)
# 2. Insert a branch-local Limit and push it further upstream.
branch_tails: List[LogicalOperator] = []
for child in original_children:
raw_limit = Limit(child, limit_op._limit) # child → limit
if isinstance(child, Union):
# This represents the limit operator appended after the union.
pushed_tail = self._push_limit_into_union(raw_limit)
else:
# This represents the operator that takes the place of the original limit position.
pushed_tail = self._push_limit_down(raw_limit)
branch_tails.append(pushed_tail)
# 3. Re-attach the Union so that it consumes the *tails*.
new_union = Union(*branch_tails)
for tail in branch_tails:
tail._output_dependencies.append(new_union)
# 4. Re-wire the original (global) Limit to consume the *new* Union.
limit_op._input_dependencies = [new_union]
new_union._output_dependencies = [limit_op]
return limit_op
def _push_limit_down(self, limit_op: Limit) -> LogicalOperator:
"""Push a single limit down through compatible operators conservatively.
Creates entirely new operators instead of mutating existing ones.
"""
# Traverse up the DAG until we reach the first operator that meets
# one of the stopping conditions
current_op = limit_op.input_dependency
num_rows_preserving_ops: List[LogicalOperator] = []
while (
isinstance(current_op, AbstractOneToOne)
and not current_op.can_modify_num_rows()
):
if isinstance(current_op, AbstractMap):
min_rows = current_op._min_rows_per_bundled_input
if min_rows is not None and min_rows > limit_op._limit:
# Avoid pushing the limit past batch-based maps that require more
# rows than the limit to produce stable outputs (e.g. schema).
logger.info(
f"Skipping push down of limit {limit_op._limit} through map {current_op} because it requires {min_rows} rows to produce stable outputs"
)
break
num_rows_preserving_ops.append(current_op)
current_op = current_op.input_dependency
# If we couldn't push through any operators, return original
if not num_rows_preserving_ops:
return limit_op
# Apply per-block limit to the deepest operator if it supports it
limit_input = self._apply_per_block_limit_if_supported(
current_op, limit_op._limit
)
# Build the new operator chain: row-count-modifying input -> Limit -> row-count-preserving operators
new_limit = Limit(limit_input, limit_op._limit)
result_op = new_limit
# Recreate the intermediate operators and apply per-block limits
for op_to_recreate in reversed(num_rows_preserving_ops):
recreated_op = self._recreate_operator_with_new_input(
op_to_recreate, result_op
)
result_op = recreated_op
return result_op
def _apply_per_block_limit_if_supported(
self, op: LogicalOperator, limit: int
) -> LogicalOperator:
"""Apply per-block limit to operators that support it."""
if isinstance(op, AbstractMap):
new_op = copy.copy(op)
new_op.set_per_block_limit(limit)
return new_op
return op
def _recreate_operator_with_new_input(
self, original_op: LogicalOperator, new_input: LogicalOperator
) -> LogicalOperator:
"""Create a new operator of the same type as original_op but with new_input as its input."""
if isinstance(original_op, Limit):
return Limit(new_input, original_op._limit)
# Use copy and replace input dependencies approach
new_op = copy.copy(original_op)
new_op._input_dependencies = [new_input]
new_op._output_dependencies = []
return new_op
| LimitPushdownRule |
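The fusion identity used in `apply()` is worth seeing in isolation: taking `m` rows of the first `n` rows yields `min(n, m)` rows, which is why consecutive limits collapse. A self-contained illustration:

```python
def fuse_limits(n: int, m: int) -> int:
    # Limit[n] -> Limit[m] is equivalent to a single Limit[min(n, m)].
    return min(n, m)

assert fuse_limits(100, 10) == 10   # downstream limit dominates
assert fuse_limits(5, 50) == 5      # upstream limit dominates
```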
python | django__django | tests/migrations/test_multidb.py | {
"start": 368,
"end": 532
} | class ____:
"""
A router that doesn't allow migrating.
"""
def allow_migrate(self, db, app_label, **hints):
return False
| MigrateNothingRouter |
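A wiring sketch: routers like this are activated through Django settings. The dotted path below follows the record's module path and is otherwise an assumption.

```python
# settings.py
DATABASE_ROUTERS = ["tests.migrations.test_multidb.MigrateNothingRouter"]

# With the router installed, `migrate` applies nothing anywhere, because
# allow_migrate() returns False for every (db, app_label) pair.
```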
python | scipy__scipy | scipy/signal/tests/test_filter_design.py | {
"start": 59888,
"end": 63121
} | class ____:
def test_allclose(self, xp):
"""Test for false positive on allclose in normalize() in
filter_design.py"""
# Test to make sure the allclose call within signal.normalize does not
# choose false positives. Then check against a known output from MATLAB
# to make sure the fix doesn't break anything.
# These are the coefficients returned from
# `[b,a] = cheby1(8, 0.5, 0.048)'
# in MATLAB. There are at least 15 significant figures in each
# coefficient, so it makes sense to test for errors on the order of
# 1e-13 (this can always be relaxed if different platforms have
# different rounding errors)
b_matlab = xp.asarray([2.150733144728282e-11, 1.720586515782626e-10,
6.022052805239190e-10, 1.204410561047838e-09,
1.505513201309798e-09, 1.204410561047838e-09,
6.022052805239190e-10, 1.720586515782626e-10,
2.150733144728282e-11])
a_matlab = xp.asarray([1.000000000000000e+00, -7.782402035027959e+00,
2.654354569747454e+01, -5.182182531666387e+01,
6.334127355102684e+01, -4.963358186631157e+01,
2.434862182949389e+01, -6.836925348604676e+00,
8.412934944449140e-01])
# This is the input to signal.normalize after passing through the
# equivalent steps in signal.iirfilter as was done for MATLAB
b_norm_in = xp.asarray([1.5543135865293012e-06, 1.2434508692234413e-05,
4.3520780422820447e-05, 8.7041560845640893e-05,
1.0880195105705122e-04, 8.7041560845640975e-05,
4.3520780422820447e-05, 1.2434508692234413e-05,
1.5543135865293012e-06])
a_norm_in = xp.asarray([7.2269025909127173e+04, -5.6242661430467968e+05,
1.9182761917308895e+06, -3.7451128364682454e+06,
4.5776121393762771e+06, -3.5869706138592605e+06,
1.7596511818472347e+06, -4.9409793515707983e+05,
6.0799461347219651e+04])
b_output, a_output = normalize(b_norm_in, a_norm_in)
# The test on b works for decimal=14 but the one for a does not. For
# the sake of consistency, both of these are decimal=13. If something
# breaks on another platform, it is probably fine to relax this lower.
decimal = 13 if xp_default_dtype(xp) == xp.float64 else 5
assert_array_almost_equal(b_matlab, b_output, decimal=decimal)
assert_array_almost_equal(a_matlab, a_output, decimal=decimal)
def test_errors(self):
"""Test the error cases."""
# all zero denominator
assert_raises(ValueError, normalize, [1, 2], 0)
# denominator not 1 dimensional
assert_raises(ValueError, normalize, [1, 2], [[1]])
# numerator too many dimensions
assert_raises(ValueError, normalize, [[[1, 2]]], 1)
@make_xp_test_case(lp2lp)
| TestNormalize |
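For reference, a minimal sketch of what `normalize()` itself does with well-behaved input: both polynomials are divided by `a[0]` so the denominator becomes monic. The values here are illustrative.

```python
from scipy.signal import normalize

b, a = normalize([2.0, 4.0], [2.0, 1.0])
# b -> [1.0, 2.0], a -> [1.0, 0.5]
```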
python | simonw__datasette | datasette/database.py | {
"start": 25460,
"end": 26192
} | class ____:
def __init__(self, rows, truncated, description):
self.rows = rows
self.truncated = truncated
self.description = description
@property
def columns(self):
return [d[0] for d in self.description]
def first(self):
if self.rows:
return self.rows[0]
else:
return None
def single_value(self):
if self.rows and 1 == len(self.rows) and 1 == len(self.rows[0]):
return self.rows[0][0]
else:
raise MultipleValues
def dicts(self):
return [dict(row) for row in self.rows]
def __iter__(self):
return iter(self.rows)
def __len__(self):
return len(self.rows)
| Results |
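A standalone sketch of the wrapper above; the `description` entries mimic DB-API `cursor.description` tuples, whose first element is the column name.

```python
results = Results(
    rows=[(1, "one"), (2, "two")],
    truncated=False,
    description=[
        ("id", None, None, None, None, None, None),
        ("name", None, None, None, None, None, None),
    ],
)
assert results.columns == ["id", "name"]
assert results.first() == (1, "one")
assert len(results) == 2
```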
python | ipython__ipython | tests/test_ipunittest.py | {
"start": 2764,
"end": 3365
} | class ____(object):
"""For methods, the normal decorator doesn't work.
But rewriting the docstring with ip2py does, *but only if using nose
--with-doctest*. Do we want to have that as a dependency?
"""
@ipdocstring
def ipdt_method(self):
"""
In [20]: print(1)
1
In [26]: for i in range(4):
....: print(i)
....:
....:
0
1
2
3
In [27]: 3+4
Out[27]: 7
"""
def normaldt_method(self):
"""
>>> print(1+1)
2
"""
| Foo |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_state_dict.py | {
"start": 4280,
"end": 4838
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
torch.manual_seed(0)
self.net1 = nn.Sequential(nn.Linear(8, 16), nn.ReLU())
self.net2 = nn.Sequential(nn.Linear(16, 16), nn.ReLU())
self.net3 = self.net2
self.random_parameter = nn.Parameter(torch.Tensor(10))
self.shared_parameter = self.random_parameter
def forward(self, x):
return self.net3(self.net2(self.net1(x)))
def get_input(self):
return torch.rand(8, 8, device=device_type)
| TestDummyModel |
python | ray-project__ray | python/ray/tune/tests/_test_trial_runner_callbacks.py | {
"start": 649,
"end": 1765
} | class ____(Callback):
def __init__(self):
self.state = OrderedDict()
def setup(self, **info):
self.state["setup"] = info
def on_step_begin(self, **info):
self.state["step_begin"] = info
def on_step_end(self, **info):
self.state["step_end"] = info
def on_trial_start(self, **info):
self.state["trial_start"] = info
def on_trial_restore(self, **info):
self.state["trial_restore"] = info
def on_trial_save(self, **info):
self.state["trial_save"] = info
def on_trial_result(self, **info):
self.state["trial_result"] = info
result = info["result"]
trial = info["trial"]
assert result.get(TRAINING_ITERATION, None) != trial.last_result.get(
TRAINING_ITERATION, None
)
def on_trial_complete(self, **info):
self.state["trial_complete"] = info
def on_trial_error(self, **info):
self.state["trial_fail"] = info
def on_experiment_end(self, **info):
self.state["experiment_end"] = info
# TODO(xwjiang): Move this to a testing util.
| TestCallback |
python | PrefectHQ__prefect | tests/test_tasks.py | {
"start": 169538,
"end": 174341
} | class ____:
@pytest.mark.parametrize(
"args, kwargs",
[
((42, 42), {}),
([42, 42], {}),
((), {"x": 42, "y": 42}),
([42], {"y": 42}),
],
)
async def test_with_args_kwargs(self, args, kwargs):
@task
def multiply(x, y):
return x * y
future = multiply.apply_async(args, kwargs)
assert await get_background_task_run_parameters(
multiply, future.state.state_details.task_parameters_id
) == {"parameters": {"x": 42, "y": 42}, "context": ANY}
def test_with_duplicate_values(self):
@task
def add(x, y):
return x + y
with pytest.raises(
ParameterBindError, match="multiple values for argument 'x'"
):
add.apply_async((42,), {"x": 42})
def test_missing_values(self):
@task
def add(x, y):
return x + y
with pytest.raises(
ParameterBindError, match="missing a required argument: 'y'"
):
add.apply_async((42,))
async def test_handles_default_values(self):
@task
def add(x, y=42):
return x + y
future = add.apply_async((42,))
assert await get_background_task_run_parameters(
add, future.state.state_details.task_parameters_id
) == {"parameters": {"x": 42, "y": 42}, "context": ANY}
async def test_overrides_defaults(self):
@task
def add(x, y=42):
return x + y
future = add.apply_async((42,), {"y": 100})
assert await get_background_task_run_parameters(
add, future.state.state_details.task_parameters_id
) == {"parameters": {"x": 42, "y": 100}, "context": ANY}
async def test_with_variadic_args(self):
@task
def add_em_up(*args):
return sum(args)
future = add_em_up.apply_async((42, 42))
assert await get_background_task_run_parameters(
add_em_up, future.state.state_details.task_parameters_id
) == {"parameters": {"args": (42, 42)}, "context": ANY}
async def test_with_variadic_kwargs(self):
@task
def add_em_up(**kwargs):
return sum(kwargs.values())
future = add_em_up.apply_async(kwargs={"x": 42, "y": 42})
assert await get_background_task_run_parameters(
add_em_up, future.state.state_details.task_parameters_id
) == {"parameters": {"kwargs": {"x": 42, "y": 42}}, "context": ANY}
async def test_with_variadic_args_and_kwargs(self):
@task
def add_em_up(*args, **kwargs):
return sum(args) + sum(kwargs.values())
future = add_em_up.apply_async((42,), {"y": 42})
assert await get_background_task_run_parameters(
add_em_up, future.state.state_details.task_parameters_id
) == {"parameters": {"args": (42,), "kwargs": {"y": 42}}, "context": ANY}
async def test_with_wait_for(self):
task_run_id = uuid4()
wait_for_future = PrefectDistributedFuture(task_run_id=task_run_id)
@task
def multiply(x, y):
return x * y
future = multiply.apply_async((42, 42), wait_for=[wait_for_future])
assert await get_background_task_run_parameters(
multiply, future.state.state_details.task_parameters_id
) == {
"parameters": {"x": 42, "y": 42},
"wait_for": [wait_for_future],
"context": ANY,
}
async def test_with_only_wait_for(self):
task_run_id = uuid4()
wait_for_future = PrefectDistributedFuture(task_run_id=task_run_id)
@task
def the_answer():
return 42
future = the_answer.apply_async(wait_for=[wait_for_future])
assert await get_background_task_run_parameters(
the_answer, future.state.state_details.task_parameters_id
) == {"wait_for": [wait_for_future], "context": ANY}
async def test_with_dependencies(self, prefect_client):
task_run_id = uuid4()
@task
def add(x, y):
return x + y
future = add.apply_async(
(42, 42), dependencies={"x": {TaskRunResult(id=task_run_id)}}
)
task_run = await prefect_client.read_task_run(future.task_run_id)
assert task_run.task_inputs == {
"x": [TaskRunResult(id=task_run_id)],
"y": [],
}
def test_apply_async_emits_run_ui_url(self, caplog):
@task
def add(x, y):
return x + y
with temporary_settings({PREFECT_UI_URL: "http://test/api"}):
add.apply_async((42, 42))
assert "in the UI at 'http://test/api/runs/task-run/" in caplog.text
| TestApplyAsync |
python | ray-project__ray | python/ray/data/aggregate.py | {
"start": 11229,
"end": 13328
} | class ____(AggregateFnV2[int, int]):
"""Defines count aggregation.
Example:
.. testcode::
import ray
from ray.data.aggregate import Count
ds = ray.data.range(100)
# Schema: {'id': int64}
ds = ds.add_column("group_key", lambda x: x % 3)
# Schema: {'id': int64, 'group_key': int64}
# Counting all rows:
result = ds.aggregate(Count())
# result: {'count()': 100}
# Counting all rows per group:
result = ds.groupby("group_key").aggregate(Count(on="id")).take_all()
# result: [{'group_key': 0, 'count(id)': 34},
# {'group_key': 1, 'count(id)': 33},
# {'group_key': 2, 'count(id)': 33}]
Args:
on: Optional name of the column to count values on. If None, counts rows.
ignore_nulls: Whether to ignore null values when counting. Only applies if
`on` is specified. Default is `False` which means `Count()` on a column
will count nulls by default. To match pandas default behavior of not counting nulls,
set `ignore_nulls=True`.
alias_name: Optional name for the resulting column.
"""
def __init__(
self,
on: Optional[str] = None,
ignore_nulls: bool = False,
alias_name: Optional[str] = None,
):
super().__init__(
alias_name if alias_name else f"count({on or ''})",
on=on,
ignore_nulls=ignore_nulls,
zero_factory=lambda: 0,
)
def aggregate_block(self, block: Block) -> int:
block_accessor = BlockAccessor.for_block(block)
if self._target_col_name is None:
# In case of global count, simply fetch number of rows
return block_accessor.num_rows()
return block_accessor.count(
self._target_col_name, ignore_nulls=self._ignore_nulls
)
def combine(self, current_accumulator: int, new: int) -> int:
return current_accumulator + new
@PublicAPI
| Count |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 28806,
"end": 30306
} | class ____(TokenStream):
def __init__(self):
self._input = None
self._stmap = {}
self._stack = []
def addInputStream(self,stream,key):
self._stmap[key] = stream
def getCurrentStream(self):
return self._input
def getStream(self,sname):
try:
stream = self._stmap[sname]
except KeyError:
raise ValueError("TokenStream " + sname + " not found");
return stream;
def nextToken(self):
while 1:
try:
return self._input.nextToken()
except TokenStreamRetryException:
### just retry "forever"
pass
def pop(self):
        stream = self._stack.pop()
        self.select(stream)
        return stream
    def push(self,arg):
        self._stack.append(self._input)
self.select(arg)
def retry(self):
raise TokenStreamRetryException()
def select(self,arg):
if isinstance(arg,TokenStream):
self._input = arg
return
if is_string_type(arg):
self._input = self.getStream(arg)
return
raise TypeError("TokenStreamSelector.select requires " +
"TokenStream or string argument")
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TokenStreamBasicFilter ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
| TokenStreamSelector |
python | django__django | django/db/models/functions/text.py | {
"start": 9711,
"end": 9842
} | class ____(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform):
function = "SHA384"
lookup_name = "sha384"
| SHA384 |
python | rapidsai__cudf | python/cudf/cudf/core/buffer/spill_manager.py | {
"start": 1403,
"end": 6059
} | class ____:
"""Gather spill statistics
Levels of information gathered:
0 - disabled (no overhead).
1+ - duration and number of bytes spilled (very low overhead).
2+ - a traceback for each time a spillable buffer is exposed
permanently (potential high overhead).
The statistics are printed when spilling-on-demand fails to find
any buffer to spill. It is possible to retrieve the statistics
manually through the spill manager, see example below.
Parameters
----------
level : int
If not 0, enables statistics at the specified level.
Examples
--------
>>> import cudf
>>> from cudf.core.buffer.spill_manager import get_global_manager
>>> manager = get_global_manager()
>>> manager.statistics
<SpillStatistics level=1>
>>> df = cudf.DataFrame({"a": [1,2,3]})
>>> manager.spill_to_device_limit(1) # Spill df
24
>>> print(get_global_manager().statistics)
Spill Statistics (level=1):
Spilling (level >= 1):
gpu => cpu: 24B in 0.0033579860000827466s
"""
@dataclass
class Expose:
traceback: str
count: int = 1
total_nbytes: int = 0
spilled_nbytes: int = 0
spill_totals: dict[tuple[str, str], tuple[int, float]]
def __init__(self, level) -> None:
self.lock = threading.Lock()
self.level = level
self.spill_totals = defaultdict(lambda: (0, 0))
# Maps each traceback to a Expose
self.exposes: dict[str, SpillStatistics.Expose] = {}
def log_spill(self, src: str, dst: str, nbytes: int, time: float) -> None:
"""Log a (un-)spilling event
Parameters
----------
src : str
The memory location before spilling.
dst : str
The memory location after spilling.
nbytes : int
Number of bytes (un-)spilled.
        time : float
            Elapsed time the event took in seconds.
"""
if self.level < 1:
return
with self.lock:
total_nbytes, total_time = self.spill_totals[(src, dst)]
self.spill_totals[(src, dst)] = (
total_nbytes + nbytes,
total_time + time,
)
def log_expose(self, buf: SpillableBufferOwner) -> None:
"""Log an expose event
We track logged exposes by grouping them by their traceback such
that `self.exposes` maps tracebacks (as strings) to their logged
data (as `Expose`).
Parameters
----------
        buf : spillable-buffer
The buffer being exposed.
"""
if self.level < 2:
return
with self.lock:
tb = get_traceback()
stat = self.exposes.get(tb, None)
spilled_nbytes = buf.nbytes if buf.is_spilled else 0
if stat is None:
self.exposes[tb] = self.Expose(
traceback=tb,
total_nbytes=buf.nbytes,
spilled_nbytes=spilled_nbytes,
)
else:
stat.count += 1
stat.total_nbytes += buf.nbytes
stat.spilled_nbytes += spilled_nbytes
def __repr__(self) -> str:
return f"<SpillStatistics level={self.level}>"
def __str__(self) -> str:
with self.lock:
ret = f"Spill Statistics (level={self.level}):\n"
if self.level == 0:
return ret[:-1] + " N/A"
# Print spilling stats
ret += " Spilling (level >= 1):"
if len(self.spill_totals) == 0:
ret += " None"
ret += "\n"
for (src, dst), (nbytes, time) in self.spill_totals.items():
ret += f" {src} => {dst}: "
ret += f"{format_bytes(nbytes)} in {time:.3f}s\n"
# Print expose stats
ret += " Exposed buffers (level >= 2): "
if self.level < 2:
return ret + "disabled"
if len(self.exposes) == 0:
ret += "None"
ret += "\n"
for s in sorted(self.exposes.values(), key=lambda x: -x.count):
ret += textwrap.indent(
(
f"exposed {s.count} times, "
f"total: {format_bytes(s.total_nbytes)}, "
f"spilled: {format_bytes(s.spilled_nbytes)}, "
f"traceback:\n{s.traceback}"
),
prefix=" " * 4,
)
return ret[:-1] # Remove last `\n`
| SpillStatistics |
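A minimal sketch of the statistics object used in isolation, assuming the module path shown above is importable (it requires a cuDF installation); level=1 records spill totals only, per the class docstring.

from cudf.core.buffer.spill_manager import SpillStatistics

stats = SpillStatistics(level=1)
stats.log_spill(src="gpu", dst="cpu", nbytes=24, time=0.003)
stats.log_spill(src="gpu", dst="cpu", nbytes=8, time=0.001)
print(stats)  # totals are aggregated per (src, dst) pair: 32B in ~0.004s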
python | donnemartin__interactive-coding-challenges | stacks_queues/stack/stack.py | {
"start": 114,
"end": 559
} | class ____(object):
def __init__(self, top=None):
self.top = top
def push(self, data):
self.top = Node(data, self.top)
def pop(self):
if self.top is None:
return None
data = self.top.data
self.top = self.top.next
return data
def peek(self):
return self.top.data if self.top is not None else None
def is_empty(self):
return self.peek() is None
| Stack |
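A quick usage sketch of the class above. The `Node` class is not part of the snippet, so the two-field definition here is an assumption inferred from the `Node(data, self.top)` call.

class Node(object):
    def __init__(self, data, next=None):
        self.data = data  # payload stored at this stack position
        self.next = next  # link to the node beneath it

stack = Stack()
stack.push(1)
stack.push(2)
print(stack.peek())      # 2 -- last in, first out
print(stack.pop())       # 2
print(stack.is_empty())  # False, 1 is still on the stack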
python | zarr-developers__zarr-python | tests/test_store/test_fsspec.py | {
"start": 4351,
"end": 15905
} | class ____(StoreTests[FsspecStore, cpu.Buffer]):
store_cls = FsspecStore
buffer_cls = cpu.Buffer
@pytest.fixture
def store_kwargs(self) -> dict[str, str | bool]:
try:
from fsspec import url_to_fs
except ImportError:
# before fsspec==2024.3.1
from fsspec.core import url_to_fs
fs, path = url_to_fs(
f"s3://{test_bucket_name}", endpoint_url=endpoint_url, anon=False, asynchronous=True
)
return {"fs": fs, "path": path}
@pytest.fixture
async def store(self, store_kwargs: dict[str, Any]) -> FsspecStore:
return self.store_cls(**store_kwargs)
async def get(self, store: FsspecStore, key: str) -> Buffer:
# make a new, synchronous instance of the filesystem because this test is run in sync code
new_fs = fsspec.filesystem(
"s3", endpoint_url=store.fs.endpoint_url, anon=store.fs.anon, asynchronous=False
)
return self.buffer_cls.from_bytes(new_fs.cat(f"{store.path}/{key}"))
async def set(self, store: FsspecStore, key: str, value: Buffer) -> None:
# make a new, synchronous instance of the filesystem because this test is run in sync code
new_fs = fsspec.filesystem(
"s3", endpoint_url=store.fs.endpoint_url, anon=store.fs.anon, asynchronous=False
)
new_fs.write_bytes(f"{store.path}/{key}", value.to_bytes())
def test_store_repr(self, store: FsspecStore) -> None:
assert str(store) == "<FsspecStore(S3FileSystem, test)>"
def test_store_supports_writes(self, store: FsspecStore) -> None:
assert store.supports_writes
def test_store_supports_listing(self, store: FsspecStore) -> None:
assert store.supports_listing
async def test_fsspec_store_from_uri(self, store: FsspecStore) -> None:
storage_options = {
"endpoint_url": endpoint_url,
"anon": False,
}
meta: dict[str, JSON] = {
"attributes": {"key": "value"},
"zarr_format": 3,
"node_type": "group",
}
await store.set(
"zarr.json",
self.buffer_cls.from_bytes(json.dumps(meta).encode()),
)
group = await zarr.api.asynchronous.open_group(
store=f"s3://{test_bucket_name}", storage_options=storage_options
)
assert dict(group.attrs) == {"key": "value"}
meta = {
"attributes": {"key": "value-2"},
"zarr_format": 3,
"node_type": "group",
}
await store.set(
"directory-2/zarr.json",
self.buffer_cls.from_bytes(json.dumps(meta).encode()),
)
group = await zarr.api.asynchronous.open_group(
store=f"s3://{test_bucket_name}/directory-2", storage_options=storage_options
)
assert dict(group.attrs) == {"key": "value-2"}
meta = {
"attributes": {"key": "value-3"},
"zarr_format": 3,
"node_type": "group",
}
await store.set(
"directory-3/zarr.json",
self.buffer_cls.from_bytes(json.dumps(meta).encode()),
)
group = await zarr.api.asynchronous.open_group(
store=f"s3://{test_bucket_name}", path="directory-3", storage_options=storage_options
)
assert dict(group.attrs) == {"key": "value-3"}
@pytest.mark.skipif(
parse_version(fsspec.__version__) < parse_version("2024.03.01"),
reason="Prior bug in from_upath",
)
def test_from_upath(self) -> None:
upath = pytest.importorskip("upath")
path = upath.UPath(
f"s3://{test_bucket_name}/foo/bar/",
endpoint_url=endpoint_url,
anon=False,
asynchronous=True,
)
result = FsspecStore.from_upath(path)
assert result.fs.endpoint_url == endpoint_url
assert result.fs.asynchronous
assert result.path == f"{test_bucket_name}/foo/bar"
def test_init_warns_if_fs_asynchronous_is_false(self) -> None:
try:
from fsspec import url_to_fs
except ImportError:
# before fsspec==2024.3.1
from fsspec.core import url_to_fs
fs, path = url_to_fs(
f"s3://{test_bucket_name}", endpoint_url=endpoint_url, anon=False, asynchronous=False
)
store_kwargs = {"fs": fs, "path": path}
with pytest.warns(ZarrUserWarning, match=r".* was not created with `asynchronous=True`.*"):
self.store_cls(**store_kwargs)
async def test_empty_nonexistent_path(self, store_kwargs: dict[str, Any]) -> None:
# regression test for https://github.com/zarr-developers/zarr-python/pull/2343
store_kwargs["path"] += "/abc"
store = await self.store_cls.open(**store_kwargs)
assert await store.is_empty("")
async def test_delete_dir_unsupported_deletes(self, store: FsspecStore) -> None:
store.supports_deletes = False
with pytest.raises(
NotImplementedError,
match="This method is only available for stores that support deletes.",
):
await store.delete_dir("test_prefix")
def array_roundtrip(store: FsspecStore) -> None:
"""
Round trip an array using a Zarr store
Args:
store: FsspecStore
"""
data = np.ones((3, 3))
arr = zarr.create_array(store=store, overwrite=True, data=data)
assert isinstance(arr, Array)
# Read set values
arr2 = zarr.open_array(store=store)
assert isinstance(arr2, Array)
    np.testing.assert_array_equal(arr2[:], data)
@pytest.mark.skipif(
parse_version(fsspec.__version__) < parse_version("2024.12.0"),
reason="No AsyncFileSystemWrapper",
)
def test_wrap_sync_filesystem(tmp_path: pathlib.Path) -> None:
"""The local fs is not async so we should expect it to be wrapped automatically"""
from fsspec.implementations.asyn_wrapper import AsyncFileSystemWrapper
store = FsspecStore.from_url(f"file://{tmp_path}", storage_options={"auto_mkdir": True})
assert isinstance(store.fs, AsyncFileSystemWrapper)
assert store.fs.async_impl
array_roundtrip(store)
@pytest.mark.skipif(
parse_version(fsspec.__version__) >= parse_version("2024.12.0"),
reason="No AsyncFileSystemWrapper",
)
def test_wrap_sync_filesystem_raises(tmp_path: pathlib.Path) -> None:
"""The local fs is not async so we should expect it to be wrapped automatically"""
with pytest.raises(ImportError, match="The filesystem .*"):
FsspecStore.from_url(f"file://{tmp_path}", storage_options={"auto_mkdir": True})
@pytest.mark.skipif(
parse_version(fsspec.__version__) < parse_version("2024.12.0"),
reason="No AsyncFileSystemWrapper",
)
def test_no_wrap_async_filesystem() -> None:
"""An async fs should not be wrapped automatically; fsspec's s3 filesystem is such an fs"""
from fsspec.implementations.asyn_wrapper import AsyncFileSystemWrapper
store = FsspecStore.from_url(
f"s3://{test_bucket_name}/foo/spam/",
storage_options={"endpoint_url": endpoint_url, "anon": False, "asynchronous": True},
read_only=False,
)
assert not isinstance(store.fs, AsyncFileSystemWrapper)
assert store.fs.async_impl
array_roundtrip(store)
@pytest.mark.skipif(
parse_version(fsspec.__version__) < parse_version("2024.12.0"),
reason="No AsyncFileSystemWrapper",
)
def test_open_fsmap_file(tmp_path: pathlib.Path) -> None:
min_fsspec_with_async_wrapper = parse_version("2024.12.0")
current_version = parse_version(fsspec.__version__)
fs = fsspec.filesystem("file", auto_mkdir=True)
mapper = fs.get_mapper(tmp_path)
if current_version < min_fsspec_with_async_wrapper:
# Expect ImportError for older versions
with pytest.raises(
ImportError,
match=r"The filesystem .* is synchronous, and the required AsyncFileSystemWrapper is not available.*",
):
array_roundtrip(mapper)
else:
# Newer versions should work
array_roundtrip(mapper)
@pytest.mark.skipif(
parse_version(fsspec.__version__) < parse_version("2024.12.0"),
reason="No AsyncFileSystemWrapper",
)
def test_open_fsmap_file_raises(tmp_path: pathlib.Path) -> None:
fsspec = pytest.importorskip("fsspec.implementations.local")
fs = fsspec.LocalFileSystem(auto_mkdir=False)
mapper = fs.get_mapper(tmp_path)
with pytest.raises(FileNotFoundError, match="No such file or directory: .*"):
array_roundtrip(mapper)
@pytest.mark.parametrize("asynchronous", [True, False])
def test_open_fsmap_s3(asynchronous: bool) -> None:
s3_filesystem = s3fs.S3FileSystem(
asynchronous=asynchronous, endpoint_url=endpoint_url, anon=False
)
mapper = s3_filesystem.get_mapper(f"s3://{test_bucket_name}/map/foo/")
array_roundtrip(mapper)
def test_open_s3map_raises() -> None:
with pytest.raises(TypeError, match="Unsupported type for store_like:.*"):
zarr.open(store=0, mode="w", shape=(3, 3))
s3_filesystem = s3fs.S3FileSystem(asynchronous=True, endpoint_url=endpoint_url, anon=False)
mapper = s3_filesystem.get_mapper(f"s3://{test_bucket_name}/map/foo/")
with pytest.raises(
ValueError, match="'path' was provided but is not used for FSMap store_like objects"
):
zarr.open(store=mapper, path="bar", mode="w", shape=(3, 3))
with pytest.raises(
TypeError,
match="'storage_options' is only used when the store is passed as a FSSpec URI string.",
):
zarr.open(store=mapper, storage_options={"anon": True}, mode="w", shape=(3, 3))
@pytest.mark.parametrize("asynchronous", [True, False])
def test_make_async(asynchronous: bool) -> None:
s3_filesystem = s3fs.S3FileSystem(
asynchronous=asynchronous, endpoint_url=endpoint_url, anon=False
)
fs = _make_async(s3_filesystem)
assert fs.asynchronous
@pytest.mark.skipif(
parse_version(fsspec.__version__) < parse_version("2024.12.0"),
reason="No AsyncFileSystemWrapper",
)
async def test_delete_dir_wrapped_filesystem(tmp_path: Path) -> None:
from fsspec.implementations.asyn_wrapper import AsyncFileSystemWrapper
from fsspec.implementations.local import LocalFileSystem
wrapped_fs = AsyncFileSystemWrapper(LocalFileSystem(auto_mkdir=True))
store = FsspecStore(wrapped_fs, read_only=False, path=f"{tmp_path}/test/path")
assert isinstance(store.fs, AsyncFileSystemWrapper)
assert store.fs.asynchronous
await store.set("zarr.json", cpu.Buffer.from_bytes(b"root"))
await store.set("foo-bar/zarr.json", cpu.Buffer.from_bytes(b"root"))
await store.set("foo/zarr.json", cpu.Buffer.from_bytes(b"bar"))
await store.set("foo/c/0", cpu.Buffer.from_bytes(b"chunk"))
await store.delete_dir("foo")
assert await store.exists("zarr.json")
assert await store.exists("foo-bar/zarr.json")
assert not await store.exists("foo/zarr.json")
assert not await store.exists("foo/c/0")
@pytest.mark.skipif(
parse_version(fsspec.__version__) < parse_version("2024.12.0"),
reason="No AsyncFileSystemWrapper",
)
async def test_with_read_only_auto_mkdir(tmp_path: Path) -> None:
"""
Test that creating a read-only copy of a store backed by the local file system does not error
if auto_mkdir is False.
"""
store_w = FsspecStore.from_url(f"file://{tmp_path}", storage_options={"auto_mkdir": False})
_ = store_w.with_read_only()
| TestFsspecStoreS3 |
python | huggingface__transformers | src/transformers/models/t5/modeling_t5.py | {
"start": 1616,
"end": 3078
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://huggingface.co/papers/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
try:
from apex.normalization import FusedRMSNorm
T5LayerNorm = FusedRMSNorm
logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of T5LayerNorm")
except ImportError:
# using the normal T5LayerNorm
pass
except Exception:
logger.warning("discovered apex but it failed to load, falling back to T5LayerNorm")
| T5LayerNorm |
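A small numeric check of the RMS-normalization math described in the comments above, assuming PyTorch; the shapes and epsilon are illustrative choices.

import torch

hidden_size, eps = 8, 1e-6
x = torch.randn(2, 4, hidden_size)          # (batch, seq, hidden)
weight = torch.ones(hidden_size)

variance = x.pow(2).mean(-1, keepdim=True)  # mean of squares; no mean subtraction
y = weight * x * torch.rsqrt(variance + eps)

# Each normalized vector now has (approximately) unit root-mean-square.
print(y.pow(2).mean(-1).sqrt())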
python | weaviate__weaviate-python-client | weaviate/collections/batch/batch_wrapper.py | {
"start": 4943,
"end": 8224
} | class ____(Protocol):
def add_object(
self,
collection: str,
properties: Optional[WeaviateProperties] = None,
references: Optional[ReferenceInputs] = None,
uuid: Optional[UUID] = None,
vector: Optional[VECTORS] = None,
tenant: Optional[Union[str, Tenant]] = None,
) -> UUID:
"""Add one object to this batch.
NOTE: If the UUID of one of the objects already exists then the existing object will be
replaced by the new object.
Args:
collection: The name of the collection this object belongs to.
properties: The data properties of the object to be added as a dictionary.
references: The references of the object to be added as a dictionary.
uuid: The UUID of the object as an uuid.UUID object or str. It can be a Weaviate beacon or Weaviate href.
                If it is None, a UUIDv4 will be generated, by default None
vector: The embedding of the object. Can be used when a collection does not have a vectorization module or the given
vector was generated using the _identical_ vectorization module that is configured for the class. In this
case this vector takes precedence.
Supported types are:
                    - for single vectors: `list`, `numpy.ndarray`, `torch.Tensor` and `tf.Tensor`, by default None.
- for named vectors: Dict[str, *list above*], where the string is the name of the vector.
tenant: The tenant name or Tenant object to be used for this request.
Returns:
The UUID of the added object. If one was not provided a UUIDv4 will be auto-generated for you and returned here.
Raises:
WeaviateBatchValidationError: If the provided options are in the format required by Weaviate.
"""
...
def add_reference(
self,
from_uuid: UUID,
from_collection: str,
from_property: str,
to: ReferenceInput,
tenant: Optional[Union[str, Tenant]] = None,
) -> None:
"""Add one reference to this batch.
Args:
from_uuid: The UUID of the object, as an uuid.UUID object or str, that should reference another object.
from_collection: The name of the collection that should reference another object.
from_property: The name of the property that contains the reference.
to: The UUID of the referenced object, as an uuid.UUID object or str, that is actually referenced.
For multi-target references use wvc.Reference.to_multi_target().
tenant: The tenant name or Tenant object to be used for this request.
Raises:
WeaviateBatchValidationError: If the provided options are in the format required by Weaviate.
"""
...
def flush(self) -> None:
"""Flush the current batch.
This will send all the objects and references in the current batch to Weaviate.
"""
...
@property
def number_errors(self) -> int:
"""Get the number of errors in the current batch.
Returns:
The number of errors in the current batch.
"""
...
| BatchClientProtocol |
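A hedged, in-memory stub that satisfies the protocol above, just to show the call shapes; it performs no real batching and is not Weaviate's client -- the collection and property names are made up.

from uuid import UUID, uuid4

class FakeBatch:
    def __init__(self) -> None:
        self.objects, self.references = [], []

    def add_object(self, collection, properties=None, references=None,
                   uuid=None, vector=None, tenant=None) -> UUID:
        uid = uuid or uuid4()  # autogenerate a UUIDv4 when none is given
        self.objects.append((collection, properties, uid))
        return uid

    def add_reference(self, from_uuid, from_collection, from_property,
                      to, tenant=None) -> None:
        self.references.append((from_uuid, from_collection, from_property, to))

    def flush(self) -> None:
        self.objects.clear()
        self.references.clear()

    @property
    def number_errors(self) -> int:
        return 0

batch = FakeBatch()
uid = batch.add_object(collection="Article", properties={"title": "Hello"})
batch.add_reference(uid, "Article", "hasAuthor", to=uuid4())
batch.flush()
print(batch.number_errors)  # 0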
python | openai__openai-python | src/openai/types/image_generate_params.py | {
"start": 362,
"end": 4517
} | class ____(TypedDict, total=False):
prompt: Required[str]
"""A text description of the desired image(s).
The maximum length is 32000 characters for `gpt-image-1`, 1000 characters for
`dall-e-2` and 4000 characters for `dall-e-3`.
"""
background: Optional[Literal["transparent", "opaque", "auto"]]
"""
Allows to set transparency for the background of the generated image(s). This
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
`opaque` or `auto` (default value). When `auto` is used, the model will
automatically determine the best background for the image.
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
"""
model: Union[str, ImageModel, None]
"""The model to use for image generation.
One of `dall-e-2`, `dall-e-3`, or `gpt-image-1`. Defaults to `dall-e-2` unless a
parameter specific to `gpt-image-1` is used.
"""
moderation: Optional[Literal["low", "auto"]]
"""Control the content-moderation level for images generated by `gpt-image-1`.
Must be either `low` for less restrictive filtering or `auto` (default value).
"""
n: Optional[int]
"""The number of images to generate.
Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
"""
output_compression: Optional[int]
"""The compression level (0-100%) for the generated images.
This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg`
output formats, and defaults to 100.
"""
output_format: Optional[Literal["png", "jpeg", "webp"]]
"""The format in which the generated images are returned.
This parameter is only supported for `gpt-image-1`. Must be one of `png`,
`jpeg`, or `webp`.
"""
partial_images: Optional[int]
"""The number of partial images to generate.
This parameter is used for streaming responses that return partial images. Value
must be between 0 and 3. When set to 0, the response will be a single image sent
in one streaming event.
Note that the final image may be sent before the full number of partial images
are generated if the full image is generated more quickly.
"""
quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]]
"""The quality of the image that will be generated.
- `auto` (default value) will automatically select the best quality for the
given model.
- `high`, `medium` and `low` are supported for `gpt-image-1`.
- `hd` and `standard` are supported for `dall-e-3`.
- `standard` is the only option for `dall-e-2`.
"""
response_format: Optional[Literal["url", "b64_json"]]
"""The format in which generated images with `dall-e-2` and `dall-e-3` are
returned.
Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
image has been generated. This parameter isn't supported for `gpt-image-1` which
will always return base64-encoded images.
"""
size: Optional[
Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
]
"""The size of the generated images.
Must be one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or
`auto` (default value) for `gpt-image-1`, one of `256x256`, `512x512`, or
`1024x1024` for `dall-e-2`, and one of `1024x1024`, `1792x1024`, or `1024x1792`
for `dall-e-3`.
"""
style: Optional[Literal["vivid", "natural"]]
"""The style of the generated images.
This parameter is only supported for `dall-e-3`. Must be one of `vivid` or
`natural`. Vivid causes the model to lean towards generating hyper-real and
dramatic images. Natural causes the model to produce more natural, less
hyper-real looking images.
"""
user: str
"""
A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
"""
| ImageGenerateParamsBase |
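A hedged sketch of filling in the TypedDict above; since it is declared with total=False, only `prompt` is required, and the remaining keys are optional choices consistent with the documented constraints. The import path is assumed from the file path shown above.

from openai.types.image_generate_params import ImageGenerateParamsBase

params: ImageGenerateParamsBase = {
    "prompt": "A watercolor fox in the snow",
    "model": "gpt-image-1",
    "background": "transparent",
    "output_format": "png",  # transparency requires png or webp
    "quality": "high",
    "size": "1024x1024",
    "n": 1,
}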
python | getsentry__sentry | tests/snuba/api/endpoints/test_discover_key_transactions.py | {
"start": 1103,
"end": 25334
} | class ____(TeamKeyTransactionTestBase):
def setUp(self) -> None:
super().setUp()
self.url = reverse("sentry-api-0-organization-key-transactions", args=[self.org.slug])
def test_key_transaction_without_feature(self) -> None:
project = self.create_project(name="qux", organization=self.org)
data = {
"project": [self.project.id, project.id],
"transaction": self.event_data["transaction"],
"team": "myteams",
}
for response in (
self.client.get(self.url, data=data, format="json"),
self.client.post(self.url, data=data, format="json"),
self.client.delete(self.url, data=data, format="json"),
):
assert response.status_code == 404, response.content
def test_get_key_transaction_multiple_projects(self) -> None:
project = self.create_project(name="qux", organization=self.org)
with self.feature(self.features):
response = self.client.get(
self.url,
data={
"project": [self.project.id, project.id],
"transaction": self.event_data["transaction"],
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {"detail": "Only 1 project per Key Transaction"}
def test_get_key_transaction_no_transaction_name(self) -> None:
with self.feature(self.features):
response = self.client.get(
self.url,
data={
"project": [self.project.id],
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {"detail": "A transaction name is required"}
def test_get_no_key_transaction(self) -> None:
with self.feature(self.features):
response = self.client.get(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data == []
def test_get_key_transaction_my_teams(self) -> None:
team1 = self.create_team(organization=self.org, name="Team A")
team2 = self.create_team(organization=self.org, name="Team B")
team3 = self.create_team(organization=self.org, name="Team C")
# should not be in response because we never joined this team
self.create_team(organization=self.org, name="Team D")
# only join teams 1,2,3
for team in [team1, team2, team3]:
self.create_team_membership(team, user=self.user)
self.project.add_team(team)
TeamKeyTransaction.objects.bulk_create(
[
TeamKeyTransaction(
organization=self.org,
project_team=project_team,
transaction=self.event_data["transaction"],
)
for project_team in ProjectTeam.objects.filter(
project=self.project, team__in=[team1, team2]
)
]
+ [
TeamKeyTransaction(
organization=self.org,
project_team=project_team,
transaction="other-transaction",
)
for project_team in ProjectTeam.objects.filter(
project=self.project, team__in=[team2, team3]
)
]
)
with self.feature(self.features):
response = self.client.get(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
"team": "myteams",
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data == [
{
"team": str(team1.id),
},
{
"team": str(team2.id),
},
]
def test_post_key_transaction_more_than_1_project(self) -> None:
team = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team, user=self.user)
self.project.add_team(team)
project = self.create_project(name="bar", organization=self.org)
project.add_team(team)
with self.feature(self.features):
response = self.client.post(
self.url,
data={
"project": [self.project.id, project.id],
"transaction": self.event_data["transaction"],
"team": [team.id],
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {"detail": "Only 1 project per Key Transaction"}
def test_post_key_transaction_no_team(self) -> None:
team = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team, user=self.user)
self.project.add_team(team)
with self.feature(self.features):
response = self.client.post(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {"team": ["This field is required."]}
def test_post_key_transaction_no_transaction_name(self) -> None:
team = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team, user=self.user)
self.project.add_team(team)
with self.feature(self.features):
response = self.client.post(
self.url,
data={
"project": [self.project.id],
"team": [team.id],
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {"transaction": ["This field is required."]}
def test_post_key_transaction_no_access_team(self) -> None:
org = self.create_organization(
            owner=self.user,  # owned by self.user, not the user who logs in below
name="foo",
flags=0, # disable default allow_joinleave
)
project = self.create_project(name="baz", organization=org)
user = self.create_user()
self.login_as(user=user, superuser=False)
team = self.create_team(organization=org, name="Team Foo")
self.create_team_membership(team, user=user)
project.add_team(team)
other_team = self.create_team(organization=org, name="Team Bar")
project.add_team(other_team)
with self.feature(self.features):
response = self.client.post(
reverse("sentry-api-0-organization-key-transactions", args=[org.slug]),
data={
"project": [project.id],
"transaction": self.event_data["transaction"],
"team": [other_team.id],
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {
"team": [f"You do not have permission to access {other_team.name}"]
}
def test_post_key_transaction_no_access_project(self) -> None:
team1 = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team1, user=self.user)
self.project.add_team(team1)
team2 = self.create_team(organization=self.org, name="Team Bar")
self.create_team_membership(team2, user=self.user)
with self.feature(self.features):
response = self.client.post(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
"team": [team2.id],
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {"detail": "Team does not have access to project"}
def test_post_key_transactions_exceed_limit(self) -> None:
team = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team, user=self.user)
self.project.add_team(team)
project_team = ProjectTeam.objects.get(project=self.project, team=team)
TeamKeyTransaction.objects.bulk_create(
[
TeamKeyTransaction(
organization=self.org,
project_team=project_team,
transaction=f"{self.event_data['transaction']}-{i}",
)
for i in range(MAX_TEAM_KEY_TRANSACTIONS)
]
)
with self.feature(self.features):
response = self.client.post(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
"team": [team.id],
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {
"non_field_errors": [
f"At most {MAX_TEAM_KEY_TRANSACTIONS} Key Transactions can be added for a team"
]
}
def test_post_key_transaction_limit_is_per_team(self) -> None:
team1 = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team1, user=self.user)
self.project.add_team(team1)
team2 = self.create_team(organization=self.org, name="Team Bar")
self.create_team_membership(team2, user=self.user)
self.project.add_team(team2)
project_teams = ProjectTeam.objects.filter(project=self.project, team__in=[team1, team2])
TeamKeyTransaction.objects.bulk_create(
[
TeamKeyTransaction(
organization=self.org,
project_team=project_team,
transaction=f"{self.event_data['transaction']}-{i}",
)
for project_team in project_teams
for i in range(MAX_TEAM_KEY_TRANSACTIONS - 1)
]
)
with self.feature(self.features):
response = self.client.post(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
"team": [team1.id, team2.id],
},
format="json",
)
assert response.status_code == 201, response.content
key_transactions = TeamKeyTransaction.objects.filter(project_team__team__in=[team1, team2])
assert len(key_transactions) == 2 * MAX_TEAM_KEY_TRANSACTIONS
def test_post_key_transactions(self) -> None:
team = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team, user=self.user)
self.project.add_team(team)
with self.feature(self.features):
response = self.client.post(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
"team": [team.id],
},
format="json",
)
assert response.status_code == 201, response.content
key_transactions = TeamKeyTransaction.objects.filter(project_team__team=team)
assert len(key_transactions) == 1
def test_post_key_transactions_duplicate(self) -> None:
team = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team, user=self.user)
self.project.add_team(team)
project_team = ProjectTeam.objects.get(project=self.project, team=team)
TeamKeyTransaction.objects.create(
organization=self.org,
project_team=project_team,
transaction=self.event_data["transaction"],
)
with self.feature(self.features):
response = self.client.post(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
"team": [team.id],
},
format="json",
)
assert response.status_code == 204, response.content
key_transactions = TeamKeyTransaction.objects.filter(
project_team=project_team, transaction=self.event_data["transaction"]
)
assert len(key_transactions) == 1
def test_post_key_transaction_multiple_team(self) -> None:
team1 = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team1, user=self.user)
self.project.add_team(team1)
team2 = self.create_team(organization=self.org, name="Team Bar")
self.create_team_membership(team2, user=self.user)
self.project.add_team(team2)
with self.feature(self.features):
response = self.client.post(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
"team": [team1.id, team2.id],
},
format="json",
)
assert response.status_code == 201, response.content
key_transactions = TeamKeyTransaction.objects.filter(
project_team__in=ProjectTeam.objects.filter(
project=self.project, team__in=[team1, team2]
),
transaction=self.event_data["transaction"],
)
assert len(key_transactions) == 2
def test_post_key_transaction_partially_existing_teams(self) -> None:
team1 = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team1, user=self.user)
self.project.add_team(team1)
team2 = self.create_team(organization=self.org, name="Team Bar")
self.create_team_membership(team2, user=self.user)
self.project.add_team(team2)
TeamKeyTransaction.objects.create(
organization=self.org,
project_team=ProjectTeam.objects.get(project=self.project, team=team1),
transaction=self.event_data["transaction"],
)
with self.feature(self.features):
response = self.client.post(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
"team": [team1.id, team2.id],
},
format="json",
)
assert response.status_code == 201, response.content
key_transactions = TeamKeyTransaction.objects.filter(
project_team__in=ProjectTeam.objects.filter(
project=self.project, team__in=[team1, team2]
),
transaction=self.event_data["transaction"],
)
assert len(key_transactions) == 2
def test_post_key_transaction_multiple_users(self) -> None:
team = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team, user=self.user)
self.project.add_team(team)
with self.feature(self.features):
response = self.client.post(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
"team": [team.id],
},
format="json",
)
assert response.status_code == 201, response.content
with self.feature(self.features):
response = self.client.post(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
"team": [team.id],
},
format="json",
)
# already created by this user, so it's 204 this time
assert response.status_code == 204, response.content
user = self.create_user()
self.create_member(user=user, organization=self.org, role="member")
self.create_team_membership(team, user=user)
self.login_as(user=user, superuser=False)
with self.feature(self.features):
response = self.client.post(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
"team": [team.id],
},
format="json",
)
# already created by another user, so it's 204 this time
assert response.status_code == 204, response.content
# should only create 1 team key transaction
key_transactions = TeamKeyTransaction.objects.filter(project_team__team=team)
assert len(key_transactions) == 1
def test_post_key_transaction_overly_long_transaction(self) -> None:
team = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team, user=self.user)
self.project.add_team(team)
with self.feature(self.features):
response = self.client.post(
self.url,
data={
"project": [self.project.id],
"transaction": "a" * 500,
"team": [team.id],
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {
"transaction": ["Ensure this field has no more than 200 characters."]
}
def test_delete_key_transaction_no_transaction_name(self) -> None:
team = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team, user=self.user)
self.project.add_team(team)
with self.feature(self.features):
response = self.client.delete(
self.url,
data={
"project": [self.project.id],
"team": [team.id],
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {"transaction": ["This field is required."]}
def test_delete_key_transaction_no_team(self) -> None:
team = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team, user=self.user)
self.project.add_team(team)
with self.feature(self.features):
response = self.client.delete(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {"team": ["This field is required."]}
def test_delete_key_transactions_no_exist(self) -> None:
team = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team, user=self.user)
self.project.add_team(team)
with self.feature(self.features):
response = self.client.delete(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
"team": [team.id],
},
format="json",
)
assert response.status_code == 204, response.content
key_transactions = TeamKeyTransaction.objects.filter(project_team__team=team)
assert len(key_transactions) == 0
def test_delete_key_transaction_no_access_team(self) -> None:
org = self.create_organization(
            owner=self.user,  # owned by self.user, not the user who logs in below
name="foo",
flags=0, # disable default allow_joinleave
)
project = self.create_project(name="baz", organization=org)
user = self.create_user()
self.login_as(user=user, superuser=False)
team = self.create_team(organization=org, name="Team Foo")
self.create_team_membership(team, user=user)
project.add_team(team)
other_team = self.create_team(organization=org, name="Team Bar")
project.add_team(other_team)
TeamKeyTransaction.objects.create(
organization=org,
project_team=ProjectTeam.objects.get(project=project, team=team),
transaction=self.event_data["transaction"],
)
with self.feature(self.features):
response = self.client.delete(
reverse("sentry-api-0-organization-key-transactions", args=[org.slug]),
data={
"project": [project.id],
"transaction": self.event_data["transaction"],
"team": [other_team.id],
},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {
"team": [f"You do not have permission to access {other_team.name}"]
}
def test_delete_key_transactions(self) -> None:
team = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team, user=self.user)
self.project.add_team(team)
TeamKeyTransaction.objects.create(
organization=self.org,
project_team=ProjectTeam.objects.get(project=self.project, team=team),
transaction=self.event_data["transaction"],
)
with self.feature(self.features):
response = self.client.delete(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
"team": [team.id],
},
format="json",
)
assert response.status_code == 204, response.content
key_transactions = TeamKeyTransaction.objects.filter(project_team__team=team)
assert len(key_transactions) == 0
def test_delete_key_transaction_partially_existing_teams(self) -> None:
team1 = self.create_team(organization=self.org, name="Team Foo")
self.create_team_membership(team1, user=self.user)
self.project.add_team(team1)
team2 = self.create_team(organization=self.org, name="Team Bar")
self.create_team_membership(team2, user=self.user)
self.project.add_team(team2)
TeamKeyTransaction.objects.create(
organization=self.org,
project_team=ProjectTeam.objects.get(project=self.project, team=team1),
transaction=self.event_data["transaction"],
)
with self.feature(self.features):
response = self.client.delete(
self.url,
data={
"project": [self.project.id],
"transaction": self.event_data["transaction"],
"team": [team1.id, team2.id],
},
format="json",
)
assert response.status_code == 204, response.content
| TeamKeyTransactionTest |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/unsupervised_learning/autoencoder.py | {
"start": 507,
"end": 4017
} | class ____():
"""An Autoencoder with deep fully-connected neural nets.
Training Data: MNIST Handwritten Digits (28x28 images)
"""
def __init__(self):
self.img_rows = 28
self.img_cols = 28
self.img_dim = self.img_rows * self.img_cols
self.latent_dim = 128 # The dimension of the data embedding
optimizer = Adam(learning_rate=0.0002, b1=0.5)
loss_function = SquareLoss
self.encoder = self.build_encoder(optimizer, loss_function)
self.decoder = self.build_decoder(optimizer, loss_function)
self.autoencoder = NeuralNetwork(optimizer=optimizer, loss=loss_function)
self.autoencoder.layers.extend(self.encoder.layers)
self.autoencoder.layers.extend(self.decoder.layers)
print ()
        self.autoencoder.summary(name="Autoencoder")
def build_encoder(self, optimizer, loss_function):
encoder = NeuralNetwork(optimizer=optimizer, loss=loss_function)
encoder.add(Dense(512, input_shape=(self.img_dim,)))
encoder.add(Activation('leaky_relu'))
encoder.add(BatchNormalization(momentum=0.8))
encoder.add(Dense(256))
encoder.add(Activation('leaky_relu'))
encoder.add(BatchNormalization(momentum=0.8))
encoder.add(Dense(self.latent_dim))
return encoder
def build_decoder(self, optimizer, loss_function):
decoder = NeuralNetwork(optimizer=optimizer, loss=loss_function)
decoder.add(Dense(256, input_shape=(self.latent_dim,)))
decoder.add(Activation('leaky_relu'))
decoder.add(BatchNormalization(momentum=0.8))
decoder.add(Dense(512))
decoder.add(Activation('leaky_relu'))
decoder.add(BatchNormalization(momentum=0.8))
decoder.add(Dense(self.img_dim))
decoder.add(Activation('tanh'))
return decoder
def train(self, n_epochs, batch_size=128, save_interval=50):
mnist = fetch_mldata('MNIST original')
X = mnist.data
y = mnist.target
# Rescale [-1, 1]
X = (X.astype(np.float32) - 127.5) / 127.5
for epoch in range(n_epochs):
            # Select a random batch of images
idx = np.random.randint(0, X.shape[0], batch_size)
imgs = X[idx]
# Train the Autoencoder
loss, _ = self.autoencoder.train_on_batch(imgs, imgs)
# Display the progress
print ("%d [D loss: %f]" % (epoch, loss))
# If at save interval => save generated image samples
if epoch % save_interval == 0:
self.save_imgs(epoch, X)
def save_imgs(self, epoch, X):
r, c = 5, 5 # Grid size
        # Select r*c random images to plot
idx = np.random.randint(0, X.shape[0], r*c)
imgs = X[idx]
# Generate images and reshape to image shape
gen_imgs = self.autoencoder.predict(imgs).reshape((-1, self.img_rows, self.img_cols))
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
fig, axs = plt.subplots(r, c)
plt.suptitle("Autoencoder")
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(gen_imgs[cnt,:,:], cmap='gray')
axs[i,j].axis('off')
cnt += 1
fig.savefig("ae_%d.png" % epoch)
plt.close()
if __name__ == '__main__':
ae = Autoencoder()
ae.train(n_epochs=200000, batch_size=64, save_interval=400)
| Autoencoder |
python | doocs__leetcode | solution/3100-3199/3123.Find Edges in Shortest Paths/Solution.py | {
"start": 0,
"end": 901
} | class ____:
def findAnswer(self, n: int, edges: List[List[int]]) -> List[bool]:
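        # Build an adjacency list; each entry keeps the edge's index so the
        # answer can be marked per input edge.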
g = defaultdict(list)
for i, (a, b, w) in enumerate(edges):
g[a].append((b, w, i))
g[b].append((a, w, i))
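        # Dijkstra from node 0: dist[v] holds the shortest distance 0 -> v.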
dist = [inf] * n
dist[0] = 0
q = [(0, 0)]
while q:
da, a = heappop(q)
if da > dist[a]:
continue
for b, w, _ in g[a]:
if dist[b] > dist[a] + w:
dist[b] = dist[a] + w
heappush(q, (dist[b], b))
m = len(edges)
ans = [False] * m
if dist[n - 1] == inf:
return ans
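        # Walk backwards from n - 1: an edge (a, b, w) lies on some shortest
        # path exactly when dist[a] == dist[b] + w along the reverse walk.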
q = deque([n - 1])
while q:
a = q.popleft()
for b, w, i in g[a]:
if dist[a] == dist[b] + w:
ans[i] = True
q.append(b)
return ans
| Solution |
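A quick driver over the class above, using a small example graph. The imports mirror what the original file (and the LeetCode runtime) provide implicitly, and they must precede the class definition because the annotations reference `List` at definition time.

from collections import defaultdict, deque
from heapq import heappush, heappop
from math import inf
from typing import List

edges = [[0, 1, 4], [0, 2, 1], [1, 3, 2], [1, 4, 3],
         [1, 5, 1], [2, 3, 1], [3, 5, 3], [4, 5, 2]]
print(Solution().findAnswer(6, edges))
# -> [True, True, True, False, True, True, True, False]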
python | huggingface__transformers | tests/models/deformable_detr/test_image_processing_deformable_detr.py | {
"start": 4801,
"end": 35510
} | class ____(AnnotationFormatTestMixin, ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
fast_image_processing_class = DeformableDetrImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = DeformableDetrImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "do_pad"))
self.assertTrue(hasattr(image_processing, "size"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
self.assertEqual(image_processor.do_pad, True)
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 1333})
@slow
def test_call_pytorch_with_coco_detection_annotations(self):
# prepare image and target
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
target = json.loads(f.read())
target = {"image_id": 39769, "annotations": target}
for image_processing_class in self.image_processor_list:
# encode them
image_processing = image_processing_class()
encoding = image_processing(images=image, annotations=target, return_tensors="pt")
# verify pixel values
expected_shape = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1066])
torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
def test_call_pytorch_with_coco_panoptic_annotations(self):
# prepare image, target and masks_path
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f:
target = json.loads(f.read())
target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
for image_processing_class in self.image_processor_list:
# encode them
image_processing = image_processing_class(format="coco_panoptic")
encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
# verify pixel values
expected_shape = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify masks
expected_masks_sum = 822873
relative_error = torch.abs(encoding["labels"][0]["masks"].sum() - expected_masks_sum) / expected_masks_sum
self.assertTrue(relative_error < 1e-3)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1066])
torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_detection_annotations with Detr->DeformableDetr
def test_batched_coco_detection_annotations(self):
image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800))
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
target = json.loads(f.read())
annotations_0 = {"image_id": 39769, "annotations": target}
annotations_1 = {"image_id": 39769, "annotations": target}
# Adjust the bounding boxes for the resized image
w_0, h_0 = image_0.size
w_1, h_1 = image_1.size
for i in range(len(annotations_1["annotations"])):
coords = annotations_1["annotations"][i]["bbox"]
new_bbox = [
coords[0] * w_1 / w_0,
coords[1] * h_1 / h_0,
coords[2] * w_1 / w_0,
coords[3] * h_1 / h_0,
]
annotations_1["annotations"][i]["bbox"] = new_bbox
images = [image_0, image_1]
annotations = [annotations_0, annotations_1]
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class()
encoding = image_processing(
images=images,
annotations=annotations,
return_segmentation_masks=True,
return_tensors="pt", # do_convert_annotations=True
)
# Check the pixel values have been padded
postprocessed_height, postprocessed_width = 800, 1066
expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
# Check the bounding boxes have been adjusted for padded images
self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
expected_boxes_0 = torch.tensor(
[
[0.6879, 0.4609, 0.0755, 0.3691],
[0.2118, 0.3359, 0.2601, 0.1566],
[0.5011, 0.5000, 0.9979, 1.0000],
[0.5010, 0.5020, 0.9979, 0.9959],
[0.3284, 0.5944, 0.5884, 0.8112],
[0.8394, 0.5445, 0.3213, 0.9110],
]
)
expected_boxes_1 = torch.tensor(
[
[0.4130, 0.2765, 0.0453, 0.2215],
[0.1272, 0.2016, 0.1561, 0.0940],
[0.3757, 0.4933, 0.7488, 0.9865],
[0.3759, 0.5002, 0.7492, 0.9955],
[0.1971, 0.5456, 0.3532, 0.8646],
[0.5790, 0.4115, 0.3430, 0.7161],
]
)
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066]))
self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1066]))
            # Check that with do_convert_annotations=False the annotations are not converted to
            # centre_x, centre_y, width, height format and are not normalized to the range [0, 1]
encoding = image_processing(
images=images,
annotations=annotations,
return_segmentation_masks=True,
do_convert_annotations=False,
return_tensors="pt",
)
self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
# Convert to absolute coordinates
unnormalized_boxes_0 = torch.vstack(
[
expected_boxes_0[:, 0] * postprocessed_width,
expected_boxes_0[:, 1] * postprocessed_height,
expected_boxes_0[:, 2] * postprocessed_width,
expected_boxes_0[:, 3] * postprocessed_height,
]
).T
unnormalized_boxes_1 = torch.vstack(
[
expected_boxes_1[:, 0] * postprocessed_width,
expected_boxes_1[:, 1] * postprocessed_height,
expected_boxes_1[:, 2] * postprocessed_width,
expected_boxes_1[:, 3] * postprocessed_height,
]
).T
# Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max
expected_boxes_0 = torch.vstack(
[
unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2,
unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2,
unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2,
unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2,
]
).T
expected_boxes_1 = torch.vstack(
[
unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2,
unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2,
unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2,
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_panoptic_annotations with Detr->DeformableDetr
def test_batched_coco_panoptic_annotations(self):
# prepare image, target and masks_path
image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800))
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f:
target = json.loads(f.read())
annotation_0 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
annotation_1 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
w_0, h_0 = image_0.size
w_1, h_1 = image_1.size
for i in range(len(annotation_1["segments_info"])):
coords = annotation_1["segments_info"][i]["bbox"]
new_bbox = [
coords[0] * w_1 / w_0,
coords[1] * h_1 / h_0,
coords[2] * w_1 / w_0,
coords[3] * h_1 / h_0,
]
annotation_1["segments_info"][i]["bbox"] = new_bbox
masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
images = [image_0, image_1]
annotations = [annotation_0, annotation_1]
for image_processing_class in self.image_processor_list:
# encode them
image_processing = image_processing_class(format="coco_panoptic")
encoding = image_processing(
images=images,
annotations=annotations,
masks_path=masks_path,
return_tensors="pt",
return_segmentation_masks=True,
)
# Check the pixel values have been padded
postprocessed_height, postprocessed_width = 800, 1066
expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
# Check the bounding boxes have been adjusted for padded images
self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
expected_boxes_0 = torch.tensor(
[
[0.2625, 0.5437, 0.4688, 0.8625],
[0.7719, 0.4104, 0.4531, 0.7125],
[0.5000, 0.4927, 0.9969, 0.9854],
[0.1688, 0.2000, 0.2063, 0.0917],
[0.5492, 0.2760, 0.0578, 0.2187],
[0.4992, 0.4990, 0.9984, 0.9979],
]
)
expected_boxes_1 = torch.tensor(
[
[0.1576, 0.3262, 0.2814, 0.5175],
[0.4634, 0.2463, 0.2720, 0.4275],
[0.3002, 0.2956, 0.5985, 0.5913],
[0.1013, 0.1200, 0.1238, 0.0550],
[0.3297, 0.1656, 0.0347, 0.1312],
[0.2997, 0.2994, 0.5994, 0.5987],
]
)
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066]))
self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1066]))
# Check if do_convert_annotations=False, then the annotations are not converted to centre_x, centre_y, width, height
# format and not in the range [0, 1]
encoding = image_processing(
images=images,
annotations=annotations,
masks_path=masks_path,
return_segmentation_masks=True,
do_convert_annotations=False,
return_tensors="pt",
)
self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
# Convert to absolute coordinates
unnormalized_boxes_0 = torch.vstack(
[
expected_boxes_0[:, 0] * postprocessed_width,
expected_boxes_0[:, 1] * postprocessed_height,
expected_boxes_0[:, 2] * postprocessed_width,
expected_boxes_0[:, 3] * postprocessed_height,
]
).T
unnormalized_boxes_1 = torch.vstack(
[
expected_boxes_1[:, 0] * postprocessed_width,
expected_boxes_1[:, 1] * postprocessed_height,
expected_boxes_1[:, 2] * postprocessed_width,
expected_boxes_1[:, 3] * postprocessed_height,
]
).T
# Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max
expected_boxes_0 = torch.vstack(
[
unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2,
unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2,
unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2,
unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2,
]
).T
expected_boxes_1 = torch.vstack(
[
unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2,
unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2,
unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2,
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_max_width_max_height_resizing_and_pad_strategy with Detr->DeformableDetr
def test_max_width_max_height_resizing_and_pad_strategy(self):
for image_processing_class in self.image_processor_list:
image_1 = torch.ones([200, 100, 3], dtype=torch.uint8)
# do_pad=False, max_height=100, max_width=100, image=200x100 -> 100x50
image_processor = image_processing_class(
size={"max_height": 100, "max_width": 100},
do_pad=False,
)
inputs = image_processor(images=[image_1], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 100, 50]))
# do_pad=False, max_height=300, max_width=100, image=200x100 -> 200x100
image_processor = image_processing_class(
size={"max_height": 300, "max_width": 100},
do_pad=False,
)
inputs = image_processor(images=[image_1], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 200, 100]))
# do_pad=True, max_height=100, max_width=100, image=200x100 -> 100x100
image_processor = image_processing_class(
size={"max_height": 100, "max_width": 100}, do_pad=True, pad_size={"height": 100, "width": 100}
)
inputs = image_processor(images=[image_1], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 100, 100]))
# do_pad=True, max_height=300, max_width=100, pad_size=301x101, image=200x100 -> 301x101
image_processor = image_processing_class(
size={"max_height": 300, "max_width": 100},
do_pad=True,
pad_size={"height": 301, "width": 101},
)
inputs = image_processor(images=[image_1], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 301, 101]))
### Check for batch
image_2 = torch.ones([100, 150, 3], dtype=torch.uint8)
# do_pad=True, max_height=150, max_width=100, images=[200x100, 100x150] -> 150x100
image_processor = image_processing_class(
size={"max_height": 150, "max_width": 100},
do_pad=True,
pad_size={"height": 150, "width": 100},
)
inputs = image_processor(images=[image_1, image_2], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([2, 3, 150, 100]))
def test_longest_edge_shortest_edge_resizing_strategy(self):
for image_processing_class in self.image_processor_list:
image_1 = torch.ones([958, 653, 3], dtype=torch.uint8)
# max size is set; width < height;
# do_pad=False, longest_edge=640, shortest_edge=640, image=958x653 -> 640x436
image_processor = image_processing_class(
size={"longest_edge": 640, "shortest_edge": 640},
do_pad=False,
)
inputs = image_processor(images=[image_1], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 640, 436]))
image_2 = torch.ones([653, 958, 3], dtype=torch.uint8)
# max size is set; height < width;
# do_pad=False, longest_edge=640, shortest_edge=640, image=653x958 -> 436x640
image_processor = image_processing_class(
size={"longest_edge": 640, "shortest_edge": 640},
do_pad=False,
)
inputs = image_processor(images=[image_2], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 436, 640]))
image_3 = torch.ones([100, 120, 3], dtype=torch.uint8)
# max size is set; width == size; height > max_size;
# do_pad=False, longest_edge=118, shortest_edge=100, image=120x100 -> 118x98
image_processor = image_processing_class(
size={"longest_edge": 118, "shortest_edge": 100},
do_pad=False,
)
inputs = image_processor(images=[image_3], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 98, 118]))
image_4 = torch.ones([128, 50, 3], dtype=torch.uint8)
# max size is set; height == size; width < max_size;
# do_pad=False, longest_edge=256, shortest_edge=50, image=50x128 -> 50x128
image_processor = image_processing_class(
size={"longest_edge": 256, "shortest_edge": 50},
do_pad=False,
)
inputs = image_processor(images=[image_4], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 128, 50]))
image_5 = torch.ones([50, 50, 3], dtype=torch.uint8)
# max size is set; height == width; width < max_size;
# do_pad=False, longest_edge=117, shortest_edge=50, image=50x50 -> 50x50
image_processor = image_processing_class(
size={"longest_edge": 117, "shortest_edge": 50},
do_pad=False,
)
inputs = image_processor(images=[image_5], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 50, 50]))
@slow
@require_torch_accelerator
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_fast_processor_equivalence_cpu_accelerator_coco_detection_annotations
def test_fast_processor_equivalence_cpu_accelerator_coco_detection_annotations(self):
# prepare image and target
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
target = json.loads(f.read())
target = {"image_id": 39769, "annotations": target}
# Ignore copy
processor = self.image_processor_list[1]()
# 1. run processor on CPU
encoding_cpu = processor(images=image, annotations=target, return_tensors="pt", device="cpu")
# 2. run processor on accelerator
encoding_gpu = processor(images=image, annotations=target, return_tensors="pt", device=torch_device)
# verify pixel values
self.assertEqual(encoding_cpu["pixel_values"].shape, encoding_gpu["pixel_values"].shape)
self.assertTrue(
torch.allclose(
encoding_cpu["pixel_values"][0, 0, 0, :3],
encoding_gpu["pixel_values"][0, 0, 0, :3].to("cpu"),
atol=1e-4,
)
)
# verify area
torch.testing.assert_close(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu"))
# verify boxes
self.assertEqual(encoding_cpu["labels"][0]["boxes"].shape, encoding_gpu["labels"][0]["boxes"].shape)
self.assertTrue(
torch.allclose(
encoding_cpu["labels"][0]["boxes"][0], encoding_gpu["labels"][0]["boxes"][0].to("cpu"), atol=1e-3
)
)
# verify image_id
torch.testing.assert_close(
encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu")
)
# verify is_crowd
torch.testing.assert_close(
encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu")
)
# verify class_labels
self.assertTrue(
torch.allclose(
encoding_cpu["labels"][0]["class_labels"], encoding_gpu["labels"][0]["class_labels"].to("cpu")
)
)
# verify orig_size
torch.testing.assert_close(
encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu")
)
# verify size
torch.testing.assert_close(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu"))
@slow
@require_torch_accelerator
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_fast_processor_equivalence_cpu_accelerator_coco_panoptic_annotations
def test_fast_processor_equivalence_cpu_accelerator_coco_panoptic_annotations(self):
# prepare image, target and masks_path
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f:
target = json.loads(f.read())
target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
# Ignore copy
processor = self.image_processor_list[1](format="coco_panoptic")
# 1. run processor on CPU
encoding_cpu = processor(
images=image, annotations=target, masks_path=masks_path, return_tensors="pt", device="cpu"
)
# 2. run processor on accelerator
encoding_gpu = processor(
images=image, annotations=target, masks_path=masks_path, return_tensors="pt", device=torch_device
)
# verify pixel values
self.assertEqual(encoding_cpu["pixel_values"].shape, encoding_gpu["pixel_values"].shape)
self.assertTrue(
torch.allclose(
encoding_cpu["pixel_values"][0, 0, 0, :3],
encoding_gpu["pixel_values"][0, 0, 0, :3].to("cpu"),
atol=1e-4,
)
)
# verify area
torch.testing.assert_close(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu"))
# verify boxes
self.assertEqual(encoding_cpu["labels"][0]["boxes"].shape, encoding_gpu["labels"][0]["boxes"].shape)
self.assertTrue(
torch.allclose(
encoding_cpu["labels"][0]["boxes"][0], encoding_gpu["labels"][0]["boxes"][0].to("cpu"), atol=1e-3
)
)
# verify image_id
torch.testing.assert_close(
encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu")
)
# verify is_crowd
torch.testing.assert_close(
encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu")
)
# verify class_labels
self.assertTrue(
torch.allclose(
encoding_cpu["labels"][0]["class_labels"], encoding_gpu["labels"][0]["class_labels"].to("cpu")
)
)
# verify masks
masks_sum_cpu = encoding_cpu["labels"][0]["masks"].sum()
masks_sum_gpu = encoding_gpu["labels"][0]["masks"].sum()
relative_error = torch.abs(masks_sum_cpu - masks_sum_gpu) / masks_sum_cpu
self.assertTrue(relative_error < 1e-3)
# verify orig_size
torch.testing.assert_close(
encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu")
)
# verify size
torch.testing.assert_close(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu"))
| DeformableDetrImageProcessingTest |
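The two conversion blocks in the test above do the same thing twice: scale normalized (cx, cy, w, h) boxes to the padded image size, then switch to corner format. A minimal standalone sketch of that arithmetic (plain PyTorch; the function name is mine):
```python
import torch

def cxcywh_to_xyxy(boxes: torch.Tensor, width: int, height: int) -> torch.Tensor:
    """Scale normalized (cx, cy, w, h) boxes to absolute pixels and
    convert them to (x_min, y_min, x_max, y_max) corner format."""
    scale = boxes.new_tensor([width, height, width, height])
    cx, cy, w, h = (boxes * scale).unbind(-1)
    return torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)

boxes = torch.tensor([[0.5, 0.5, 0.2, 0.4]])
print(cxcywh_to_xyxy(boxes, width=1066, height=800))
# tensor([[426.4000, 240.0000, 639.6000, 560.0000]])
```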
python | pyca__cryptography | tests/hazmat/primitives/test_ciphers.py | {
"start": 1319,
"end": 1876
} | class ____:
@pytest.mark.parametrize(
("key", "keysize"),
[(b"0" * 32, 128), (b"0" * 48, 192), (b"0" * 64, 256)],
)
def test_key_size(self, key, keysize):
cipher = AES(binascii.unhexlify(key))
assert cipher.key_size == keysize
def test_invalid_key_size(self):
with pytest.raises(ValueError):
AES(binascii.unhexlify(b"0" * 12))
def test_invalid_key_type(self):
with pytest.raises(TypeError, match="key must be bytes"):
AES("0" * 32) # type: ignore[arg-type]
| TestAES |
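The class exercised here is the real `AES` algorithm object from `cryptography`; the same key-size behaviour can be checked interactively (assuming the library is installed):
```python
from cryptography.hazmat.primitives.ciphers.algorithms import AES

print(AES(b"\x00" * 16).key_size)  # 128 bits for a 16-byte key
print(AES(b"\x00" * 32).key_size)  # 256

try:
    AES(b"\x00" * 6)  # not a valid AES key length
except ValueError as exc:
    print(exc)
```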
python | wandb__wandb | wandb/vendor/pygments/lexers/jvm.py | {
"start": 48582,
"end": 51009
} | class ____(RegexLexer):
"""
For `Pig Latin <https://pig.apache.org/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Pig'
aliases = ['pig']
filenames = ['*.pig']
mimetypes = ['text/x-pig']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'--.*', Comment),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'\\\n', Text),
(r'\\', Text),
(r'\'(?:\\[ntbrf\\\']|\\u[0-9a-f]{4}|[^\'\\\n\r])*\'', String),
include('keywords'),
include('types'),
include('builtins'),
include('punct'),
include('operators'),
(r'[0-9]*\.[0-9]+(e[0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text),
(r'([a-z_]\w*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[()#:]', Text),
(r'[^(:#\'")\s]+', Text),
(r'\S+\s+', Text) # TODO: make tests pass without \s+
],
'keywords': [
(r'(assert|and|any|all|arrange|as|asc|bag|by|cache|CASE|cat|cd|cp|'
r'%declare|%default|define|dense|desc|describe|distinct|du|dump|'
r'eval|exex|explain|filter|flatten|foreach|full|generate|group|'
r'help|if|illustrate|import|inner|input|into|is|join|kill|left|'
r'limit|load|ls|map|matches|mkdir|mv|not|null|onschema|or|order|'
r'outer|output|parallel|pig|pwd|quit|register|returns|right|rm|'
r'rmf|rollup|run|sample|set|ship|split|stderr|stdin|stdout|store|'
r'stream|through|union|using|void)\b', Keyword)
],
'builtins': [
(r'(AVG|BinStorage|cogroup|CONCAT|copyFromLocal|copyToLocal|COUNT|'
r'cross|DIFF|MAX|MIN|PigDump|PigStorage|SIZE|SUM|TextLoader|'
r'TOKENIZE)\b', Name.Builtin)
],
'types': [
(r'(bytearray|BIGINTEGER|BIGDECIMAL|chararray|datetime|double|float|'
r'int|long|tuple)\b', Keyword.Type)
],
'punct': [
(r'[;(){}\[\]]', Punctuation),
],
'operators': [
(r'[#=,./%+\-?]', Operator),
(r'(eq|gt|lt|gte|lte|neq|matches)\b', Operator),
(r'(==|<=|<|>=|>|!=)', Operator),
],
}
| PigLexer |
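This is a vendored copy of Pygments' Pig lexer; upstream Pygments ships the same class, so it can be exercised directly. A minimal sketch (assuming the `pygments` package is available):
```python
from pygments import highlight
from pygments.lexers import PigLexer
from pygments.formatters import TerminalFormatter

code = "a = LOAD 'data.txt' AS (name:chararray, age:int);\nDUMP a;"
print(highlight(code, PigLexer(), TerminalFormatter()))
```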
python | ethereum__web3.py | web3/types.py | {
"start": 3442,
"end": 4507
} | class ____(TypedDict):
chainId: int
address: Address | ChecksumAddress | str
nonce: Nonce
y_parity: int
r: int
s: int
# syntax b/c "from" keyword not allowed w/ class construction
TxParams = TypedDict(
"TxParams",
{
"accessList": AccessList,
"authorizationList": Sequence[
Union[SetCodeAuthorizationParams, SignedSetCodeAuthorization]
],
"blobVersionedHashes": Sequence[Union[str, HexStr, bytes, HexBytes]],
"chainId": int,
"data": Union[bytes, HexStr],
# addr or ens
"from": Union[Address, ChecksumAddress, str],
"gas": int,
# legacy pricing
"gasPrice": Wei,
"maxFeePerBlobGas": Union[str, Wei],
# dynamic fee pricing
"maxFeePerGas": Union[str, Wei],
"maxPriorityFeePerGas": Union[str, Wei],
"nonce": Nonce,
# addr or ens
"to": Union[Address, ChecksumAddress, str],
"type": Union[int, HexStr],
"value": Wei,
},
total=False,
)
| SetCodeAuthorizationParams |
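The inline comment above notes why `TxParams` falls back to the functional `TypedDict` form: `from` is a Python keyword, so it cannot be declared as a field in class syntax. A small self-contained illustration of the same constraint (the `Tx` name is mine):
```python
from typing import TypedDict

# Class syntax cannot declare a "from" key:
#
#     class Tx(TypedDict):
#         from: str   # SyntaxError: "from" is a reserved keyword
#
# The functional form works because keys are plain strings:
Tx = TypedDict("Tx", {"from": str, "to": str}, total=False)

tx: Tx = {"from": "0xabc...", "to": "0xdef..."}
print(tx["from"])
```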
python | sympy__sympy | sympy/plotting/series.py | {
"start": 4153,
"end": 38452
} | class ____:
"""Base class for the data objects containing stuff to be plotted.
Notes
=====
The backend should check if it supports the data series that is given.
(e.g. TextBackend supports only LineOver1DRangeSeries).
It is the backend responsibility to know how to use the class of
data series that is given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
not obliged to use that api (e.g. LineOver1DRangeSeries belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
BaseSeries
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y, list_y
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
# - get_meshes returning mesh_x (1D array), mesh_y(1D array,
# mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
# Different from is_contour as the colormap in backend will be
# different
is_interactive = False
# An interactive series can update its data.
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculation aesthetics
is_generic = False
# Represent generic user-provided numerical data
is_vector = False
is_2Dvector = False
is_3Dvector = False
# Represents a 2D or 3D vector data series
_N = 100
# default number of discretization points for uniform sampling. Each
# subclass can set its number.
def __init__(self, *args, **kwargs):
kwargs = _set_discretization_points(kwargs.copy(), type(self))
# discretize the domain using only integer numbers
self.only_integers = kwargs.get("only_integers", False)
# represents the evaluation modules to be used by lambdify
self.modules = kwargs.get("modules", None)
# plot functions might create data series that might not be useful to
# be shown on the legend, for example wireframe lines on 3D plots.
self.show_in_legend = kwargs.get("show_in_legend", True)
# line and surface series can show data with a colormap, hence a
# colorbar is essential to understand the data. However, sometime it
# is useful to hide it on series-by-series base. The following keyword
# controls whether the series should show a colorbar or not.
self.colorbar = kwargs.get("colorbar", True)
# Some series might use a colormap as default coloring. Setting this
# attribute to False will inform the backends to use solid color.
self.use_cm = kwargs.get("use_cm", False)
# If True, the backend will attempt to render it on a polar-projection
# axis, or using a polar discretization if a 3D plot is requested
self.is_polar = kwargs.get("is_polar", kwargs.get("polar", False))
# If True, the rendering will use points, not lines.
self.is_point = kwargs.get("is_point", kwargs.get("point", False))
# some backend is able to render latex, other needs standard text
self._label = self._latex_label = ""
self._ranges = []
self._n = [
int(kwargs.get("n1", self._N)),
int(kwargs.get("n2", self._N)),
int(kwargs.get("n3", self._N))
]
self._scales = [
kwargs.get("xscale", "linear"),
kwargs.get("yscale", "linear"),
kwargs.get("zscale", "linear")
]
# enable interactive widget plots
self._params = kwargs.get("params", {})
if not isinstance(self._params, dict):
raise TypeError("`params` must be a dictionary mapping symbols "
"to numeric values.")
if len(self._params) > 0:
self.is_interactive = True
# contains keyword arguments that will be passed to the rendering
# function of the chosen plotting library
self.rendering_kw = kwargs.get("rendering_kw", {})
# numerical transformation functions to be applied to the output data:
# x, y, z (coordinates), p (parameter on parametric plots)
self._tx = kwargs.get("tx", None)
self._ty = kwargs.get("ty", None)
self._tz = kwargs.get("tz", None)
self._tp = kwargs.get("tp", None)
if not all(callable(t) or (t is None) for t in
[self._tx, self._ty, self._tz, self._tp]):
raise TypeError("`tx`, `ty`, `tz`, `tp` must be functions.")
# list of numerical functions representing the expressions to evaluate
self._functions = []
# signature for the numerical functions
self._signature = []
# some expressions don't like to be evaluated over complex data.
# if that's the case, set this to True
self._force_real_eval = kwargs.get("force_real_eval", None)
# this attribute will eventually contain a dictionary with the
# discretized ranges
self._discretized_domain = None
# whether the series contains any interactive range, which is a range
# where the minimum and maximum values can be changed with an
# interactive widget
self._interactive_ranges = False
# NOTE: consider a generic summation, for example:
# s = Sum(cos(pi * x), (x, 1, y))
# This gets lambdified to something:
# sum(cos(pi*x) for x in range(1, y+1))
# Hence, y needs to be an integer, otherwise it raises:
# TypeError: 'complex' object cannot be interpreted as an integer
# This list will contains symbols that are upper bound to summations
# or products
self._needs_to_be_int = []
# a color function will be responsible to set the line/surface color
# according to some logic. Each data series will set an appropriate
# default value.
self.color_func = None
# NOTE: color_func usually receives numerical functions that are going
# to be evaluated over the coordinates of the computed points (or the
# discretized meshes).
# However, if an expression is given to color_func, then it will be
# lambdified with symbols in self._signature, and it will be evaluated
# with the same data used to evaluate the plotted expression.
self._eval_color_func_with_signature = False
def _block_lambda_functions(self, *exprs):
"""Some data series can be used to plot numerical functions, others
cannot. Execute this method inside the `__init__` to prevent the
processing of numerical functions.
"""
if any(callable(e) for e in exprs):
raise TypeError(type(self).__name__ + " requires a symbolic "
"expression.")
def _check_fs(self):
""" Checks if there are enough parameters and free symbols.
"""
exprs, ranges = self.expr, self.ranges
params, label = self.params, self.label
exprs = exprs if hasattr(exprs, "__iter__") else [exprs]
if any(callable(e) for e in exprs):
return
# from the expression's free symbols, remove the ones used in
# the parameters and the ranges
fs = _get_free_symbols(exprs)
fs = fs.difference(params.keys())
if ranges is not None:
fs = fs.difference([r[0] for r in ranges])
if len(fs) > 0:
raise ValueError(
"Incompatible expression and parameters.\n"
+ "Expression: {}\n".format(
(exprs, ranges, label) if ranges is not None else (exprs, label))
+ "params: {}\n".format(params)
+ "Specify what these symbols represent: {}\n".format(fs)
+ "Are they ranges or parameters?"
)
# verify that all symbols are known (they either represent plotting
# ranges or parameters)
range_symbols = [r[0] for r in ranges]
for r in ranges:
fs = set().union(*[e.free_symbols for e in r[1:]])
if any(t in fs for t in range_symbols):
# ranges can't depend on each other, for example these are
# not allowed:
# (x, 0, y), (y, 0, 3)
# (x, 0, y), (y, x + 2, 3)
raise ValueError("Range symbols can't be included into "
"minimum and maximum of a range. "
"Received range: %s" % str(r))
if len(fs) > 0:
self._interactive_ranges = True
remaining_fs = fs.difference(params.keys())
if len(remaining_fs) > 0:
raise ValueError(
"Unknown symbols found in plotting range: %s. " % (r,) +
"Are the following parameters? %s" % remaining_fs)
def _create_lambda_func(self):
"""Create the lambda functions to be used by the uniform meshing
strategy.
Notes
=====
The old sympy.plotting used experimental_lambdify. It created one
lambda function each time an evaluation was requested. If that failed,
it went on to create a different lambda function and evaluated it,
and so on.
This new module changes strategy: it creates right away the default
lambda function as well as the backup one. The reason is that the
series could be interactive, hence the numerical function will be
evaluated multiple times. So, let's create the functions just once.
This approach works fine for the majority of cases, in which the
symbolic expression is relatively short, hence the lambdification
is fast. If the expression is very long, this approach takes twice
the time to create the lambda functions. Be aware of that!
"""
exprs = self.expr if hasattr(self.expr, "__iter__") else [self.expr]
if not any(callable(e) for e in exprs):
fs = _get_free_symbols(exprs)
self._signature = sorted(fs, key=lambda t: t.name)
# Generate a list of lambda functions, two for each expression:
# 1. the default one.
# 2. the backup one, in case of failures with the default one.
self._functions = []
for e in exprs:
# TODO: set cse=True once this issue is solved:
# https://github.com/sympy/sympy/issues/24246
self._functions.append([
lambdify(self._signature, e, modules=self.modules),
lambdify(self._signature, e, modules="sympy", dummify=True),
])
else:
self._signature = sorted([r[0] for r in self.ranges], key=lambda t: t.name)
self._functions = [(e, None) for e in exprs]
# deal with symbolic color_func
if isinstance(self.color_func, Expr):
self.color_func = lambdify(self._signature, self.color_func)
self._eval_color_func_with_signature = True
def _update_range_value(self, t):
"""If the value of a plotting range is a symbolic expression,
substitute the parameters in order to get a numerical value.
"""
if not self._interactive_ranges:
return complex(t)
return complex(t.subs(self.params))
def _create_discretized_domain(self):
"""Discretize the ranges for uniform meshing strategy.
"""
# NOTE: the goal is to create a dictionary stored in
# self._discretized_domain, mapping symbols to a numpy array
# representing the discretization
discr_symbols = []
discretizations = []
# create a 1D discretization
for i, r in enumerate(self.ranges):
discr_symbols.append(r[0])
c_start = self._update_range_value(r[1])
c_end = self._update_range_value(r[2])
start = c_start.real if c_start.imag == c_end.imag == 0 else c_start
end = c_end.real if c_start.imag == c_end.imag == 0 else c_end
needs_integer_discr = self.only_integers or (r[0] in self._needs_to_be_int)
d = BaseSeries._discretize(start, end, self.n[i],
scale=self.scales[i],
only_integers=needs_integer_discr)
if ((not self._force_real_eval) and (not needs_integer_discr) and
(d.dtype != "complex")):
d = d + 1j * c_start.imag
if needs_integer_discr:
d = d.astype(int)
discretizations.append(d)
# create 2D or 3D
self._create_discretized_domain_helper(discr_symbols, discretizations)
def _create_discretized_domain_helper(self, discr_symbols, discretizations):
"""Create 2D or 3D discretized grids.
Subclasses should override this method in order to implement a
different behaviour.
"""
np = import_module('numpy')
# discretization suitable for 2D line plots, 3D surface plots,
# contours plots, vector plots
# NOTE: why indexing='ij'? Because it produces consistent results with
# np.mgrid. This is important as Mayavi requires this indexing
# to correctly compute 3D streamlines. While VTK is able to compute
# streamlines regardless of the indexing, with indexing='xy' it
# produces "strange" results with "voids" into the
# discretization volume. indexing='ij' solves the problem.
# Also note that matplotlib 2D streamlines requires indexing='xy'.
indexing = "xy"
if self.is_3Dvector or (self.is_3Dsurface and self.is_implicit):
indexing = "ij"
meshes = np.meshgrid(*discretizations, indexing=indexing)
self._discretized_domain = dict(zip(discr_symbols, meshes))
def _evaluate(self, cast_to_real=True):
"""Evaluation of the symbolic expression (or expressions) with the
uniform meshing strategy, based on current values of the parameters.
"""
np = import_module('numpy')
# create lambda functions
if not self._functions:
self._create_lambda_func()
# create (or update) the discretized domain
if (not self._discretized_domain) or self._interactive_ranges:
self._create_discretized_domain()
# ensure that discretized domains are returned with the proper order
discr = [self._discretized_domain[s[0]] for s in self.ranges]
args = self._aggregate_args()
results = []
for f in self._functions:
r = _uniform_eval(*f, *args)
# the evaluation might produce an int/float. Need this correction.
r = self._correct_shape(np.array(r), discr[0])
# sometime the evaluation is performed over arrays of type object.
# hence, `result` might be of type object, which don't work well
# with numpy real and imag functions.
r = r.astype(complex)
results.append(r)
if cast_to_real:
discr = [np.real(d.astype(complex)) for d in discr]
return [*discr, *results]
def _aggregate_args(self):
"""Create a list of arguments to be passed to the lambda function,
sorted according to self._signature.
"""
args = []
for s in self._signature:
if s in self._params.keys():
args.append(
int(self._params[s]) if s in self._needs_to_be_int else
self._params[s] if self._force_real_eval
else complex(self._params[s]))
else:
args.append(self._discretized_domain[s])
return args
@property
def expr(self):
"""Return the expression (or expressions) of the series."""
return self._expr
@expr.setter
def expr(self, e):
"""Set the expression (or expressions) of the series."""
is_iter = hasattr(e, "__iter__")
is_callable = callable(e) if not is_iter else any(callable(t) for t in e)
if is_callable:
self._expr = e
else:
self._expr = sympify(e) if not is_iter else Tuple(*e)
# look for the upper bound of summations and products
s = set()
for e in self._expr.atoms(Sum, Product):
for a in e.args[1:]:
if isinstance(a[-1], Symbol):
s.add(a[-1])
self._needs_to_be_int = list(s)
# list of sympy functions that when lambdified, the corresponding
# numpy functions don't like complex-type arguments
pf = [ceiling, floor, atan2, frac, zeta]
if self._force_real_eval is not True:
check_res = [self._expr.has(f) for f in pf]
self._force_real_eval = any(check_res)
if self._force_real_eval and ((self.modules is None) or
(isinstance(self.modules, str) and "numpy" in self.modules)):
funcs = [f for f, c in zip(pf, check_res) if c]
warnings.warn("NumPy is unable to evaluate with complex "
"numbers some of the functions included in this "
"symbolic expression: %s. " % funcs +
"Hence, the evaluation will use real numbers. "
"If you believe the resulting plot is incorrect, "
"change the evaluation module by setting the "
"`modules` keyword argument.")
if self._functions:
# update lambda functions
self._create_lambda_func()
@property
def is_3D(self):
flags3D = [self.is_3Dline, self.is_3Dsurface, self.is_3Dvector]
return any(flags3D)
@property
def is_line(self):
flagslines = [self.is_2Dline, self.is_3Dline]
return any(flagslines)
def _line_surface_color(self, prop, val):
"""This method enables back-compatibility with old sympy.plotting"""
# NOTE: color_func is set inside the init method of the series.
# If line_color/surface_color is not a callable, then color_func will
# be set to None.
setattr(self, prop, val)
if callable(val) or isinstance(val, Expr):
self.color_func = val
setattr(self, prop, None)
elif val is not None:
self.color_func = None
@property
def line_color(self):
return self._line_color
@line_color.setter
def line_color(self, val):
self._line_surface_color("_line_color", val)
@property
def n(self):
"""Returns a list [n1, n2, n3] of numbers of discratization points.
"""
return self._n
@n.setter
def n(self, v):
"""Set the numbers of discretization points. ``v`` must be an int or
a list.
Let ``s`` be a series. Then:
* to set the number of discretization points along the x direction (or
first parameter): ``s.n = 10``
* to set the number of discretization points along the x and y
directions (or first and second parameters): ``s.n = [10, 15]``
* to set the number of discretization points along the x, y and z
directions: ``s.n = [10, 15, 20]``
The following is not recommended, because it prevents
the execution of the code necessary to keep the data updated:
``s.n[1] = 15``
"""
if not hasattr(v, "__iter__"):
self._n[0] = v
else:
self._n[:len(v)] = v
if self._discretized_domain:
# update the discretized domain
self._create_discretized_domain()
@property
def params(self):
"""Get or set the current parameters dictionary.
Parameters
==========
p : dict
* key: symbol associated to the parameter
* val: the numeric value
"""
return self._params
@params.setter
def params(self, p):
self._params = p
def _post_init(self):
exprs = self.expr if hasattr(self.expr, "__iter__") else [self.expr]
if any(callable(e) for e in exprs) and self.params:
raise TypeError("`params` was provided, hence an interactive plot "
"is expected. However, interactive plots do not support "
"user-provided numerical functions.")
# if the expressions is a lambda function and no label has been
# provided, then its better to do the following in order to avoid
# surprises on the backend
if any(callable(e) for e in exprs):
if self._label == str(self.expr):
self.label = ""
self._check_fs()
if hasattr(self, "adaptive") and self.adaptive and self.params:
warnings.warn("`params` was provided, hence an interactive plot "
"is expected. However, interactive plots do not support "
"adaptive evaluation. Automatically switched to "
"adaptive=False.")
self.adaptive = False
@property
def scales(self):
return self._scales
@scales.setter
def scales(self, v):
if isinstance(v, str):
self._scales[0] = v
else:
self._scales[:len(v)] = v
@property
def surface_color(self):
return self._surface_color
@surface_color.setter
def surface_color(self, val):
self._line_surface_color("_surface_color", val)
@property
def rendering_kw(self):
return self._rendering_kw
@rendering_kw.setter
def rendering_kw(self, kwargs):
if isinstance(kwargs, dict):
self._rendering_kw = kwargs
else:
self._rendering_kw = {}
if kwargs is not None:
warnings.warn(
"`rendering_kw` must be a dictionary, instead an "
"object of type %s was received. " % type(kwargs) +
"Automatically setting `rendering_kw` to an empty "
"dictionary")
@staticmethod
def _discretize(start, end, N, scale="linear", only_integers=False):
"""Discretize a 1D domain.
Returns
=======
domain : np.ndarray with dtype=float or complex
The domain's dtype will be float or complex (depending on the
type of start/end) even if only_integers=True. It is left for
the downstream code to perform further casting, if necessary.
"""
np = import_module('numpy')
if only_integers is True:
start, end = int(start), int(end)
N = end - start + 1
if scale == "linear":
return np.linspace(start, end, N)
return np.geomspace(start, end, N)
@staticmethod
def _correct_shape(a, b):
"""Convert ``a`` to a np.ndarray of the same shape of ``b``.
Parameters
==========
a : int, float, complex, np.ndarray
Usually, this is the result of a numerical evaluation of a
symbolic expression. Even if a discretized domain was used to
evaluate the function, the result can be a scalar (int, float,
complex). Think, for example, of ``expr = Float(2)`` and
``f = lambdify(x, expr)``. No matter the shape of the numerical
array representing x, the result of the evaluation will be
a single value.
b : np.ndarray
It represents the correct shape that ``a`` should have.
Returns
=======
new_a : np.ndarray
An array with the correct shape.
"""
np = import_module('numpy')
if not isinstance(a, np.ndarray):
a = np.array(a)
if a.shape != b.shape:
if a.shape == ():
a = a * np.ones_like(b)
else:
a = a.reshape(b.shape)
return a
def eval_color_func(self, *args):
"""Evaluate the color function.
Parameters
==========
args : tuple
Arguments to be passed to the coloring function. Can be coordinates
or parameters or both.
Notes
=====
The backend will request the data series to generate the numerical
data. Depending on the data series, either the data series itself or
the backend will eventually execute this function to generate the
appropriate coloring value.
"""
np = import_module('numpy')
if self.color_func is None:
# NOTE: with the line_color and surface_color attributes
# (back-compatibility with the old sympy.plotting module) it is
# possible to create a plot with a callable line_color (or
# surface_color). For example:
# p = plot(sin(x), line_color=lambda x, y: -y)
# This creates a ColoredLineOver1DRangeSeries with line_color=None
# and color_func=lambda x, y: -y, which effectively is a
# parametric series. Later we could change it to a string value:
# p[0].line_color = "red"
# However, this sets line_color="red" and color_func=None, but the
# series is still ColoredLineOver1DRangeSeries (a parametric
# series), which will render using a color_func...
warnings.warn("This is likely not the result you were "
"looking for. Please, re-execute the plot command, this time "
"with the appropriate an appropriate value to line_color "
"or surface_color.")
return np.ones_like(args[0])
if self._eval_color_func_with_signature:
args = self._aggregate_args()
color = self.color_func(*args)
_re, _im = np.real(color), np.imag(color)
_re[np.invert(np.isclose(_im, np.zeros_like(_im)))] = np.nan
return _re
nargs = arity(self.color_func)
if nargs == 1:
if self.is_2Dline and self.is_parametric:
if len(args) == 2:
# ColoredLineOver1DRangeSeries
return self._correct_shape(self.color_func(args[0]), args[0])
# Parametric2DLineSeries
return self._correct_shape(self.color_func(args[2]), args[2])
elif self.is_3Dline and self.is_parametric:
return self._correct_shape(self.color_func(args[3]), args[3])
elif self.is_3Dsurface and self.is_parametric:
return self._correct_shape(self.color_func(args[3]), args[3])
return self._correct_shape(self.color_func(args[0]), args[0])
elif nargs == 2:
if self.is_3Dsurface and self.is_parametric:
return self._correct_shape(self.color_func(*args[3:]), args[3])
return self._correct_shape(self.color_func(*args[:2]), args[0])
return self._correct_shape(self.color_func(*args[:nargs]), args[0])
def get_data(self):
"""Compute and returns the numerical data.
The number of parameters returned by this method depends on the
specific instance. If ``s`` is the series, make sure to read
``help(s.get_data)`` to understand what it returns.
"""
raise NotImplementedError
def _get_wrapped_label(self, label, wrapper):
"""Given a latex representation of an expression, wrap it inside
some characters. Matplotlib needs "$%s$", K3D-Jupyter needs "%s".
"""
return wrapper % label
def get_label(self, use_latex=False, wrapper="$%s$"):
"""Return the label to be used to display the expression.
Parameters
==========
use_latex : bool
If False, the string representation of the expression is returned.
If True, the latex representation is returned.
wrapper : str
The backend might need the latex representation to be wrapped by
some characters. Default to ``"$%s$"``.
Returns
=======
label : str
"""
if use_latex is False:
return self._label
if self._label == str(self.expr):
# when the backend requests a latex label and user didn't provide
# any label
return self._get_wrapped_label(self._latex_label, wrapper)
return self._latex_label
@property
def label(self):
return self.get_label()
@label.setter
def label(self, val):
"""Set the labels associated to this series."""
# NOTE: the init method of any series requires a label. If the user does
# not provide it, the preprocessing function will set label=None, which
# informs the series to initialize two attributes:
# _label contains the string representation of the expression.
# _latex_label contains the latex representation of the expression.
self._label = self._latex_label = val
@property
def ranges(self):
return self._ranges
@ranges.setter
def ranges(self, val):
new_vals = []
for v in val:
if v is not None:
new_vals.append(tuple([sympify(t) for t in v]))
self._ranges = new_vals
def _apply_transform(self, *args):
"""Apply transformations to the results of numerical evaluation.
Parameters
==========
args : tuple
Results of numerical evaluation.
Returns
=======
transformed_args : tuple
Tuple containing the transformed results.
"""
t = lambda x, transform: x if transform is None else transform(x)
x, y, z = None, None, None
if len(args) == 2:
x, y = args
return t(x, self._tx), t(y, self._ty)
elif (len(args) == 3) and isinstance(self, Parametric2DLineSeries):
x, y, u = args
return (t(x, self._tx), t(y, self._ty), t(u, self._tp))
elif len(args) == 3:
x, y, z = args
return t(x, self._tx), t(y, self._ty), t(z, self._tz)
elif (len(args) == 4) and isinstance(self, Parametric3DLineSeries):
x, y, z, u = args
return (t(x, self._tx), t(y, self._ty), t(z, self._tz), t(u, self._tp))
elif len(args) == 4: # 2D vector plot
x, y, u, v = args
return (
t(x, self._tx), t(y, self._ty),
t(u, self._tx), t(v, self._ty)
)
elif (len(args) == 5) and isinstance(self, ParametricSurfaceSeries):
x, y, z, u, v = args
return (t(x, self._tx), t(y, self._ty), t(z, self._tz), u, v)
elif (len(args) == 6) and self.is_3Dvector: # 3D vector plot
x, y, z, u, v, w = args
return (
t(x, self._tx), t(y, self._ty), t(z, self._tz),
t(u, self._tx), t(v, self._ty), t(w, self._tz)
)
elif len(args) == 6: # complex plot
x, y, _abs, _arg, img, colors = args
return (
x, y, t(_abs, self._tz), _arg, img, colors)
return args
def _str_helper(self, s):
pre, post = "", ""
if self.is_interactive:
pre = "interactive "
post = " and parameters " + str(tuple(self.params.keys()))
return pre + s + post
def _detect_poles_numerical_helper(x, y, eps=0.01, expr=None, symb=None, symbolic=False):
"""Compute the steepness of each segment. If it's greater than a
threshold, set the right-point y-value to NaN and record the
corresponding x-location for further processing.
Returns
=======
x : np.ndarray
Unchanged x-data.
yy : np.ndarray
Modified y-data with NaN values.
"""
np = import_module('numpy')
yy = y.copy()
threshold = np.pi / 2 - eps
for i in range(len(x) - 1):
dx = x[i + 1] - x[i]
dy = abs(y[i + 1] - y[i])
angle = np.arctan(dy / dx)
if abs(angle) >= threshold:
yy[i + 1] = np.nan
return x, yy
def _detect_poles_symbolic_helper(expr, symb, start, end):
"""Attempts to compute symbolic discontinuities.
Returns
=======
pole : list
List of symbolic poles, possibly empty.
"""
poles = []
interval = Interval(nsimplify(start), nsimplify(end))
res = continuous_domain(expr, symb, interval)
res = res.simplify()
if res == interval:
pass
elif (isinstance(res, Union) and
all(isinstance(t, Interval) for t in res.args)):
poles = []
for s in res.args:
if s.left_open:
poles.append(s.left)
if s.right_open:
poles.append(s.right)
poles = list(set(poles))
else:
raise ValueError(
f"Could not parse the following object: {res} .\n"
"Please, submit this as a bug. Consider also to set "
"`detect_poles=True`."
)
return poles
### 2D lines
| BaseSeries |
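`BaseSeries._discretize` above is a thin switch between linear and logarithmic spacing, plus an integer mode that forces unit steps; the behaviour is easy to confirm in isolation (a sketch mirroring the method, not sympy API):
```python
import numpy as np

def discretize(start, end, n, scale="linear", only_integers=False):
    """Mirror of BaseSeries._discretize: n points between start and end,
    spaced linearly or geometrically; integer mode forces unit steps."""
    if only_integers:
        start, end = int(start), int(end)
        n = end - start + 1
    if scale == "linear":
        return np.linspace(start, end, n)
    return np.geomspace(start, end, n)

print(discretize(0, 10, 5))                         # [ 0.   2.5  5.   7.5 10. ]
print(discretize(1, 100, 5, scale="log"))           # [  1.     3.162  10.    31.623 100.  ]
print(discretize(0.2, 4.7, 5, only_integers=True))  # [0. 1. 2. 3. 4.]
```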
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 101086,
"end": 101308
} | class ____(Structure):
_fields_ = [("bIsPresent", c_uint, 1),
("percentage", c_uint),
("incThreshold", c_uint),
("decThreshold", c_uint)]
| c_nvmlGpuDynamicPstatesUtilization_t |
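This is a plain `ctypes` mirror of an NVML C struct; the third element of the first field tuple declares a 1-bit bitfield. The same pattern in a self-contained toy struct (not the NVML ABI):
```python
from ctypes import Structure, c_uint

class Flags(Structure):
    # (name, type, bit_width): bIsPresent occupies a single bit
    _fields_ = [("bIsPresent", c_uint, 1),
                ("percentage", c_uint)]

f = Flags()
f.bIsPresent = 1
f.percentage = 42
print(f.bIsPresent, f.percentage)  # 1 42
```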
python | run-llama__llama_index | llama-index-packs/llama-index-packs-llama-dataset-metadata/llama_index/packs/llama_dataset_metadata/base.py | {
"start": 1543,
"end": 1746
} | class ____(BaseModel):
"""Base Metadata class."""
class Config:
alias_generator = to_camel
allow_population_by_field_name = True
arbitrary_types_allowed = True
| BaseMetadata |
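The `Config` above is pydantic-v1 style: `alias_generator = to_camel` exposes snake_case fields under camelCase aliases, and `allow_population_by_field_name` accepts either spelling. A sketch of the effect, with a hand-rolled `to_camel` since the real helper is imported elsewhere (assumes pydantic v1 semantics):
```python
from pydantic import BaseModel  # pydantic v1 (or pydantic.v1 on v2)

def to_camel(name: str) -> str:
    head, *rest = name.split("_")
    return head + "".join(part.title() for part in rest)

class Metadata(BaseModel):
    class Config:
        alias_generator = to_camel
        allow_population_by_field_name = True

    created_by: str

m = Metadata.parse_obj({"createdBy": "alice"})         # camelCase input
print(m.created_by)                                    # alice
print(Metadata(created_by="bob").dict(by_alias=True))  # {'createdBy': 'bob'}
```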
python | urllib3__urllib3 | src/urllib3/exceptions.py | {
"start": 7423,
"end": 8053
} | class ____(HTTPError, httplib_IncompleteRead):
"""
Response length doesn't match expected Content-Length
Subclass of :class:`http.client.IncompleteRead` to allow int value
for ``partial`` to avoid creating large objects on streamed reads.
"""
partial: int # type: ignore[assignment]
expected: int
def __init__(self, partial: int, expected: int) -> None:
self.partial = partial
self.expected = expected
def __repr__(self) -> str:
return "IncompleteRead(%i bytes read, %i more expected)" % (
self.partial,
self.expected,
)
| IncompleteRead |
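The subclass exists mainly so `partial` can be a plain byte count rather than the buffered bytes `http.client.IncompleteRead` normally stores. A quick behaviour check (assuming urllib3 is installed):
```python
from urllib3.exceptions import IncompleteRead

err = IncompleteRead(partial=512, expected=1024)
print(repr(err))  # IncompleteRead(512 bytes read, 1024 more expected)
```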
python | langchain-ai__langchain | libs/core/langchain_core/runnables/graph.py | {
"start": 1410,
"end": 2271
} | class ____(NamedTuple):
"""Edge in a graph."""
source: str
"""The source node id."""
target: str
"""The target node id."""
data: Stringifiable | None = None
"""Optional data associated with the edge. """
conditional: bool = False
"""Whether the edge is conditional."""
def copy(self, *, source: str | None = None, target: str | None = None) -> Edge:
"""Return a copy of the edge with optional new source and target nodes.
Args:
source: The new source node id.
target: The new target node id.
Returns:
A copy of the edge with the new source and target nodes.
"""
return Edge(
source=source or self.source,
target=target or self.target,
data=self.data,
conditional=self.conditional,
)
| Edge |
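Because `Edge` is a `NamedTuple`, `copy` returns a fresh immutable tuple instead of mutating in place:
```python
from langchain_core.runnables.graph import Edge

e = Edge(source="a", target="b", data="label")
e2 = e.copy(target="c")
print(e)   # Edge(source='a', target='b', data='label', conditional=False)
print(e2)  # Edge(source='a', target='c', data='label', conditional=False)
```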
python | fluentpython__example-code-2e | 17-it-generator/aritprog_v0.py | {
"start": 129,
"end": 563
} | class ____:
def __init__(self, begin, step, end=None):
self.begin = begin
self.step = step
self.end = end # None -> "infinite" series
def __iter__(self):
result_type = type(self.begin + self.step)
result = result_type(self.begin)
forever = self.end is None
while forever or result < self.end:
yield result
result += self.step
| ArithmeticProgression |
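`__iter__` is a generator function, so each `iter()` call yields a fresh, possibly infinite stream; `result_type` coerces the first value so the whole series has a uniform numeric type. Usage with the class as defined above:
```python
from fractions import Fraction

print(list(ArithmeticProgression(0, 1, 3)))  # [0, 1, 2]
print(list(ArithmeticProgression(0, Fraction(1, 3), 1)))
# [Fraction(0, 1), Fraction(1, 3), Fraction(2, 3)]
```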
python | huggingface__transformers | tests/models/detr/test_image_processing_detr.py | {
"start": 1267,
"end": 4853
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_rescale=True,
rescale_factor=1 / 255,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
do_pad=True,
):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_pad = do_pad
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to DetrImageProcessor,
assuming do_resize is set to True with a scalar size.
"""
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
elif isinstance(image, np.ndarray):
h, w = image.shape[0], image.shape[1]
else:
h, w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size["shortest_edge"] * h / w)
expected_width = self.size["shortest_edge"]
elif w > h:
expected_height = self.size["shortest_edge"]
expected_width = int(self.size["shortest_edge"] * w / h)
else:
expected_height = self.size["shortest_edge"]
expected_width = self.size["shortest_edge"]
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
def expected_output_image_shape(self, images):
height, width = self.get_expected_values(images, batched=True)
return self.num_channels, height, width
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
| DetrImageProcessingTester |
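`get_expected_values` re-implements the shortest-edge resize rule the processor applies: scale so the shorter side hits `shortest_edge` while preserving aspect ratio. The same arithmetic as a standalone helper (the name is mine):
```python
def shortest_edge_resize(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    """Scale (height, width) so the shorter side equals shortest_edge."""
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(shortest_edge_resize(480, 640, 18))  # (18, 24)
```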
python | mlflow__mlflow | mlflow/genai/judges/tools/get_span.py | {
"start": 583,
"end": 5387
} | class ____(JudgeTool):
"""
Tool for retrieving a specific span by its ID.
Returns the complete span data including inputs, outputs, attributes, and events.
"""
@property
def name(self) -> str:
return ToolNames.GET_SPAN
def get_definition(self) -> ToolDefinition:
return ToolDefinition(
function=FunctionToolDefinition(
name=ToolNames.GET_SPAN,
description=(
"Retrieve a specific span by its ID. Returns the complete span data "
"including inputs, outputs, attributes, events, and timing information. "
"Use this when you need to examine the full details of a particular span. "
"Large content may be paginated. Consider selecting only relevant attributes "
"to reduce data size and improve efficiency."
),
parameters=ToolParamsSchema(
type="object",
properties={
"span_id": {
"type": "string",
"description": "The ID of the span to retrieve",
},
"attributes_to_fetch": {
"type": "array",
"items": {"type": "string"},
"description": (
"List of specific attributes to fetch from the span. If specified, "
"only these attributes will be returned. If not specified, all "
"attributes are returned. It is recommended to use list_spans "
"first to see available attribute names, then select relevant ones."
),
},
"max_content_length": {
"type": "integer",
"description": "Maximum content size in bytes (default: 100000)",
},
"page_token": {
"type": "string",
"description": "Token to retrieve the next page of content",
},
},
required=["span_id"],
),
),
type="function",
)
def invoke(
self,
trace: Trace,
span_id: str,
attributes_to_fetch: list[str] | None = None,
max_content_length: int = 100000,
page_token: str | None = None,
) -> SpanResult:
"""
Get a specific span by ID from the trace.
Args:
trace: The MLflow trace object to analyze
span_id: The ID of the span to retrieve
attributes_to_fetch: List of specific attributes to fetch (None for all)
max_content_length: Maximum content size in bytes to return
page_token: Token to retrieve the next page (offset in bytes)
Returns:
SpanResult with the span content as JSON string
"""
if not trace or not trace.data or not trace.data.spans:
return SpanResult(
span_id=None, content=None, content_size_bytes=0, error="Trace has no spans"
)
target_span = None
for span in trace.data.spans:
if span.span_id == span_id:
target_span = span
break
if not target_span:
return SpanResult(
span_id=None,
content=None,
content_size_bytes=0,
error=f"Span with ID '{span_id}' not found in trace",
)
span_dict = target_span.to_dict()
if attributes_to_fetch is not None and span_dict.get("attributes"):
filtered_attributes = {}
for attr in attributes_to_fetch:
if attr in span_dict["attributes"]:
filtered_attributes[attr] = span_dict["attributes"][attr]
span_dict["attributes"] = filtered_attributes
full_content = json.dumps(span_dict, default=str, indent=2)
total_size = len(full_content.encode("utf-8"))
start_offset = parse_page_token(page_token)
end_offset = min(start_offset + max_content_length, total_size)
content_chunk = full_content[start_offset:end_offset]
next_page_token = create_page_token(end_offset) if end_offset < total_size else None
return SpanResult(
span_id=target_span.span_id,
content=content_chunk,
content_size_bytes=len(content_chunk.encode("utf-8")),
page_token=next_page_token,
error=None,
)
| GetSpanTool |
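`invoke` pages through the serialized span by byte offset: the page token is simply the next start offset, and `None` signals the last page. The `parse_page_token` / `create_page_token` helpers are imported elsewhere; a plausible minimal version of the scheme (my sketch, not the mlflow implementation):
```python
from typing import Optional

def parse_page_token(token: Optional[str]) -> int:
    return int(token) if token else 0

def create_page_token(offset: int) -> str:
    return str(offset)

def paginate(content: str, max_len: int, token: Optional[str]):
    start = parse_page_token(token)
    end = min(start + max_len, len(content))
    next_token = create_page_token(end) if end < len(content) else None
    return content[start:end], next_token

chunk, token = paginate("x" * 250, max_len=100, token=None)   # first 100 chars
chunk, token = paginate("x" * 250, max_len=100, token=token)  # next 100
chunk, token = paginate("x" * 250, max_len=100, token=token)  # final 50
print(token)  # None: no more pages
```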
python | getsentry__sentry | tests/sentry/search/events/builder/test_metrics.py | {
"start": 5671,
"end": 61876
} | class ____(MetricBuilderBaseTest):
@pytest.mark.querybuilder
def test_default_conditions(self) -> None:
query = MetricsQueryBuilder(
self.params, query="", dataset=Dataset.PerformanceMetrics, selected_columns=[]
)
self.assertCountEqual(query.where, self.default_conditions)
def test_column_resolution(self) -> None:
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=["tags[transaction]", "transaction"],
)
self.assertCountEqual(
query.columns,
[
self.build_transaction_transform("tags[transaction]"),
self.build_transaction_transform("transaction"),
],
)
def test_simple_aggregates(self) -> None:
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"p50(transaction.duration)",
"p75(measurements.lcp)",
"p90(measurements.fcp)",
"p95(measurements.cls)",
"p99(measurements.fid)",
],
)
self.assertCountEqual(
query.where,
[
*self.default_conditions,
*_metric_conditions(
self.organization.id,
[
"transaction.duration",
"measurements.lcp",
"measurements.fcp",
"measurements.cls",
"measurements.fid",
],
),
],
)
self.assertCountEqual(
query.distributions,
[
_metric_percentile_definition(self.organization.id, "50"),
_metric_percentile_definition(self.organization.id, "75", "measurements.lcp"),
_metric_percentile_definition(self.organization.id, "90", "measurements.fcp"),
_metric_percentile_definition(self.organization.id, "95", "measurements.cls"),
_metric_percentile_definition(self.organization.id, "99", "measurements.fid"),
],
)
def test_custom_percentile_throws_error(self) -> None:
with pytest.raises(IncompatibleMetricsQuery):
MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"percentile(transaction.duration, 0.11)",
],
)
def test_percentile_function(self) -> None:
self.maxDiff = None
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"percentile(transaction.duration, 0.75)",
],
)
self.assertCountEqual(
query.where,
[
*self.default_conditions,
*_metric_conditions(
self.organization.id,
[
"transaction.duration",
],
),
],
)
self.assertCountEqual(
query.distributions,
[
Function(
"arrayElement",
[
Function(
"quantilesIf(0.75)",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
indexer.resolve(
UseCaseID.TRANSACTIONS,
self.organization.id,
constants.METRICS_MAP["transaction.duration"],
),
],
),
],
),
1,
],
"percentile_transaction_duration_0_75",
)
],
)
def test_metric_condition_dedupe(self) -> None:
org_id = 1
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"p50(transaction.duration)",
"p75(transaction.duration)",
"p90(transaction.duration)",
"p95(transaction.duration)",
"p99(transaction.duration)",
],
)
self.assertCountEqual(
query.where,
[
*self.default_conditions,
*_metric_conditions(org_id, ["transaction.duration"]),
],
)
def test_p100(self) -> None:
"""While p100 isn't an actual quantile in the distributions table, its equivalent to max"""
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"p100(transaction.duration)",
],
)
self.assertCountEqual(
query.where,
[
*self.default_conditions,
*_metric_conditions(
self.organization.id,
[
"transaction.duration",
],
),
],
)
self.assertCountEqual(
query.distributions,
[
Function(
"maxIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
indexer.resolve(
UseCaseID.TRANSACTIONS,
self.organization.id,
constants.METRICS_MAP["transaction.duration"],
),
],
),
],
"p100_transaction_duration",
)
],
)
def test_grouping(self) -> None:
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=["transaction", "project", "p95(transaction.duration)"],
)
self.assertCountEqual(
query.where,
[
*self.default_conditions,
*_metric_conditions(self.organization.id, ["transaction.duration"]),
],
)
transaction = self.build_transaction_transform("transaction")
project = AliasedExpression(
Column("project_id"),
"project",
)
self.assertCountEqual(
query.groupby,
[
transaction,
project,
],
)
self.assertCountEqual(
query.distributions, [_metric_percentile_definition(self.organization.id, "95")]
)
def test_transaction_filter(self) -> None:
query = MetricsQueryBuilder(
self.params,
query="transaction:foo_transaction",
dataset=Dataset.PerformanceMetrics,
selected_columns=["transaction", "project", "p95(transaction.duration)"],
)
transaction_name = resolve_tag_value(
UseCaseID.TRANSACTIONS, self.organization.id, "foo_transaction"
)
transaction = self.build_transaction_transform("transaction")
self.assertCountEqual(
query.where,
[
*self.default_conditions,
*_metric_conditions(self.organization.id, ["transaction.duration"]),
Condition(transaction, Op.EQ, transaction_name),
],
)
def test_transaction_in_filter(self) -> None:
query = MetricsQueryBuilder(
self.params,
query="transaction:[foo_transaction, bar_transaction]",
dataset=Dataset.PerformanceMetrics,
selected_columns=["transaction", "project", "p95(transaction.duration)"],
)
transaction_name1 = resolve_tag_value(
UseCaseID.TRANSACTIONS, self.organization.id, "foo_transaction"
)
transaction_name2 = resolve_tag_value(
UseCaseID.TRANSACTIONS, self.organization.id, "bar_transaction"
)
transaction = self.build_transaction_transform("transaction")
self.assertCountEqual(
query.where,
[
*self.default_conditions,
*_metric_conditions(self.organization.id, ["transaction.duration"]),
Condition(transaction, Op.IN, [transaction_name1, transaction_name2]),
],
)
def test_incorrect_parameter_for_metrics(self) -> None:
with pytest.raises(IncompatibleMetricsQuery):
MetricsQueryBuilder(
self.params,
query=f"project:{self.project.slug}",
dataset=Dataset.PerformanceMetrics,
selected_columns=["transaction", "count_unique(test)"],
)
def test_project_filter(self) -> None:
query = MetricsQueryBuilder(
self.params,
query=f"project:{self.project.slug}",
dataset=Dataset.PerformanceMetrics,
selected_columns=["transaction", "project", "p95(transaction.duration)"],
)
self.assertCountEqual(
query.where,
[
*self.default_conditions,
*_metric_conditions(self.organization.id, ["transaction.duration"]),
Condition(Column("project_id"), Op.EQ, self.project.id),
],
)
def test_limit_validation(self) -> None:
# 51 is ok
MetricsQueryBuilder(self.params, limit=51)
# None is ok, defaults to 50
query = MetricsQueryBuilder(self.params)
assert query.limit is not None
assert query.limit.limit == 50
# anything higher should throw an error
with pytest.raises(IncompatibleMetricsQuery):
MetricsQueryBuilder(self.params, limit=10_000)
def test_granularity(self) -> None:
# Need to pick granularity based on the period
def get_granularity(start, end):
params = {
"organization_id": self.organization.id,
"project_id": self.projects,
"start": start,
"end": end,
}
query = MetricsQueryBuilder(params)
return query.granularity.granularity
# If we're doing at least a day and it's midnight we should use the daily bucket
start = datetime.datetime(2015, 5, 18, 0, 0, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 19, 0, 0, 0, tzinfo=timezone.utc)
assert get_granularity(start, end) == 86400, "A day at midnight"
# If we're doing several days, allow more range
start = datetime.datetime(2015, 5, 18, 0, 10, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 28, 23, 59, 0, tzinfo=timezone.utc)
assert get_granularity(start, end) == 86400, "Several days"
# We're doing a long period, use the biggest granularity
start = datetime.datetime(2015, 5, 18, 12, 33, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 7, 28, 17, 22, 0, tzinfo=timezone.utc)
assert get_granularity(start, end) == 86400, "Big range"
# If we're on the start of the hour we should use the hour granularity
start = datetime.datetime(2015, 5, 18, 23, 0, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 20, 1, 0, 0, tzinfo=timezone.utc)
assert get_granularity(start, end) == 3600, "On the hour"
# If we're close to the start of the hour we should use the hour granularity
start = datetime.datetime(2015, 5, 18, 23, 3, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 21, 1, 57, 0, tzinfo=timezone.utc)
assert get_granularity(start, end) == 3600, "On the hour, close"
# A decently long period but not close to hour ends, still use hour bucket
start = datetime.datetime(2015, 5, 18, 23, 3, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 28, 1, 57, 0, tzinfo=timezone.utc)
assert get_granularity(start, end) == 3600, "On the hour, long period"
# Hour to hour should only happen at the precise hour
start = datetime.datetime(2015, 5, 18, 10, 0, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 18, 18, 0, 0, tzinfo=timezone.utc)
assert get_granularity(start, end) == 3600, "precisely hour to hour"
# Even a few seconds means we need to switch back to minutes since the latter bucket may not be filled
start = datetime.datetime(2015, 5, 18, 10, 0, 1, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 18, 18, 0, 1, tzinfo=timezone.utc)
assert get_granularity(start, end) == 60, "hour to hour but with seconds"
# Even though this is several hours of data, because it runs from a random minute within the hour we use minute
# granularity
start = datetime.datetime(2015, 5, 18, 10, 15, 1, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 18, 18, 15, 1, tzinfo=timezone.utc)
assert get_granularity(start, end) == 60, "A few hours, but random minute"
# Less than a minute, no reason to work hard for such a small window, just use a minute
start = datetime.datetime(2015, 5, 18, 10, 15, 1, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 18, 10, 15, 34, tzinfo=timezone.utc)
assert get_granularity(start, end) == 60, "less than a minute"
def test_granularity_boundaries(self) -> None:
# Need to pick granularity based on the period
def get_granularity(start, end):
params = {
"organization_id": self.organization.id,
"project_id": self.projects,
"start": start,
"end": end,
}
query = MetricsQueryBuilder(params)
return query.granularity.granularity
# See resolve_granularity on the MQB to see what these boundaries are
# Exactly 30d, at the 30 minute boundary
start = datetime.datetime(2015, 5, 1, 0, 30, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 31, 0, 30, 0, tzinfo=timezone.utc)
assert get_granularity(start, end) == 86400, "30d at boundary"
# Near 30d, but 1 hour before the boundary for end
start = datetime.datetime(2015, 5, 1, 0, 30, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 30, 23, 29, 0, tzinfo=timezone.utc)
assert get_granularity(start, end) == 3600, "near 30d, but 1 hour before boundary for end"
# Near 30d, but 1 hour after the boundary for start
start = datetime.datetime(2015, 5, 1, 1, 30, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 31, 0, 30, 0, tzinfo=timezone.utc)
assert get_granularity(start, end) == 3600, "near 30d, but 1 hour after boundary for start"
# Exactly 3d
start = datetime.datetime(2015, 5, 1, 0, 30, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 4, 0, 30, 0, tzinfo=timezone.utc)
assert get_granularity(start, end) == 86400, "3d at boundary"
# Near 3d, but 1 hour before the boundary for end
start = datetime.datetime(2015, 5, 1, 0, 13, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 3, 23, 45, 0, tzinfo=timezone.utc)
assert get_granularity(start, end) == 3600, "near 3d, but 1 hour before boundary for end"
# Near 3d, but 1 hour after the boundary for start
start = datetime.datetime(2015, 5, 1, 1, 46, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 4, 0, 46, 0, tzinfo=timezone.utc)
assert get_granularity(start, end) == 3600, "near 3d, but 1 hour after boundary for start"
# exactly 12 hours
start = datetime.datetime(2015, 5, 1, 0, 15, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 1, 12, 15, 0, tzinfo=timezone.utc)
assert get_granularity(start, end) == 3600, "12h at boundary"
# Near 12h, but 15 minutes before the boundary for end
start = datetime.datetime(2015, 5, 1, 0, 15, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 1, 12, 0, 0, tzinfo=timezone.utc)
assert (
get_granularity(start, end) == 60
), "12h at boundary, but 15 min before the boundary for end"
# Near 12h, but 15 minutes after the boundary for start
start = datetime.datetime(2015, 5, 1, 0, 30, 0, tzinfo=timezone.utc)
end = datetime.datetime(2015, 5, 1, 12, 15, 0, tzinfo=timezone.utc)
assert (
get_granularity(start, end) == 60
), "12h at boundary, but 15 min after the boundary for start"
def test_get_snql_query(self) -> None:
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=["p90(transaction.duration)"],
)
snql_request = query.get_snql_query()
assert snql_request.dataset == "generic_metrics"
snql_query = snql_request.query
self.assertCountEqual(
snql_query.select,
[
_metric_percentile_definition(self.organization.id, "90"),
],
)
self.assertCountEqual(
query.where,
[
*self.default_conditions,
*_metric_conditions(self.organization.id, ["transaction.duration"]),
],
)
def test_get_snql_query_errors_with_multiple_dataset(self) -> None:
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=["p90(transaction.duration)", "count_unique(user)"],
)
with pytest.raises(NotImplementedError):
query.get_snql_query()
def test_get_snql_query_errors_with_no_functions(self) -> None:
query = MetricsQueryBuilder(
self.params, query="", dataset=Dataset.PerformanceMetrics, selected_columns=["project"]
)
with pytest.raises(IncompatibleMetricsQuery):
query.get_snql_query()
def test_run_query(self) -> None:
self.store_transaction_metric(
100,
tags={"transaction": "foo_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
self.store_transaction_metric(
100,
metric="measurements.lcp",
tags={"transaction": "foo_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
self.store_transaction_metric(
1000,
metric="measurements.lcp",
tags={"transaction": "foo_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
query = MetricsQueryBuilder(
self.params,
query=f"project:{self.project.slug}",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"p95(transaction.duration)",
"p100(measurements.lcp)",
],
)
result = query.run_query("test_query")
assert len(result["data"]) == 1
assert result["data"][0] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"foo_transaction",
),
"p95_transaction_duration": 100,
"p100_measurements_lcp": 1000,
}
self.assertCountEqual(
result["meta"],
[
{"name": "transaction", "type": self.expected_tag_value_type},
{"name": "p95_transaction_duration", "type": "Float64"},
{"name": "p100_measurements_lcp", "type": "Float64"},
],
)
def test_run_query_multiple_tables(self) -> None:
self.store_transaction_metric(
100,
tags={"transaction": "foo_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
self.store_transaction_metric(
1,
metric="user",
tags={"transaction": "foo_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
query = MetricsQueryBuilder(
self.params,
query=f"project:{self.project.slug}",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"p95(transaction.duration)",
"count_unique(user)",
],
)
result = query.run_query("test_query")
assert len(result["data"]) == 1
assert result["data"][0] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"foo_transaction",
),
"p95_transaction_duration": 100,
"count_unique_user": 1,
}
self.assertCountEqual(
result["meta"],
[
{"name": "transaction", "type": self.expected_tag_value_type},
{"name": "p95_transaction_duration", "type": "Float64"},
{"name": "count_unique_user", "type": "UInt64"},
],
)
def test_run_query_with_multiple_groupby_orderby_distribution(self) -> None:
self.setup_orderby_data()
query = MetricsQueryBuilder(
self.params,
query=f"project:{self.project.slug}",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"project",
"p95(transaction.duration)",
"count_unique(user)",
],
orderby="-p95(transaction.duration)",
)
result = query.run_query("test_query")
assert len(result["data"]) == 2
assert result["data"][0] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"foo_transaction",
),
"project": self.project.id,
"p95_transaction_duration": 100,
"count_unique_user": 1,
}
assert result["data"][1] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"bar_transaction",
),
"project": self.project.id,
"p95_transaction_duration": 50,
"count_unique_user": 2,
}
self.assertCountEqual(
result["meta"],
[
{"name": "transaction", "type": self.expected_tag_value_type},
{"name": "project", "type": "UInt64"},
{"name": "p95_transaction_duration", "type": "Float64"},
{"name": "count_unique_user", "type": "UInt64"},
],
)
def test_run_query_with_multiple_groupby_orderby_set(self) -> None:
self.setup_orderby_data()
query = MetricsQueryBuilder(
self.params,
query=f"project:{self.project.slug}",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"project",
"p95(transaction.duration)",
"count_unique(user)",
],
orderby="-count_unique(user)",
)
result = query.run_query("test_query")
assert len(result["data"]) == 2
assert result["data"][0] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"bar_transaction",
),
"project": self.project.id,
"p95_transaction_duration": 50,
"count_unique_user": 2,
}
assert result["data"][1] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"foo_transaction",
),
"project": self.project.id,
"p95_transaction_duration": 100,
"count_unique_user": 1,
}
self.assertCountEqual(
result["meta"],
[
{"name": "transaction", "type": self.expected_tag_value_type},
{"name": "project", "type": "UInt64"},
{"name": "p95_transaction_duration", "type": "Float64"},
{"name": "count_unique_user", "type": "UInt64"},
],
)
def test_run_query_with_project_orderby(self) -> None:
project_1 = self.create_project(slug="aaaaaa")
project_2 = self.create_project(slug="zzzzzz")
for project in [project_1, project_2]:
self.store_transaction_metric(
100,
tags={"transaction": "foo_transaction"},
project=project.id,
timestamp=self.start + datetime.timedelta(minutes=5),
)
self.params["project_id"] = [project_1.id, project_2.id]
query = MetricsQueryBuilder(
self.params,
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"project",
"p95(transaction.duration)",
],
orderby="project",
)
result = query.run_query("test_query")
assert len(result["data"]) == 2
assert result["data"][0] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"foo_transaction",
),
"project": project_1.id,
"p95_transaction_duration": 100,
}
assert result["data"][1] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"foo_transaction",
),
"project": project_2.id,
"p95_transaction_duration": 100,
}
query = MetricsQueryBuilder(
self.params,
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"project",
"p95(transaction.duration)",
],
orderby="-project",
)
result = query.run_query("test_query")
assert len(result["data"]) == 2
assert result["data"][0] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"foo_transaction",
),
"project": project_2.id,
"p95_transaction_duration": 100,
}
assert result["data"][1] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"foo_transaction",
),
"project": project_1.id,
"p95_transaction_duration": 100,
}
def test_run_query_with_transactions_orderby(self) -> None:
for transaction_name in ["aaa", "zzz", "bbb"]:
self.store_transaction_metric(
100,
tags={"transaction": transaction_name},
project=self.project.id,
timestamp=self.start + datetime.timedelta(minutes=5),
)
query = MetricsQueryBuilder(
self.params,
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"project",
"p95(transaction.duration)",
],
orderby="-transaction",
)
result = query.run_query("test_query")
assert len(result["data"]) == 3
assert result["data"][0] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"zzz",
),
"project": self.project.id,
"p95_transaction_duration": 100,
}
assert result["data"][1] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"bbb",
),
"project": self.project.id,
"p95_transaction_duration": 100,
}
# TODO: multiple groupby with counter
def test_run_query_with_events_per_aggregates(self) -> None:
for i in range(5):
self.store_transaction_metric(
100, timestamp=self.start + datetime.timedelta(minutes=i * 15)
)
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"eps()",
"epm()",
"tps()",
"tpm()",
],
)
result = query.run_query("test_query")
data = result["data"][0]
# Check the aliases are correct
assert data["epm"] == data["tpm"]
assert data["eps"] == data["tps"]
# Check the values are correct
assert data["tpm"] == 5 / ((self.end - self.start).total_seconds() / 60)
assert data["tpm"] / 60 == data["tps"]
def test_count(self) -> None:
for _ in range(3):
self.store_transaction_metric(
150,
timestamp=self.start + datetime.timedelta(minutes=5),
)
self.store_transaction_metric(
50,
timestamp=self.start + datetime.timedelta(minutes=5),
)
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"count()",
],
)
result = query.run_query("test_query")
data = result["data"][0]
assert data["count"] == 6
def test_avg_duration(self) -> None:
for _ in range(3):
self.store_transaction_metric(
150,
timestamp=self.start + datetime.timedelta(minutes=5),
)
self.store_transaction_metric(
50,
timestamp=self.start + datetime.timedelta(minutes=5),
)
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"avg(transaction.duration)",
],
)
result = query.run_query("test_query")
data = result["data"][0]
assert data["avg_transaction_duration"] == 100
def test_avg_span_http(self) -> None:
for _ in range(3):
self.store_transaction_metric(
150,
metric="spans.http",
timestamp=self.start + datetime.timedelta(minutes=5),
)
self.store_transaction_metric(
50,
metric="spans.http",
timestamp=self.start + datetime.timedelta(minutes=5),
)
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"avg(spans.http)",
],
)
result = query.run_query("test_query")
data = result["data"][0]
assert data["avg_spans_http"] == 100
def test_failure_rate(self) -> None:
for _ in range(3):
self.store_transaction_metric(
100,
tags={"transaction.status": "internal_error"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
self.store_transaction_metric(
100,
tags={"transaction.status": "ok"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"failure_rate()",
"failure_count()",
],
)
result = query.run_query("test_query")
data = result["data"][0]
assert data["failure_rate"] == 0.5
assert data["failure_count"] == 3
def test_run_function_without_having_or_groupby(self) -> None:
self.store_transaction_metric(
1,
metric="user",
tags={"transaction": "foo_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"count_unique(user)",
],
)
primary, result = query._create_query_framework()
assert primary == "set"
def test_run_query_with_multiple_groupby_orderby_null_values_in_second_entity(self) -> None:
"""Since the null value is on count_unique(user) we will still get baz_transaction since we query distributions
first which will have it, and then just not find a unique count in the second"""
self.setup_orderby_data()
self.store_transaction_metric(
200,
tags={"transaction": "baz_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
query = MetricsQueryBuilder(
self.params,
query=f"project:{self.project.slug}",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"project",
"p95(transaction.duration)",
"count_unique(user)",
],
orderby="p95(transaction.duration)",
)
result = query.run_query("test_query")
assert len(result["data"]) == 3
assert result["data"][0] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"bar_transaction",
),
"project": self.project.id,
"p95_transaction_duration": 50,
"count_unique_user": 2,
}
assert result["data"][1] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"foo_transaction",
),
"project": self.project.id,
"p95_transaction_duration": 100,
"count_unique_user": 1,
}
assert result["data"][2] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"baz_transaction",
),
"project": self.project.id,
"p95_transaction_duration": 200,
"count_unique_user": 0,
}
self.assertCountEqual(
result["meta"],
[
{"name": "transaction", "type": self.expected_tag_value_type},
{"name": "project", "type": "UInt64"},
{"name": "p95_transaction_duration", "type": "Float64"},
{"name": "count_unique_user", "type": "UInt64"},
],
)
@pytest.mark.skip(
reason="Currently cannot handle the case where null values are in the first entity"
)
def test_run_query_with_multiple_groupby_orderby_null_values_in_first_entity(self) -> None:
"""But if the null value is in the first entity, it won't show up in the groupby values, which means the
transaction will be missing"""
self.setup_orderby_data()
self.store_transaction_metric(200, tags={"transaction": "baz_transaction"})
query = MetricsQueryBuilder(
self.params,
query=f"project:{self.project.slug}",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"project",
"p95(transaction.duration)",
"count_unique(user)",
],
orderby="count_unique(user)",
)
result = query.run_query("test_query")
assert len(result["data"]) == 3
assert result["data"][0] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"baz_transaction",
),
"project": self.project.id,
"p95_transaction_duration": 200,
}
assert result["data"][1] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"foo_transaction",
),
"project": self.project.id,
"p95_transaction_duration": 100,
"count_unique_user": 1,
}
assert result["data"][2] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"bar_transaction",
),
"project": self.project.id,
"p95_transaction_duration": 50,
"count_unique_user": 2,
}
def test_multiple_entity_orderby_fails(self) -> None:
with pytest.raises(IncompatibleMetricsQuery):
query = MetricsQueryBuilder(
self.params,
query=f"project:{self.project.slug}",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"project",
"p95(transaction.duration)",
"count_unique(user)",
],
orderby=["-count_unique(user)", "p95(transaction.duration)"],
)
query.run_query("test_query")
def test_multiple_entity_query_fails(self) -> None:
with pytest.raises(IncompatibleMetricsQuery):
MetricsQueryBuilder(
self.params,
query="p95(transaction.duration):>5s AND count_unique(user):>0",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"project",
"p95(transaction.duration)",
"count_unique(user)",
],
config=QueryBuilderConfig(
use_aggregate_conditions=True,
),
)
def test_query_entity_does_not_match_orderby(self) -> None:
with pytest.raises(IncompatibleMetricsQuery):
MetricsQueryBuilder(
self.params,
query="count_unique(user):>0",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"project",
"p95(transaction.duration)",
"count_unique(user)",
],
orderby=["p95(transaction.duration)"],
config=QueryBuilderConfig(
use_aggregate_conditions=True,
),
)
def test_aggregate_query_with_multiple_entities_without_orderby(self) -> None:
self.store_transaction_metric(
200,
tags={"transaction": "baz_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
self.store_transaction_metric(
1,
metric="user",
tags={"transaction": "bar_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
self.store_transaction_metric(
1,
metric="user",
tags={"transaction": "baz_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
self.store_transaction_metric(
2,
metric="user",
tags={"transaction": "baz_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
# This will query both sets & distributions because of the selected columns
query = MetricsQueryBuilder(
self.params,
# Filter by count_unique since the default primary is distributions without an orderby
query="count_unique(user):>1",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"project",
"p95(transaction.duration)",
"count_unique(user)",
],
config=QueryBuilderConfig(
allow_metric_aggregates=True,
use_aggregate_conditions=True,
),
)
result = query.run_query("test_query")
assert len(result["data"]) == 1
assert result["data"][0] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"baz_transaction",
),
"project": self.project.id,
"p95_transaction_duration": 200,
"count_unique_user": 2,
}
self.assertCountEqual(
result["meta"],
[
{"name": "transaction", "type": self.expected_tag_value_type},
{"name": "project", "type": "UInt64"},
{"name": "p95_transaction_duration", "type": "Float64"},
{"name": "count_unique_user", "type": "UInt64"},
],
)
def test_aggregate_query_with_multiple_entities_with_orderby(self) -> None:
self.store_transaction_metric(
200,
tags={"transaction": "baz_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
self.store_transaction_metric(
1,
tags={"transaction": "bar_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
self.store_transaction_metric(
1,
metric="user",
tags={"transaction": "baz_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
)
# This will query both sets & distributions because of the selected columns
query = MetricsQueryBuilder(
self.params,
query="p95(transaction.duration):>100",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"project",
"p95(transaction.duration)",
"count_unique(user)",
],
orderby=["p95(transaction.duration)"],
config=QueryBuilderConfig(
allow_metric_aggregates=True,
use_aggregate_conditions=True,
),
)
result = query.run_query("test_query")
assert len(result["data"]) == 1
assert result["data"][0] == {
"transaction": resolve_tag_value(
UseCaseID.TRANSACTIONS,
self.organization.id,
"baz_transaction",
),
"project": self.project.id,
"p95_transaction_duration": 200,
"count_unique_user": 1,
}
self.assertCountEqual(
result["meta"],
[
{"name": "transaction", "type": self.expected_tag_value_type},
{"name": "project", "type": "UInt64"},
{"name": "p95_transaction_duration", "type": "Float64"},
{"name": "count_unique_user", "type": "UInt64"},
],
)
def test_invalid_column_arg(self) -> None:
for function in [
"count_unique(transaction.duration)",
"count_miserable(measurements.fcp)",
"p75(user)",
"count_web_vitals(user, poor)",
]:
with pytest.raises(IncompatibleMetricsQuery):
MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=[function],
)
def test_orderby_field_alias(self) -> None:
query = MetricsQueryBuilder(
self.params,
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"p95()",
],
orderby=["p95"],
)
assert len(query.orderby) == 1
assert query.orderby[0].exp == _metric_percentile_definition(
self.organization.id, "95", "transaction.duration", "p95"
)
query = MetricsQueryBuilder(
self.params,
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"p95() as test",
],
orderby=["test"],
)
assert len(query.orderby) == 1
assert query.orderby[0].exp == _metric_percentile_definition(
self.organization.id, "95", "transaction.duration", "test"
)
def test_error_if_aggregates_disallowed(self) -> None:
def run_query(query, use_aggregate_conditions):
with pytest.raises(IncompatibleMetricsQuery):
MetricsQueryBuilder(
self.params,
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"p95()",
"count_unique(user)",
],
query=query,
config=QueryBuilderConfig(
allow_metric_aggregates=False,
use_aggregate_conditions=use_aggregate_conditions,
),
)
queries = [
"p95():>5s",
"count_unique(user):>0",
"transaction:foo_transaction AND (!transaction:bar_transaction OR p95():>5s)",
]
for query in queries:
for use_aggregate_conditions in [True, False]:
run_query(query, use_aggregate_conditions)
def test_no_error_if_aggregates_disallowed_but_no_aggregates_included(self) -> None:
MetricsQueryBuilder(
self.params,
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"p95()",
"count_unique(user)",
],
query="transaction:foo_transaction",
config=QueryBuilderConfig(
allow_metric_aggregates=False,
use_aggregate_conditions=True,
),
)
MetricsQueryBuilder(
self.params,
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"p95()",
"count_unique(user)",
],
query="transaction:foo_transaction",
config=QueryBuilderConfig(
allow_metric_aggregates=False,
use_aggregate_conditions=False,
),
)
def test_multiple_dataset_but_no_data(self) -> None:
"""When there's no data from the primary dataset we shouldn't error out"""
result = MetricsQueryBuilder(
self.params,
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"p50()",
"count_unique(user)",
],
config=QueryBuilderConfig(
allow_metric_aggregates=False,
use_aggregate_conditions=True,
),
).run_query("test")
assert len(result["data"]) == 1
data = result["data"][0]
assert data["count_unique_user"] == 0
# Handled by the discover transform later, so it's fine that this is NaN
assert math.isnan(data["p50"])
@mock.patch("sentry.search.events.builder.metrics.indexer.resolve", return_value=-1)
def test_multiple_references_only_resolve_index_once(
self, mock_indexer: mock.MagicMock
) -> None:
MetricsQueryBuilder(
self.params,
query=f"project:{self.project.slug} transaction:foo_transaction transaction:foo_transaction",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"count_web_vitals(measurements.lcp, good)",
"count_web_vitals(measurements.lcp, good)",
"count_web_vitals(measurements.lcp, good)",
"count_web_vitals(measurements.lcp, good)",
"count_web_vitals(measurements.lcp, good)",
],
)
expected = [mock.call(UseCaseID.TRANSACTIONS, self.organization.id, "transaction")]
expected.extend(
[
mock.call(
UseCaseID.TRANSACTIONS,
self.organization.id,
constants.METRICS_MAP["measurements.lcp"],
),
mock.call(UseCaseID.TRANSACTIONS, self.organization.id, "measurement_rating"),
]
)
self.assertCountEqual(mock_indexer.mock_calls, expected)
def test_custom_measurement_allowed(self) -> None:
MetricsQueryBuilder(
self.params,
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"transaction",
"avg(measurements.custom.measurement)",
"p50(measurements.custom.measurement)",
"p75(measurements.custom.measurement)",
"p90(measurements.custom.measurement)",
"p95(measurements.custom.measurement)",
"p99(measurements.custom.measurement)",
"p100(measurements.custom.measurement)",
"percentile(measurements.custom.measurement, 0.95)",
"sum(measurements.custom.measurement)",
"max(measurements.custom.measurement)",
"min(measurements.custom.measurement)",
"count_unique(user)",
],
query="transaction:foo_transaction",
config=QueryBuilderConfig(
allow_metric_aggregates=False,
use_aggregate_conditions=True,
),
)
def test_group_by_not_in_select(self) -> None:
query = MetricsQueryBuilder(
self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=[
"p90(transaction.duration)",
"project",
],
groupby_columns=[
"transaction",
],
)
snql_query = query.get_snql_query().query
project = AliasedExpression(
Column("project_id"),
"project",
)
self.assertCountEqual(
snql_query.select,
[
_metric_percentile_definition(self.organization.id, "90"),
project,
],
)
self.assertCountEqual(
snql_query.groupby,
[project, self.build_transaction_transform("transaction")],
)
def test_missing_function(self) -> None:
with pytest.raises(IncompatibleMetricsQuery):
MetricsQueryBuilder(
self.params,
query="",
selected_columns=[
"count_all_the_things_that_i_want()",
"transaction",
],
groupby_columns=[
"transaction",
],
)
def test_event_type_query_condition(self) -> None:
query = MetricsQueryBuilder(
self.params,
query="event.type:transaction",
dataset=Dataset.PerformanceMetrics,
selected_columns=[],
)
self.assertCountEqual(query.where, self.default_conditions)
def test_invalid_event_type_query_condition(self) -> None:
with pytest.raises(IncompatibleMetricsQuery):
MetricsQueryBuilder(
self.params,
query="!event.type:transaction",
dataset=Dataset.PerformanceMetrics,
selected_columns=[],
)
@mock.patch(
"sentry.search.events.datasets.metrics.MetricsDatasetConfig.function_converter",
new_callable=mock.PropertyMock,
return_value={
"count_unique": fields.MetricsFunction(
"count_unique",
required_args=[fields.MetricArg("column", allowed_columns=["mocked_gauge"])],
snql_set=lambda args, alias: None, # Doesn't matter what this returns
)
},
)
@mock.patch.dict(
"sentry.search.events.builder.metrics.constants.METRICS_MAP",
{"mocked_gauge": "g:mock/mocked_gauge@none"},
)
def test_missing_function_implementation_for_metric_type(
self, _mocked_function_converter: mock.MagicMock
) -> None:
# Mocks count_unique to allow the mocked_gauge column
# but the metric type does not have a gauge implementation
with pytest.raises(IncompatibleMetricsQuery) as err:
MetricsQueryBuilder(
self.params,
dataset=Dataset.PerformanceMetrics,
query="",
selected_columns=[
"count_unique(mocked_gauge)",
],
)
assert str(err.value) == "The functions provided do not match the requested metric type"
def test_free_text_search(self) -> None:
query = MetricsQueryBuilder(
self.params,
dataset=None,
query="foo",
selected_columns=["count()"],
)
self.maxDiff = 100000
transaction_key = indexer.resolve(
UseCaseID.TRANSACTIONS, self.organization.id, "transaction"
)
self.assertCountEqual(
query.where,
[
Condition(
Function(
"positionCaseInsensitive",
[
Column(f"tags[{transaction_key}]"),
"foo",
],
),
Op.NEQ,
0,
),
Condition(
Column("metric_id"),
Op.IN,
[
indexer.resolve(
UseCaseID.TRANSACTIONS,
self.organization.id,
"d:transactions/duration@millisecond",
)
],
),
*self.default_conditions,
],
)
| MetricQueryBuilderTest |
python | ansible__ansible | lib/ansible/_internal/_ssh/_ssh_agent.py | {
"start": 11018,
"end": 11364
} | class ____(PrivateKeyMsg):
type: KeyAlgo
ecdsa_curve_name: unicode_string
Q: binary_string
d: mpint
comments: unicode_string = dataclasses.field(default=unicode_string(''), compare=False)
constraints: constraints = dataclasses.field(default=constraints(b''))
@dataclasses.dataclass(order=True, slots=True)
| EcdsaPrivateKeyMsg |
python | sympy__sympy | sympy/physics/quantum/pauli.py | {
"start": 2963,
"end": 4797
} | class ____(SigmaOpBase):
"""Pauli sigma y operator
Parameters
==========
name : str
An optional string that labels the operator. Pauli operators with
different names commute.
Examples
========
>>> from sympy.physics.quantum import represent
>>> from sympy.physics.quantum.pauli import SigmaY
>>> sy = SigmaY()
>>> sy
SigmaY()
>>> represent(sy)
Matrix([
[0, -I],
[I, 0]])
"""
def __new__(cls, *args, **hints):
return SigmaOpBase.__new__(cls, *args)
def _eval_commutator_SigmaZ(self, other, **hints):
if self.name != other.name:
return S.Zero
else:
return 2 * I * SigmaX(self.name)
def _eval_commutator_SigmaX(self, other, **hints):
if self.name != other.name:
return S.Zero
else:
return - 2 * I * SigmaZ(self.name)
def _eval_anticommutator_SigmaX(self, other, **hints):
return S.Zero
def _eval_anticommutator_SigmaZ(self, other, **hints):
return S.Zero
def _eval_adjoint(self):
return self
def _print_contents_latex(self, printer, *args):
if self.use_name:
return r'{\sigma_y^{(%s)}}' % str(self.name)
else:
return r'{\sigma_y}'
def _print_contents(self, printer, *args):
return 'SigmaY()'
def _eval_power(self, e):
if e.is_Integer and e.is_positive:
return SigmaY(self.name).__pow__(int(e) % 2)
def _represent_default_basis(self, **options):
format = options.get('format', 'sympy')
if format == 'sympy':
return Matrix([[0, -I], [I, 0]])
else:
raise NotImplementedError('Representation in format ' +
format + ' not implemented.')
| SigmaY |
python | PrefectHQ__prefect | tests/cli/test_work_queues.py | {
"start": 13532,
"end": 16042
} | class ____:
def test_inspect(self, work_queue):
invoke_and_assert(
command=f"work-queue inspect {work_queue.name}",
expected_output_contains=[
f"id='{work_queue.id}'",
f"name={work_queue.name!r}",
],
expected_code=0,
)
def test_inspect_by_id(self, work_queue):
invoke_and_assert(
command=f"work-queue inspect {work_queue.id}",
expected_output_contains=[
f"id='{work_queue.id}'",
f"name={work_queue.name!r}",
],
expected_code=0,
)
def test_inspect_with_pool(
self,
work_queue_1,
):
cmd = f"work-queue inspect {work_queue_1.name} -p {work_queue_1.work_pool.name}"
invoke_and_assert(
command=cmd,
expected_output_contains=[
f"id='{work_queue_1.id}'",
f"name={work_queue_1.name!r}",
],
expected_code=0,
)
# Tests all of the above, but with bad input
def test_inspect_bad_input_work_queue_name(self, work_queue):
invoke_and_assert(
command=f"work-queue inspect {work_queue.name}-bad",
expected_code=1,
)
def test_inspect_bad_input_work_queue_id(self, work_queue):
invoke_and_assert(
command=f"work-queue inspect {work_queue.id}-bad",
expected_code=1,
)
def test_inspect_bad_input_work_pool(
self,
work_queue_1,
):
cmd = (
f"work-queue inspect {work_queue_1.name} "
f"-p {work_queue_1.work_pool.name}-bad"
)
invoke_and_assert(
command=cmd,
expected_code=1,
)
def test_inspect_with_json_output(self, work_queue):
"""Test work-queue inspect command with JSON output flag."""
import json
result = invoke_and_assert(
command=f"work-queue inspect {work_queue.name} --output json",
expected_code=0,
)
# Parse JSON output and verify it's valid JSON
output_data = json.loads(result.stdout.strip())
# Verify key fields are present
assert "id" in output_data
assert "name" in output_data
assert "status_details" in output_data # Combined status information
assert output_data["id"] == str(work_queue.id)
assert output_data["name"] == work_queue.name
| TestInspectWorkQueue |
python | networkx__networkx | networkx/algorithms/shortest_paths/tests/test_generic.py | {
"start": 19930,
"end": 20609
} | class ____:
@classmethod
def setup_class(cls):
global np
import pytest
np = pytest.importorskip("numpy")
def test_specified_methods_numpy(self):
G = nx.Graph()
nx.add_cycle(G, range(7), weight=2)
ans = nx.average_shortest_path_length(
G, weight="weight", method="floyd-warshall-numpy"
)
np.testing.assert_almost_equal(ans, 4)
G = nx.Graph()
nx.add_path(G, range(5), weight=2)
ans = nx.average_shortest_path_length(
G, weight="weight", method="floyd-warshall-numpy"
)
np.testing.assert_almost_equal(ans, 4)
| TestAverageShortestPathLengthNumpy |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0108_migrate_language_code.py | {
"start": 610,
"end": 831
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0107_alter_project_language"),
]
operations = [
migrations.RunPython(forwards_func),
]
| Migration |
python | PrefectHQ__prefect | tests/runner/test_runner.py | {
"start": 2885,
"end": 2992
} | class ____:
@flow
@staticmethod
def dummy_flow_staticmethod():
pass
| ClassNameStaticmethod |
python | python-markdown__markdown | tests/test_apis.py | {
"start": 4888,
"end": 6170
} | class ____(unittest.TestCase):
""" Tests of the State class for `BlockParser`. """
def setUp(self):
self.state = markdown.blockparser.State()
def testBlankState(self):
""" Test State when empty. """
self.assertEqual(self.state, [])
def testSetSate(self):
""" Test State.set(). """
self.state.set('a_state')
self.assertEqual(self.state, ['a_state'])
self.state.set('state2')
self.assertEqual(self.state, ['a_state', 'state2'])
def testIsSate(self):
""" Test `State.isstate()`. """
self.assertEqual(self.state.isstate('anything'), False)
self.state.set('a_state')
self.assertEqual(self.state.isstate('a_state'), True)
self.state.set('state2')
self.assertEqual(self.state.isstate('state2'), True)
self.assertEqual(self.state.isstate('a_state'), False)
self.assertEqual(self.state.isstate('missing'), False)
def testReset(self):
""" Test `State.reset()`. """
self.state.set('a_state')
self.state.reset()
self.assertEqual(self.state, [])
self.state.set('state1')
self.state.set('state2')
self.state.reset()
self.assertEqual(self.state, ['state1'])
| TestBlockParserState |
python | jazzband__django-oauth-toolkit | oauth2_provider/views/mixins.py | {
"start": 424,
"end": 7217
} | class ____:
"""
This mixin decouples Django OAuth Toolkit from OAuthLib.
Users can configure the Server, Validator and OAuthlibCore
classes used by this mixin by setting the following class
variables:
* server_class
* validator_class
* oauthlib_backend_class
If these class variables are not set, it will fall back to using the classes
specified in oauth2_settings (OAUTH2_SERVER_CLASS, OAUTH2_VALIDATOR_CLASS
and OAUTH2_BACKEND_CLASS).
"""
server_class = None
validator_class = None
oauthlib_backend_class = None
@classmethod
def get_server_class(cls):
"""
Return the OAuthlib server class to use
"""
if cls.server_class is None:
return oauth2_settings.OAUTH2_SERVER_CLASS
else:
return cls.server_class
@classmethod
def get_validator_class(cls):
"""
Return the RequestValidator implementation class to use
"""
if cls.validator_class is None:
return oauth2_settings.OAUTH2_VALIDATOR_CLASS
else:
return cls.validator_class
@classmethod
def get_oauthlib_backend_class(cls):
"""
Return the OAuthLibCore implementation class to use
"""
if cls.oauthlib_backend_class is None:
return oauth2_settings.OAUTH2_BACKEND_CLASS
else:
return cls.oauthlib_backend_class
@classmethod
def get_server(cls):
"""
Return an instance of `server_class` initialized with a `validator_class`
object
"""
server_class = cls.get_server_class()
validator_class = cls.get_validator_class()
server_kwargs = oauth2_settings.server_kwargs
return server_class(validator_class(), **server_kwargs)
@classmethod
def get_oauthlib_core(cls):
"""
Cache and return `OAuthlibCore` instance so it will be created only on first request
unless ALWAYS_RELOAD_OAUTHLIB_CORE is True.
"""
if not hasattr(cls, "_oauthlib_core") or oauth2_settings.ALWAYS_RELOAD_OAUTHLIB_CORE:
server = cls.get_server()
core_class = cls.get_oauthlib_backend_class()
cls._oauthlib_core = core_class(server)
return cls._oauthlib_core
def validate_authorization_request(self, request):
"""
A wrapper method that calls validate_authorization_request on `server_class` instance.
:param request: The current django.http.HttpRequest object
"""
core = self.get_oauthlib_core()
return core.validate_authorization_request(request)
def create_authorization_response(self, request, scopes, credentials, allow):
"""
A wrapper method that calls create_authorization_response on `server_class`
instance.
:param request: The current django.http.HttpRequest object
:param scopes: A space-separated string of provided scopes
:param credentials: Authorization credentials dictionary containing
`client_id`, `state`, `redirect_uri` and `response_type`
:param allow: True if the user authorize the client, otherwise False
"""
# TODO: move this scopes conversion from and to string into a utils function
scopes = scopes.split(" ") if scopes else []
core = self.get_oauthlib_core()
return core.create_authorization_response(request, scopes, credentials, allow)
def create_device_authorization_response(self, request: HttpRequest):
"""
A wrapper method that calls create_device_authorization_response on `server_class`
instance.
:param request: The current django.http.HttpRequest object
"""
core = self.get_oauthlib_core()
return core.create_device_authorization_response(request)
def create_token_response(self, request):
"""
A wrapper method that calls create_token_response on `server_class` instance.
:param request: The current django.http.HttpRequest object
"""
core = self.get_oauthlib_core()
return core.create_token_response(request)
def create_revocation_response(self, request):
"""
A wrapper method that calls create_revocation_response on the
`server_class` instance.
:param request: The current django.http.HttpRequest object
"""
core = self.get_oauthlib_core()
return core.create_revocation_response(request)
def create_userinfo_response(self, request):
"""
A wrapper method that calls create_userinfo_response on the
`server_class` instance.
:param request: The current django.http.HttpRequest object
"""
core = self.get_oauthlib_core()
return core.create_userinfo_response(request)
def verify_request(self, request):
"""
A wrapper method that calls verify_request on `server_class` instance.
:param request: The current django.http.HttpRequest object
"""
core = self.get_oauthlib_core()
try:
return core.verify_request(request, scopes=self.get_scopes())
except ValueError as error:
if str(error) == "Invalid hex encoding in query string.":
raise SuspiciousOperation(error)
else:
raise
def get_scopes(self):
"""
This should return the list of scopes required to access the resources.
By default it returns an empty list.
"""
return []
def error_response(self, error, **kwargs):
"""
Return an error to be displayed to the resource owner if anything goes awry.
:param error: :attr:`OAuthToolkitError`
"""
oauthlib_error = error.oauthlib_error
redirect_uri = oauthlib_error.redirect_uri or ""
separator = "&" if "?" in redirect_uri else "?"
error_response = {
"error": oauthlib_error,
"url": redirect_uri + separator + oauthlib_error.urlencoded,
}
error_response.update(kwargs)
# If we got a malicious redirect_uri or client_id, we will *not* redirect back to the URL.
if isinstance(error, FatalClientError):
redirect = False
else:
redirect = True
return redirect, error_response
def authenticate_client(self, request):
"""Returns a boolean representing if client is authenticated with client credentials
method. Returns `True` if authenticated.
:param request: The current django.http.HttpRequest object
"""
core = self.get_oauthlib_core()
return core.authenticate_client(request)
| OAuthLibMixin |