language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | google__pytype | pytype/inspect/graph_test.py | {
"start": 117,
"end": 656
} | class ____(unittest.TestCase):
def setUp(self):
super().setUp()
self.prog = cfg.Program()
self.current_location = self.prog.NewCFGNode()
def test_program_to_dot(self):
v1 = self.prog.NewVariable()
b = v1.AddBinding("x", [], self.current_location)
n = self.current_location.ConnectNew()
v2 = self.prog.NewVariable()
v2.AddBinding("y", {b}, n)
# smoke test
tg = graph.TypeGraph(self.prog, set(), False)
assert isinstance(tg.to_dot(), str)
if __name__ == "__main__":
unittest.main()
| GraphTest |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/comments.py | {
"start": 33548,
"end": 36296
} | class ____(CommentedBase, Mapping): # type: ignore
__slots__ = Comment.attrib, '_od'
"""This primarily exists to be able to roundtrip keys that are mappings"""
def __init__(self, *args, **kw):
# type: (Any, Any) -> None
if hasattr(self, '_od'):
raise_immutable(self)
try:
self._od = ordereddict(*args, **kw)
except TypeError:
raise
__delitem__ = __setitem__ = clear = pop = popitem = setdefault = update = raise_immutable
# need to implement __getitem__, __iter__ and __len__
def __getitem__(self, index):
# type: (Any) -> Any
return self._od[index]
def __iter__(self):
# type: () -> Iterator[Any]
for x in self._od.__iter__():
yield x
def __len__(self):
# type: () -> int
return len(self._od)
def __hash__(self):
# type: () -> Any
return hash(tuple(self.items()))
def __repr__(self):
# type: () -> Any
if not hasattr(self, merge_attrib):
return self._od.__repr__()
return 'ordereddict(' + repr(list(self._od.items())) + ')'
@classmethod
def fromkeys(keys, v=None):
# type: (Any, Any) -> Any
return CommentedKeyMap(dict.fromkeys(keys, v))
def _yaml_add_comment(self, comment, key=NoComment):
# type: (Any, Optional[Any]) -> None
if key is not NoComment:
self.yaml_key_comment_extend(key, comment)
else:
self.ca.comment = comment
def _yaml_add_eol_comment(self, comment, key):
# type: (Any, Any) -> None
self._yaml_add_comment(comment, key=key)
def _yaml_get_columnX(self, key):
# type: (Any) -> Any
return self.ca.items[key][0].start_mark.column
def _yaml_get_column(self, key):
# type: (Any) -> Any
column = None
sel_idx = None
pre, post = key - 1, key + 1
if pre in self.ca.items:
sel_idx = pre
elif post in self.ca.items:
sel_idx = post
else:
# self.ca.items is not ordered
for row_idx, _k1 in enumerate(self):
if row_idx >= key:
break
if row_idx not in self.ca.items:
continue
sel_idx = row_idx
if sel_idx is not None:
column = self._yaml_get_columnX(sel_idx)
return column
def _yaml_get_pre_comment(self):
# type: () -> Any
pre_comments = [] # type: List[Any]
if self.ca.comment is None:
self.ca.comment = [None, pre_comments]
else:
self.ca.comment[1] = pre_comments
return pre_comments
| CommentedKeyMap |
python | jazzband__django-formtools | tests/wizard/test_forms.py | {
"start": 1522,
"end": 1975
} | class ____(WizardView):
storage_name = 'formtools.wizard.storage.session.SessionStorage'
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
return response, self
def get_form_kwargs(self, step, *args, **kwargs):
kwargs = super().get_form_kwargs(step, *args, **kwargs)
if step == 'kwargs_test':
kwargs['test'] = True
return kwargs
| TestWizard |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels10.py | {
"start": 315,
"end": 1636
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels10.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "scatter"})
chart.axis_ids = [45740416, 45705856]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"data_labels": {"value": 1, "position": "left"},
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
"data_labels": {"value": 1, "position": "center"},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 7470,
"end": 7686
} | class ____(ShowFieldType, PolymorphicModel):
bar = models.CharField(max_length=300)
def __init__(self, *args, **kwargs):
kwargs["bar"] = self.x()
super().__init__(*args, **kwargs)
| InitTestModel |
python | run-llama__llama_index | llama-index-instrumentation/src/llama_index_instrumentation/event_handlers/null.py | {
"start": 117,
"end": 402
} | class ____(BaseEventHandler):
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "NullEventHandler"
def handle(self, event: BaseEvent, **kwargs: Any) -> Any:
"""Handle logic - null handler does nothing."""
return
| NullEventHandler |
python | astropy__astropy | astropy/uncertainty/core.py | {
"start": 27505,
"end": 27698
} | class ____(_DistributionRepr, ArrayDistribution):
pass
# Ensure our base NdarrayDistribution is known.
Distribution._generated_subclasses[np.ndarray] = NdarrayDistribution
| NdarrayDistribution |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/argparsing/parsers.py | {
"start": 9515,
"end": 10157
} | class ____(DynamicChoicesParser):
"""Composite argument parser which relies on a static list of choices."""
def __init__(self, choices: list[str], conditions: MatchConditions = MatchConditions.CHOICE) -> None:
self.choices = choices
super().__init__(conditions=conditions)
def get_choices(self, value: str) -> list[str]:
"""Return a list of valid choices based on the given input value."""
return self.choices
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
return '|'.join(self.choices)
| ChoicesParser |
python | pytorch__pytorch | test/inductor/test_custom_lowering.py | {
"start": 643,
"end": 8847
} | class ____(InductorTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.test_inductor_ops = torch.library.Library( # noqa: TOR901
"test_inductor_ops", "DEF"
)
cls.device_list = ["Meta", "CUDA", "XPU"]
for device in cls.device_list:
setattr(
cls,
"impl_" + device.lower(),
torch.library.Library( # noqa: TOR901
"test_inductor_ops", "IMPL", device
),
)
cls._register_jagged_to_padded_dense()
cls._register_asm_op()
@classmethod
def tearDown(cls):
super().tearDownClass()
@classmethod
def _register_jagged_to_padded_dense(cls):
# Approximation of fbgemm.jagged_to_padded_dense_forward
cls.test_inductor_ops.define(
"jagged_to_padded_dense(Tensor input, Tensor offsets, SymInt max_seq_len, Scalar pad_value) -> Tensor"
)
def j2pd_meta(inp, offsets, max_seq_len, pad_value):
return torch.empty(
(offsets.shape[0] - 1, max_seq_len, inp.shape[1]),
device=inp.device,
dtype=inp.dtype,
)
def j2pd_gpu(inp, offsets, max_seq_len, pad_value):
res = torch.full(
(offsets.shape[0] - 1, max_seq_len, inp.shape[1]),
pad_value,
device=inp.device,
dtype=inp.dtype,
)
for b in range(offsets.shape[0] - 1):
for r in range(offsets[b + 1] - offsets[b]):
res[b][r] = inp[offsets[b] + r]
return res
def j2pd_lowering(inp, offsets, max_seq_len, pad_value):
offsets_loader = offsets.make_loader()
inp_loader = inp.make_loader()
jagged_len = inp.get_size()[0]
offsets_dtype = offsets.get_dtype()
def inner_fn(index):
batch_idx, seq_idx, emb_idx = index
begin_idx = ops.indirect_indexing(
offsets_loader([batch_idx]),
jagged_len + 1,
)
end_idx = offsets_loader([batch_idx + 1])
jagged_idx = begin_idx + seq_idx
return ops.masked(
ops.lt(
ops.index_expr(jagged_idx, offsets_dtype),
end_idx,
),
lambda: inp_loader([jagged_idx, emb_idx]),
pad_value,
)
return Pointwise.create(
device=inp.get_device(),
dtype=inp.get_dtype(),
inner_fn=inner_fn,
ranges=[offsets.get_size()[0] - 1, max_seq_len, inp.get_size()[1]],
)
register_lowering(
torch.ops.test_inductor_ops.jagged_to_padded_dense, type_promotion_kind=None
)(j2pd_lowering)
cls.impl_meta.impl("jagged_to_padded_dense", j2pd_meta)
cls.impl_cuda.impl("jagged_to_padded_dense", j2pd_gpu)
cls.impl_xpu.impl("jagged_to_padded_dense", j2pd_gpu)
@classmethod
def _register_asm_op(cls):
# Approximation of fbgemm.jagged_to_padded_dense_forward
cls.test_inductor_ops.define("tanh_approx(Tensor input) -> Tensor")
def tanh_approx_meta(inp):
return torch.tanh(inp)
cls.impl_meta.impl("tanh_approx", tanh_approx_meta)
def tanh_approx_lowering(inp):
fn = partial(ops.inline_asm_elementwise, asm="tanh.approx.f32 $0, $1;")
return make_pointwise(fn)(inp)
register_lowering(
torch.ops.test_inductor_ops.tanh_approx, type_promotion_kind=None
)(tanh_approx_lowering)
cls.test_inductor_ops.define("add_custom(Tensor a, Tensor b) -> Tensor")
def add_custom(a, b):
return a + b
cls.impl_meta.impl("add_custom", add_custom)
def add_custom_lowering(a, b):
fn = partial(ops.inline_asm_elementwise, asm="add.f32 $0, $1, $2;")
return make_pointwise(fn)(a, b)
register_lowering(
torch.ops.test_inductor_ops.add_custom, type_promotion_kind=None
)(add_custom_lowering)
def test_register_lowering_custom_dict(self):
custom_lowering_dict = {}
from torch._inductor.lowering import register_lowering
@torch.library.custom_op("helion_test::foo", mutates_args={})
def foo(x: torch.Tensor) -> torch.Tensor:
return x
@register_lowering(
torch.ops.helion_test.foo, lowering_dict=custom_lowering_dict
)
def foo_lowering(x):
return x
assert torch.ops.helion_test.foo in custom_lowering_dict
assert torch.ops.helion_test.foo not in torch._inductor.lowering.lowerings
@requires_gpu()
@skipIf(GPU_TYPE == "mps", "Not applicable to MPS")
def test_jagged_to_padded_dense_sanity_cuda(self):
def fn(inp, offsets, max_seq_len):
return torch.ops.test_inductor_ops.jagged_to_padded_dense(
inp, offsets, max_seq_len, 60.0
)
inp = torch.rand((9, 96), device=GPU_TYPE)
offsets = torch.tensor([0, 2, 5, 9], dtype=torch.int32, device=GPU_TYPE)
max_seq_len = 4
res = fn(inp, offsets, max_seq_len)
self.assertEqual(inp[0], res[0][0])
self.assertEqual(inp[1], res[0][1])
self.assertEqual(inp[2], res[1][0])
self.assertEqual(inp[3], res[1][1])
self.assertEqual(inp[5], res[2][0])
self.assertEqual(inp[8], res[2][3])
fn_opt = torch.compile(fn)
self.assertEqual(
fn(inp, offsets, max_seq_len), fn_opt(inp, offsets, max_seq_len)
)
@requires_gpu()
@skipIf(GPU_TYPE == "mps", "Not applicable to MPS")
def test_jagged_to_padded_dense_zero_size(self):
# Previously, the masking was being completely stripped for the
# masked load of the input value. That would lead to an IMA
# because cuda was trying to read index 0 of a zero-size tensor.
def fn(inp, offsets, max_seq_len):
inp = torch.bmm(inp, torch.ones((1, 96, 1), device=GPU_TYPE)).view((0, 1))
return torch.ops.test_inductor_ops.jagged_to_padded_dense(
inp, offsets, max_seq_len, 60.0
)
inp = torch.rand((1, 0, 96), device=GPU_TYPE)
offsets = torch.zeros(1025, device=GPU_TYPE, dtype=torch.int32)
max_seq_len = 20
fn_opt = torch.compile(fn)
self.assertEqual(
fn(inp, offsets, max_seq_len), fn_opt(inp, offsets, max_seq_len)
)
@requires_gpu()
@skipIfRocm
@skipIfXpu(msg="https://github.com/intel/torch-xpu-ops/issues/2328")
@skipIf(GPU_TYPE == "mps", "Not applicable to MPS")
def test_tanh_approx(self):
def fn(inp):
return torch.ops.test_inductor_ops.tanh_approx(inp)
inp = torch.randn(32, device=GPU_TYPE)
fn_opt = torch.compile(fn)
a = torch.tanh(inp)
b = fn_opt(inp)
self.assertEqual(a, b)
@requires_gpu()
@skipIfRocm
@skipIfXpu(msg="https://github.com/intel/torch-xpu-ops/issues/2328")
@skipIf(GPU_TYPE == "mps", "Not applicable to MPS")
def test_multi_inp_asm(self):
def fn(a, b):
return torch.ops.test_inductor_ops.add_custom(a, b)
a = torch.randn(32, device=GPU_TYPE)
b = torch.randn(32, device=GPU_TYPE)
fn_opt = torch.compile(fn)
out1 = a + b
out2 = fn_opt(a, b)
self.assertEqual(out1, out2)
@config.patch(joint_graph_constant_folding=False)
def test_constant_creation(self):
class M(torch.nn.Module):
def forward(self, x):
return x + torch.tensor(1)
make_fallback(torch.ops.aten.lift_fresh_copy.default)
self.assertTrue(
torch.allclose(torch.compile(M())(torch.ones(3)), torch.ones(3) + 1)
)
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
if HAS_CPU or HAS_GPU:
run_tests(needs="filelock")
| TestCustomLowering |
python | realpython__materials | pygame-a-primer/py_tutfinal.py | {
"start": 1580,
"end": 4228
} | class ____(pygame.sprite.Sprite):
def __init__(self):
super(Enemy, self).__init__()
self.surf = pygame.Surface((20, 10))
self.surf.fill((255, 255, 255))
self.rect = self.surf.get_rect(
center=(
random.randint(SCREEN_WIDTH + 20, SCREEN_WIDTH + 100),
random.randint(0, SCREEN_HEIGHT),
)
)
self.speed = random.randint(5, 20)
# Move the sprite based on speed
# Remove it when it passes the left edge of the screen
def update(self):
self.rect.move_ip(-self.speed, 0)
if self.rect.right < 0:
self.kill()
# Initialize pygame
pygame.init()
# Create the screen object
# The size is determined by the constant SCREEN_WIDTH and SCREEN_HEIGHT
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
# Create a custom event for adding a new enemy.
ADDENEMY = pygame.USEREVENT + 1
pygame.time.set_timer(ADDENEMY, 250)
# Create our 'player'
player = Player()
# Create groups to hold enemy sprites, and every sprite
# - enemies is used for collision detection and position updates
# - all_sprites is used for rendering
enemies = pygame.sprite.Group()
all_sprites = pygame.sprite.Group()
all_sprites.add(player)
# Variable to keep our main loop running
running = True
# Our main loop
while running:
# Look at every event in the queue
for event in pygame.event.get():
# Did the user hit a key?
if event.type == KEYDOWN:
# Was it the Escape key? If so, stop the loop
if event.key == K_ESCAPE:
running = False
# Did the user click the window close button? If so, stop the loop
elif event.type == QUIT:
running = False
# Should we add a new enemy?
elif event.type == ADDENEMY:
# Create the new enemy, and add it to our sprite groups
new_enemy = Enemy()
enemies.add(new_enemy)
all_sprites.add(new_enemy)
# Get the set of keys pressed and check for user input
pressed_keys = pygame.key.get_pressed()
player.update(pressed_keys)
# Update the position of our enemies
enemies.update()
# Fill the screen with black
screen.fill((0, 0, 0))
# Draw all our sprites
for entity in all_sprites:
screen.blit(entity.surf, entity.rect)
# Check if any enemies have collided with the player
if pygame.sprite.spritecollideany(player, enemies):
# If so, remove the player and stop the loop
player.kill()
running = False
# Flip everything to the display
pygame.display.flip()
| Enemy |
python | numpy__numpy | benchmarks/benchmarks/bench_ufunc_strides.py | {
"start": 3791,
"end": 3894
} | class ____(UnaryFP):
data_finite = False
data_denormal = True
data_zeros = True
| UnaryFPSpecial |
python | getsentry__sentry | src/sentry/api/permissions.py | {
"start": 1581,
"end": 1854
} | class ____(BasePermission):
"""
This permission class is used for endpoints that should ONLY be accessible
by superuser.
"""
def has_permission(self, request: Request, view: object) -> bool:
return is_active_superuser(request)
| SuperuserPermission |
python | pypa__pip | src/pip/_internal/exceptions.py | {
"start": 9614,
"end": 10383
} | class ____(PipError):
"""HTTP connection error"""
def __init__(
self,
error_msg: str,
response: Response | None = None,
request: Request | PreparedRequest | None = None,
) -> None:
"""
Initialize NetworkConnectionError with `request` and `response`
objects.
"""
self.response = response
self.request = request
self.error_msg = error_msg
if (
self.response is not None
and not self.request
and hasattr(response, "request")
):
self.request = self.response.request
super().__init__(error_msg, response, request)
def __str__(self) -> str:
return str(self.error_msg)
| NetworkConnectionError |
python | getsentry__sentry | src/sentry/incidents/endpoints/serializers/alert_rule.py | {
"start": 14474,
"end": 19974
} | class ____(Serializer):
def __init__(self, expand: list[str] | None = None):
self.expand = expand or []
def get_attrs(
self, item_list: Sequence[Any], user: User | RpcUser | AnonymousUser, **kwargs: Any
) -> MutableMapping[Any, Any]:
results = super().get_attrs(item_list, user)
alert_rules = [x for x in item_list if isinstance(x, AlertRule)]
incident_map = {}
if "latestIncident" in self.expand:
for incident in Incident.objects.filter(id__in=[x.incident_id for x in alert_rules]): # type: ignore[attr-defined]
incident_map[incident.id] = serialize(incident, user=user)
serialized_alert_rules = serialize(alert_rules, user=user)
serialized_alert_rule_map_by_id = {
serialized_alert["id"]: serialized_alert for serialized_alert in serialized_alert_rules
}
serialized_issue_rules = serialize(
[x for x in item_list if isinstance(x, Rule)],
user=user,
serializer=RuleSerializer(expand=self.expand),
)
serialized_issue_rule_map_by_id = {
serialized_rule["id"]: serialized_rule for serialized_rule in serialized_issue_rules
}
uptime_detectors = [
x
for x in item_list
if isinstance(x, Detector) and x.type == GROUP_TYPE_UPTIME_DOMAIN_CHECK_FAILURE
]
serialized_uptime_detectors = serialize(
uptime_detectors,
user=user,
serializer=UptimeDetectorSerializer(),
)
serialized_uptime_detector_map_by_id = {
item["id"]: item for item in serialized_uptime_detectors
}
serialized_cron_monitors = serialize(
[x for x in item_list if isinstance(x, Monitor)],
user=user,
)
serialized_cron_monitor_map_by_guid = {
item["id"]: item for item in serialized_cron_monitors
}
for item in item_list:
item_id = str(item.id)
if isinstance(item, AlertRule) and item_id in serialized_alert_rule_map_by_id:
# This is a metric alert rule
serialized_alert_rule = serialized_alert_rule_map_by_id[item_id]
if "latestIncident" in self.expand:
# Eg. we _have_ an incident
try:
serialized_alert_rule["latestIncident"] = incident_map.get(item.incident_id) # type: ignore[attr-defined]
except AttributeError as e:
logger.exception(
"incident serialization error",
extra={
"exception": e,
"alert_rule_id": item_id,
"is_metric_alert": isinstance(item, AlertRule),
"is_issue_alert": isinstance(item, Rule),
},
)
results[item] = serialized_alert_rule
elif isinstance(item, Rule) and item_id in serialized_issue_rule_map_by_id:
# This is an issue alert rule
results[item] = serialized_issue_rule_map_by_id[item_id]
elif (
isinstance(item, Detector)
and item.type == GROUP_TYPE_UPTIME_DOMAIN_CHECK_FAILURE
and item_id in serialized_uptime_detector_map_by_id
):
# This is an uptime detector
results[item] = serialized_uptime_detector_map_by_id[item_id]
elif (
# XXX(epurkhiser): Monitors use their GUID as their IDs
isinstance(item, Monitor)
and str(item.guid) in serialized_cron_monitor_map_by_guid
):
# This is a cron monitor
results[item] = serialized_cron_monitor_map_by_guid[str(item.guid)]
else:
logger.error(
"Alert Rule found but dropped during serialization",
extra={
"id": item_id,
"issue_rule": isinstance(item, Rule),
"metric_rule": isinstance(item, AlertRule),
"uptime_rule": (
isinstance(item, Detector)
and item.type == GROUP_TYPE_UPTIME_DOMAIN_CHECK_FAILURE
),
"crons_rule": isinstance(item, Monitor),
},
)
return results
def serialize(
self,
obj: Rule | AlertRule | Detector | Monitor,
attrs: Mapping[Any, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> MutableMapping[Any, Any]:
updated_attrs = {**attrs}
if isinstance(obj, AlertRule):
# Mark that we're using legacy AlertRule models
report_used_legacy_models()
updated_attrs["type"] = "alert_rule"
elif isinstance(obj, Rule):
updated_attrs["type"] = "rule"
elif isinstance(obj, Detector) and obj.type == GROUP_TYPE_UPTIME_DOMAIN_CHECK_FAILURE:
updated_attrs["type"] = "uptime"
elif isinstance(obj, Monitor):
updated_attrs["type"] = "monitor"
else:
raise AssertionError(f"Invalid rule to serialize: {type(obj)}")
return updated_attrs
| CombinedRuleSerializer |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_webagg.py | {
"start": 10904,
"end": 11014
} | class ____(_Backend):
FigureCanvas = FigureCanvasWebAgg
FigureManager = FigureManagerWebAgg
| _BackendWebAgg |
python | apache__airflow | providers/docker/src/airflow/providers/docker/exceptions.py | {
"start": 985,
"end": 1335
} | class ____(AirflowException):
"""
Raised when a Docker container returns an error.
:param logs: The log output of the failed Docker container
"""
def __init__(self, message: str | None = None, logs: list[str | bytes] | None = None) -> None:
super().__init__(message)
self.logs = logs
| DockerContainerFailedException |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/data_adapter.py | {
"start": 24904,
"end": 27424
} | class ____(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
@staticmethod
def can_handle(x, y=None):
return (isinstance(x, (data_types.DatasetV1, data_types.DatasetV2)) or
_is_distributed_dataset(x))
def __init__(self,
x,
y=None,
sample_weights=None,
steps=None,
**kwargs):
super(DatasetAdapter, self).__init__(x, y, **kwargs)
# Note that the dataset instance is immutable, its fine to reuse the user
# provided dataset.
self._dataset = x
# The user-provided steps.
self._user_steps = steps
self._validate_args(y, sample_weights, steps)
def get_dataset(self):
return self._dataset
def get_size(self):
return # Inferred in `DataHandler`.
def batch_size(self):
return None
def has_partial_batch(self):
return False
def partial_batch_size(self):
return None
def should_recreate_iterator(self):
# Since DistributedDatasets have no cardinality, the user must provide
# all steps that need to be run, calling `.repeat()` as needed.
if _is_distributed_dataset(self._dataset):
return False
# If user doesn't supply `steps`, or if they supply `steps` that
# exactly equals the size of the `Dataset`, create a new iterator
# each epoch.
return (self._user_steps is None or
cardinality.cardinality(self._dataset).numpy() == self._user_steps)
def _validate_args(self, y, sample_weights, steps):
"""Validates `__init__` arguments."""
# Arguments that shouldn't be passed.
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"dataset as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"dataset as input.")
if steps is None:
if _is_distributed_dataset(self._dataset):
raise ValueError("When providing a distributed dataset, you must "
"specify the number of steps to run.")
size = cardinality.cardinality(self._dataset).numpy()
if size == cardinality.INFINITE and steps is None:
raise ValueError(
"When providing an infinite dataset, you must specify "
"the number of steps to run (if you did not intend to "
"create an infinite dataset, make sure to not call "
"`repeat()` on the dataset).")
| DatasetAdapter |
python | readthedocs__readthedocs.org | readthedocs/api/v2/views/model_views.py | {
"start": 2894,
"end": 3519
} | class ____(BaseRenderer):
"""
Custom renderer for text/plain format.
charset is 'utf-8' by default.
"""
media_type = "text/plain"
format = "txt"
def render(self, data, accepted_media_type=None, renderer_context=None):
renderer_context = renderer_context or {}
response = renderer_context.get("response")
if not response or response.exception:
return data.get("detail", "").encode(self.charset)
data = render_to_string(
"restapi/log.txt",
{"build": data},
)
return data.encode(self.charset)
| PlainTextBuildRenderer |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 936185,
"end": 936570
} | class ____(
sgqlc.types.Type,
Node,
AuditEntry,
OrganizationAuditEntryData,
RepositoryAuditEntryData,
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("visibility",)
visibility = sgqlc.types.Field(
RepoRemoveMemberAuditEntryVisibility, graphql_name="visibility"
)
| RepoRemoveMemberAuditEntry |
python | huggingface__transformers | tests/models/pixtral/test_modeling_pixtral.py | {
"start": 1031,
"end": 3579
} | class ____:
def __init__(
self,
parent,
batch_size=12,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in Pixtral, the seq length equals the number of patches * batch_size because the patches are flattened
self.seq_length = (image_size // patch_size) ** 2 * batch_size
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
image_sizes = torch.tensor(
[[self.image_size, self.image_size]] * self.batch_size, dtype=torch.long, device=torch_device
)
config = self.get_config()
return config, pixel_values, image_sizes
def get_config(self):
return PixtralVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, image_sizes = config_and_inputs
inputs_dict = {"pixel_values": pixel_values, "image_sizes": image_sizes}
return config, inputs_dict
@require_torch
| PixtralVisionModelTester |
python | mlflow__mlflow | tests/types/test_type_hints.py | {
"start": 811,
"end": 1053
} | class ____(pydantic.BaseModel):
long_field: int
str_field: str
bool_field: bool
double_field: float
binary_field: bytes
datetime_field: datetime.datetime
any_field: Any
optional_str: str | None = None
| CustomModel |
python | allegroai__clearml | clearml/backend_api/services/v2_23/models.py | {
"start": 42624,
"end": 44272
} | class ____(Response):
"""
Response of models.delete endpoint.
:param deleted: Indicates whether the model was deleted
:type deleted: bool
:param url: The url of the model file
:type url: str
"""
_service = "models"
_action = "delete"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"deleted": {
"description": "Indicates whether the model was deleted",
"type": ["boolean", "null"],
},
"url": {
"description": "The url of the model file",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(self, deleted: Optional[bool] = None, url: Optional[str] = None, **kwargs: Any) -> None:
super(DeleteResponse, self).__init__(**kwargs)
self.deleted = deleted
self.url = url
@schema_property("deleted")
def deleted(self) -> Optional[bool]:
return self._property_deleted
@deleted.setter
def deleted(self, value: Optional[bool]) -> None:
if value is None:
self._property_deleted = None
return
self.assert_isinstance(value, "deleted", (bool,))
self._property_deleted = value
@schema_property("url")
def url(self) -> Optional[str]:
return self._property_url
@url.setter
def url(self, value: Optional[str]) -> None:
if value is None:
self._property_url = None
return
self.assert_isinstance(value, "url", six.string_types)
self._property_url = value
| DeleteResponse |
python | allegroai__clearml | clearml/backend_api/services/v2_20/auth.py | {
"start": 5692,
"end": 7091
} | class ____(Request):
"""
Creates a new set of credentials for the authenticated user.
New key/secret is returned.
Note: Secret will never be returned in any other API call.
If a secret is lost or compromised, the key should be revoked
and a new set of credentials can be created.
:param label: Optional credentials label
:type label: str
"""
_service = "auth"
_action = "create_credentials"
_version = "2.20"
_schema = {
"additionalProperties": False,
"definitions": {},
"properties": {
"label": {
"description": "Optional credentials label",
"type": ["string", "null"],
}
},
"type": "object",
}
def __init__(self, label: Optional[str] = None, **kwargs: Any) -> None:
super(CreateCredentialsRequest, self).__init__(**kwargs)
self.label = label
@schema_property("label")
def label(self) -> Optional[str]:
return self._property_label
@label.setter
def label(self, value: Optional[str]) -> None:
if value is None:
self._property_label = None
return
self.assert_isinstance(value, "label", six.string_types)
self._property_label = value
| CreateCredentialsRequest |
python | ray-project__ray | python/ray/train/v2/_internal/logging/logging.py | {
"start": 2686,
"end": 4653
} | class ____(logging.Handler):
"""A handler that writes to a log file in the Ray session directory.
The Ray session directory isn't available until Ray is initialized, so any logs
emitted before Ray is initialized will be lost.
This handler will not create the file handler until you emit a log record.
Args:
filename: The name of the log file. The file is created in the 'logs/train'
directory of the Ray session directory.
"""
# TODO (hpguo): This handler class is shared by both Ray Train and ray data. We
# should move this to ray core and make it available to both libraries.
def __init__(self, filename: str):
super().__init__()
self._filename = filename
self._handler = None
self._formatter = None
self._path = None
def emit(self, record):
if self._handler is None:
self._try_create_handler()
if self._handler is not None:
self._handler.emit(record)
def setFormatter(self, fmt: logging.Formatter) -> None:
if self._handler is not None:
self._handler.setFormatter(fmt)
self._formatter = fmt
def get_log_file_path(self) -> Optional[str]:
if self._handler is None:
self._try_create_handler()
return self._path
def _try_create_handler(self):
assert self._handler is None
# Get the Ray Train log directory. If not in a Ray session, return.
# This handler will only be created within a Ray session.
log_directory = LoggingManager.get_log_directory()
if log_directory is None:
return
os.makedirs(log_directory, exist_ok=True)
# Create the log file.
self._path = os.path.join(log_directory, self._filename)
self._handler = logging.FileHandler(self._path)
if self._formatter is not None:
self._handler.setFormatter(self._formatter)
| SessionFileHandler |
python | allegroai__clearml | clearml/datasets/dataset.py | {
"start": 2044,
"end": 2815
} | class ____(object):
relative_path = attrib(default=None, type=str)
hash = attrib(default=None, type=str)
parent_dataset_id = attrib(default=None, type=str)
size = attrib(default=None, type=int)
# support multi part artifact storage
artifact_name = attrib(default=None, type=str)
# cleared when file is uploaded.
local_path = attrib(default=None, type=str)
def as_dict(self) -> Dict:
state = dict(
relative_path=self.relative_path,
hash=self.hash,
parent_dataset_id=self.parent_dataset_id,
size=self.size,
artifact_name=self.artifact_name,
**dict([("local_path", self.local_path)] if self.local_path else ()),
)
return state
@attrs
| FileEntry |
python | kamyu104__LeetCode-Solutions | Python/find-maximum-area-of-a-triangle.py | {
"start": 69,
"end": 1142
} | class ____(object):
def maxArea(self, coords):
"""
:type coords: List[List[int]]
:rtype: int
"""
mx_x = max(x for x, _ in coords)
mn_x = min(x for x, _ in coords)
mx_y = max(y for _, y in coords)
mn_y = min(y for _, y in coords)
lookup_mx_y = collections.defaultdict(lambda: float("-inf"))
lookup_mn_y = collections.defaultdict(lambda: float("inf"))
lookup_mx_x = collections.defaultdict(lambda: float("-inf"))
lookup_mn_x = collections.defaultdict(lambda: float("inf"))
for x, y in coords:
lookup_mx_y[x] = max(lookup_mx_y[x], y)
lookup_mn_y[x] = min(lookup_mn_y[x], y)
lookup_mx_x[y] = max(lookup_mx_x[y], x)
lookup_mn_x[y] = min(lookup_mn_x[y], x)
result = max(max((lookup_mx_y[x]-lookup_mn_y[x])*max(x-mn_x, mx_x-x) for x in lookup_mx_y.iterkeys()),
max((lookup_mx_x[y]-lookup_mn_x[y])*max(y-mn_y, mx_y-y) for y in lookup_mx_x.iterkeys()))
return result if result else -1
| Solution |
python | matplotlib__matplotlib | lib/matplotlib/category.py | {
"start": 648,
"end": 3675
} | class ____(units.ConversionInterface):
@staticmethod
def convert(value, unit, axis):
"""
Convert strings in *value* to floats using mapping information stored
in the *unit* object.
Parameters
----------
value : str or iterable
Value or list of values to be converted.
unit : `.UnitData`
An object mapping strings to integers.
axis : `~matplotlib.axis.Axis`
The axis on which the converted value is plotted.
.. note:: *axis* is unused.
Returns
-------
float or `~numpy.ndarray` of float
"""
if unit is None:
raise ValueError(
'Missing category information for StrCategoryConverter; '
'this might be caused by unintendedly mixing categorical and '
'numeric data')
StrCategoryConverter._validate_unit(unit)
# dtype = object preserves numerical pass throughs
values = np.atleast_1d(np.array(value, dtype=object))
# force an update so it also does type checking
unit.update(values)
s = np.vectorize(unit._mapping.__getitem__, otypes=[float])(values)
return s if not cbook.is_scalar_or_string(value) else s[0]
@staticmethod
def axisinfo(unit, axis):
"""
Set the default axis ticks and labels.
Parameters
----------
unit : `.UnitData`
object string unit information for value
axis : `~matplotlib.axis.Axis`
axis for which information is being set
.. note:: *axis* is not used
Returns
-------
`~matplotlib.units.AxisInfo`
Information to support default tick labeling
"""
StrCategoryConverter._validate_unit(unit)
# locator and formatter take mapping dict because
# args need to be pass by reference for updates
majloc = StrCategoryLocator(unit._mapping)
majfmt = StrCategoryFormatter(unit._mapping)
return units.AxisInfo(majloc=majloc, majfmt=majfmt)
@staticmethod
def default_units(data, axis):
"""
Set and update the `~matplotlib.axis.Axis` units.
Parameters
----------
data : str or iterable of str
axis : `~matplotlib.axis.Axis`
axis on which the data is plotted
Returns
-------
`.UnitData`
object storing string to integer mapping
"""
# the conversion call stack is default_units -> axis_info -> convert
if axis.units is None:
axis.set_units(UnitData(data))
else:
axis.units.update(data)
return axis.units
@staticmethod
def _validate_unit(unit):
if not hasattr(unit, '_mapping'):
raise ValueError(
f'Provided unit "{unit}" is not valid for a categorical '
'converter, as it does not have a _mapping attribute.')
| StrCategoryConverter |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_tuple.py | {
"start": 2203,
"end": 22080
} | class ____(seq_tests.CommonTest):
type2test = tuple
def test_getitem_error(self):
t = ()
msg = "tuple indices must be integers or slices"
with self.assertRaisesRegex(TypeError, msg):
t['a']
def test_constructors(self):
super().test_constructors()
# calling built-in types without argument must return empty
self.assertEqual(tuple(), ())
t0_3 = (0, 1, 2, 3)
t0_3_bis = tuple(t0_3)
self.assertTrue(t0_3 is t0_3_bis)
self.assertEqual(tuple([]), ())
self.assertEqual(tuple([0, 1, 2, 3]), (0, 1, 2, 3))
self.assertEqual(tuple(''), ())
self.assertEqual(tuple('spam'), ('s', 'p', 'a', 'm'))
self.assertEqual(tuple(x for x in range(10) if x % 2),
(1, 3, 5, 7, 9))
def test_keyword_args(self):
with self.assertRaisesRegex(TypeError, 'keyword argument'):
tuple(sequence=())
def test_keywords_in_subclass(self):
with torch._dynamo.error_on_graph_break(False):
class subclass(tuple):
pass
u = subclass([1, 2])
self.assertIs(type(u), subclass)
self.assertEqual(list(u), [1, 2])
with self.assertRaises(TypeError):
subclass(sequence=())
with torch._dynamo.error_on_graph_break(False):
class subclass_with_init(tuple):
def __init__(self, arg, newarg=None):
self.newarg = newarg
u = subclass_with_init([1, 2], newarg=3)
self.assertIs(type(u), subclass_with_init)
self.assertEqual(list(u), [1, 2])
self.assertEqual(u.newarg, 3)
with torch._dynamo.error_on_graph_break(False):
class subclass_with_new(tuple):
def __new__(cls, arg, newarg=None):
self = super().__new__(cls, arg)
self.newarg = newarg
return self
u = subclass_with_new([1, 2], newarg=3)
self.assertIs(type(u), subclass_with_new)
self.assertEqual(list(u), [1, 2])
self.assertEqual(u.newarg, 3)
def test_truth(self):
super().test_truth()
self.assertTrue(not ())
self.assertTrue((42, ))
def test_len(self):
super().test_len()
self.assertEqual(len(()), 0)
self.assertEqual(len((0,)), 1)
self.assertEqual(len((0, 1, 2)), 3)
def test_iadd(self):
super().test_iadd()
u = (0, 1)
u2 = u
u += (2, 3)
self.assertTrue(u is not u2)
def test_imul(self):
super().test_imul()
u = (0, 1)
u2 = u
u *= 3
self.assertTrue(u is not u2)
def test_tupleresizebug(self):
# Check that a specific bug in _PyTuple_Resize() is squashed.
def f():
for i in range(1000):
yield i
self.assertEqual(list(tuple(f())), list(range(1000)))
# We expect tuples whose base components have deterministic hashes to
# have deterministic hashes too - and, indeed, the same hashes across
# platforms with hash codes of the same bit width.
def test_hash_exact(self):
def check_one_exact(t, e32, e64):
got = hash(t)
expected = e32 if support.NHASHBITS == 32 else e64
if got != expected:
msg = f"FAIL hash({t!r}) == {got} != {expected}"
self.fail(msg)
check_one_exact((), 750394483, 5740354900026072187)
check_one_exact((0,), 1214856301, -8753497827991233192)
check_one_exact((0, 0), -168982784, -8458139203682520985)
check_one_exact((0.5,), 2077348973, -408149959306781352)
check_one_exact((0.5, (), (-2, 3, (4, 6))), 714642271,
-1845940830829704396)
# Various tests for hashing of tuples to check that we get few collisions.
# Does something only if RUN_ALL_HASH_TESTS is true.
#
# Earlier versions of the tuple hash algorithm had massive collisions
# reported at:
# - https://bugs.python.org/issue942952
# - https://bugs.python.org/issue34751
def test_hash_optional(self):
from itertools import product
if not RUN_ALL_HASH_TESTS:
return
# If specified, `expected` is a 2-tuple of expected
# (number_of_collisions, pileup) values, and the test fails if
# those aren't the values we get. Also if specified, the test
# fails if z > `zlimit`.
def tryone_inner(tag, nbins, hashes, expected=None, zlimit=None):
from collections import Counter
nballs = len(hashes)
mean, sdev = support.collision_stats(nbins, nballs)
c = Counter(hashes)
collisions = nballs - len(c)
z = (collisions - mean) / sdev
pileup = max(c.values()) - 1
del c
got = (collisions, pileup)
failed = False
prefix = ""
if zlimit is not None and z > zlimit:
failed = True
prefix = f"FAIL z > {zlimit}; "
if expected is not None and got != expected:
failed = True
prefix += f"FAIL {got} != {expected}; "
if failed or JUST_SHOW_HASH_RESULTS:
msg = f"{prefix}{tag}; pileup {pileup:,} mean {mean:.1f} "
msg += f"coll {collisions:,} z {z:+.1f}"
if JUST_SHOW_HASH_RESULTS:
import sys
print(msg, file=sys.__stdout__)
else:
self.fail(msg)
def tryone(tag, xs,
native32=None, native64=None, hi32=None, lo32=None,
zlimit=None):
NHASHBITS = support.NHASHBITS
hashes = list(map(hash, xs))
tryone_inner(tag + f"; {NHASHBITS}-bit hash codes",
1 << NHASHBITS,
hashes,
native32 if NHASHBITS == 32 else native64,
zlimit)
if NHASHBITS > 32:
shift = NHASHBITS - 32
tryone_inner(tag + "; 32-bit upper hash codes",
1 << 32,
[h >> shift for h in hashes],
hi32,
zlimit)
mask = (1 << 32) - 1
tryone_inner(tag + "; 32-bit lower hash codes",
1 << 32,
[h & mask for h in hashes],
lo32,
zlimit)
# Tuples of smallish positive integers are common - nice if we
# get "better than random" for these.
tryone("range(100) by 3", list(product(range(100), repeat=3)),
(0, 0), (0, 0), (4, 1), (0, 0))
# A previous hash had systematic problems when mixing integers of
# similar magnitude but opposite sign, obscurely related to that
# j ^ -2 == -j when j is odd.
cands = list(range(-10, -1)) + list(range(9))
# Note: -1 is omitted because hash(-1) == hash(-2) == -2, and
# there's nothing the tuple hash can do to avoid collisions
# inherited from collisions in the tuple components' hashes.
tryone("-10 .. 8 by 4", list(product(cands, repeat=4)),
(0, 0), (0, 0), (0, 0), (0, 0))
del cands
# The hashes here are a weird mix of values where all the
# variation is in the lowest bits and across a single high-order
# bit - the middle bits are all zeroes. A decent hash has to
# both propagate low bits to the left and high bits to the
# right. This is also complicated a bit in that there are
# collisions among the hashes of the integers in L alone.
L = [n << 60 for n in range(100)]
tryone("0..99 << 60 by 3", list(product(L, repeat=3)),
(0, 0), (0, 0), (0, 0), (324, 1))
del L
# Used to suffer a massive number of collisions.
tryone("[-3, 3] by 18", list(product([-3, 3], repeat=18)),
(7, 1), (0, 0), (7, 1), (6, 1))
# And even worse. hash(0.5) has only a single bit set, at the
# high end. A decent hash needs to propagate high bits right.
tryone("[0, 0.5] by 18", list(product([0, 0.5], repeat=18)),
(5, 1), (0, 0), (9, 1), (12, 1))
# Hashes of ints and floats are the same across platforms.
# String hashes vary even on a single platform across runs, due
# to hash randomization for strings. So we can't say exactly
# what this should do. Instead we insist that the # of
# collisions is no more than 4 sdevs above the theoretically
# random mean. Even if the tuple hash can't achieve that on its
# own, the string hash is trying to be decently pseudo-random
# (in all bit positions) on _its_ own. We can at least test
# that the tuple hash doesn't systematically ruin that.
tryone("4-char tuples",
list(product("abcdefghijklmnopqrstuvwxyz", repeat=4)),
zlimit=4.0)
# The "old tuple test". See https://bugs.python.org/issue942952.
# Ensures, for example, that the hash:
# is non-commutative
# spreads closely spaced values
# doesn't exhibit cancellation in tuples like (x,(x,y))
N = 50
base = list(range(N))
xp = list(product(base, repeat=2))
inps = base + list(product(base, xp)) + \
list(product(xp, base)) + xp + list(zip(base))
tryone("old tuple test", inps,
(2, 1), (0, 0), (52, 49), (7, 1))
del base, xp, inps
# The "new tuple test". See https://bugs.python.org/issue34751.
# Even more tortured nesting, and a mix of signed ints of very
# small magnitude.
n = 5
A = [x for x in range(-n, n+1) if x != -1]
B = A + [(a,) for a in A]
L2 = list(product(A, repeat=2))
L3 = L2 + list(product(A, repeat=3))
L4 = L3 + list(product(A, repeat=4))
# T = list of testcases. These consist of all (possibly nested
# at most 2 levels deep) tuples containing at most 4 items from
# the set A.
T = A
T += [(a,) for a in B + L4]
T += product(L3, B)
T += product(L2, repeat=2)
T += product(B, L3)
T += product(B, B, L2)
T += product(B, L2, B)
T += product(L2, B, B)
T += product(B, repeat=4)
assert len(T) == 345130
tryone("new tuple test", T,
(9, 1), (0, 0), (21, 5), (6, 1))
def test_repr(self):
l0 = tuple()
l2 = (0, 1, 2)
a0 = self.type2test(l0)
a2 = self.type2test(l2)
self.assertEqual(str(a0), repr(l0))
self.assertEqual(str(a2), repr(l2))
self.assertEqual(repr(a0), "()")
self.assertEqual(repr(a2), "(0, 1, 2)")
def _not_tracked(self, t):
# Nested tuples can take several collections to untrack
gc.collect()
gc.collect()
self.assertFalse(gc.is_tracked(t), t)
def _tracked(self, t):
self.assertTrue(gc.is_tracked(t), t)
gc.collect()
gc.collect()
self.assertTrue(gc.is_tracked(t), t)
@support.cpython_only
def test_track_literals(self):
# Test GC-optimization of tuple literals
x, y, z = 1.5, "a", []
self._not_tracked(())
self._not_tracked((1,))
self._not_tracked((1, 2))
self._not_tracked((1, 2, "a"))
self._not_tracked((1, 2, (None, True, False, ()), int))
self._not_tracked((object(),))
self._not_tracked(((1, x), y, (2, 3)))
# Tuples with mutable elements are always tracked, even if those
# elements are not tracked right now.
self._tracked(([],))
self._tracked(([1],))
self._tracked(({},))
self._tracked((set(),))
self._tracked((x, y, z))
def check_track_dynamic(self, tp, always_track):
x, y, z = 1.5, "a", []
check = self._tracked if always_track else self._not_tracked
check(tp())
check(tp([]))
check(tp(set()))
check(tp([1, x, y]))
check(tp(obj for obj in [1, x, y]))
check(tp(set([1, x, y])))
check(tp(tuple([obj]) for obj in [1, x, y]))
check(tuple(tp([obj]) for obj in [1, x, y]))
self._tracked(tp([z]))
self._tracked(tp([[x, y]]))
self._tracked(tp([{x: y}]))
self._tracked(tp(obj for obj in [x, y, z]))
self._tracked(tp(tuple([obj]) for obj in [x, y, z]))
self._tracked(tuple(tp([obj]) for obj in [x, y, z]))
@support.cpython_only
def test_track_dynamic(self):
# Test GC-optimization of dynamically constructed tuples.
self.check_track_dynamic(tuple, False)
@support.cpython_only
def test_track_subtypes(self):
# Tuple subtypes must always be tracked
with torch._dynamo.error_on_graph_break(False):
class MyTuple(tuple):
pass
self.check_track_dynamic(MyTuple, True)
@support.cpython_only
def test_bug7466(self):
# Trying to untrack an unfinished tuple could crash Python
self._not_tracked(tuple(gc.collect() for i in range(101)))
def test_repr_large(self):
# Check the repr of large list objects
def check(n):
l = (0,) * n
s = repr(l)
self.assertEqual(s,
'(' + ', '.join(['0'] * n) + ')')
check(10) # check our checking code
check(1000000)
def test_iterator_pickle(self):
# Userlist iterators don't support pickling yet since
# they are based on generators.
data = self.type2test([4, 5, 6, 7])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
itorg = iter(data)
d = pickle.dumps(itorg, proto)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(self.type2test(it), self.type2test(data))
it = pickle.loads(d)
next(it)
d = pickle.dumps(it, proto)
self.assertEqual(self.type2test(it), self.type2test(data)[1:])
def test_reversed_pickle(self):
data = self.type2test([4, 5, 6, 7])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
itorg = reversed(data)
d = pickle.dumps(itorg, proto)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(self.type2test(it), self.type2test(reversed(data)))
it = pickle.loads(d)
next(it)
d = pickle.dumps(it, proto)
self.assertEqual(self.type2test(it), self.type2test(reversed(data))[1:])
def test_no_comdat_folding(self):
# Issue 8847: In the PGO build, the MSVC linker's COMDAT folding
# optimization causes failures in code that relies on distinct
# function addresses.
with torch._dynamo.error_on_graph_break(False):
class T(tuple): pass
with self.assertRaises(TypeError):
[3,] + T((1,2))
def test_lexicographic_ordering(self):
# Issue 21100
a = self.type2test([1, 2])
b = self.type2test([1, 2, 0])
c = self.type2test([1, 3])
self.assertLess(a, b)
self.assertLess(b, c)
# Notes on testing hash codes. The primary thing is that Python doesn't
# care about "random" hash codes. To the contrary, we like them to be
# very regular when possible, so that the low-order bits are as evenly
# distributed as possible. For integers this is easy: hash(i) == i for
# all not-huge i except i==-1.
#
# For tuples of mixed type there's really no hope of that, so we want
# "randomish" here instead. But getting close to pseudo-random in all
# bit positions is more expensive than we've been willing to pay for.
#
# We can tolerate large deviations from random - what we don't want is
# catastrophic pileups on a relative handful of hash codes. The dict
# and set lookup routines remain effective provided that full-width hash
# codes for not-equal objects are distinct.
#
# So we compute various statistics here based on what a "truly random"
# hash would do, but don't automate "pass or fail" based on those
# results. Instead those are viewed as inputs to human judgment, and the
# automated tests merely ensure we get the _same_ results across
# platforms. In fact, we normally don't bother to run them at all -
# set RUN_ALL_HASH_TESTS to force it.
#
# When global JUST_SHOW_HASH_RESULTS is True, the tuple hash statistics
# are just displayed to stdout. A typical output line looks like:
#
# old tuple test; 32-bit upper hash codes; \
# pileup 49 mean 7.4 coll 52 z +16.4
#
# "old tuple test" is just a string name for the test being run.
#
# "32-bit upper hash codes" means this was run under a 64-bit build and
# we've shifted away the lower 32 bits of the hash codes.
#
# "pileup" is 0 if there were no collisions across those hash codes.
# It's 1 less than the maximum number of times any single hash code was
# seen. So in this case, there was (at least) one hash code that was
# seen 50 times: that hash code "piled up" 49 more times than ideal.
#
# "mean" is the number of collisions a perfectly random hash function
# would have yielded, on average.
#
# "coll" is the number of collisions actually seen.
#
# "z" is "coll - mean" divided by the standard deviation of the number
# of collisions a perfectly random hash function would suffer. A
# positive value is "worse than random", and negative value "better than
# random". Anything of magnitude greater than 3 would be highly suspect
# for a hash function that claimed to be random. It's essentially
# impossible that a truly random function would deliver a result 16.4
# sdevs "worse than random".
#
# But we don't care here! That's why the test isn't coded to fail.
# Knowing something about how the high-order hash code bits behave
# provides insight, but is irrelevant to how the dict and set lookup
# code performs. The low-order bits are much more important to that,
# and on the same test those did "just like random":
#
# old tuple test; 32-bit lower hash codes; \
# pileup 1 mean 7.4 coll 7 z -0.2
#
# So there are always tradeoffs to consider. For another:
#
# 0..99 << 60 by 3; 32-bit hash codes; \
# pileup 0 mean 116.4 coll 0 z -10.8
#
# That was run under a 32-bit build, and is spectacularly "better than
# random". On a 64-bit build the wider hash codes are fine too:
#
# 0..99 << 60 by 3; 64-bit hash codes; \
# pileup 0 mean 0.0 coll 0 z -0.0
#
# but their lower 32 bits are poor:
#
# 0..99 << 60 by 3; 32-bit lower hash codes; \
# pileup 1 mean 116.4 coll 324 z +19.2
#
# In a statistical sense that's waaaaay too many collisions, but (a) 324
# collisions out of a million hash codes isn't anywhere near being a
# real problem; and, (b) the worst pileup on a single hash code is a measly
# 1 extra. It's a relatively poor case for the tuple hash, but still
# fine for practical use.
#
# This isn't, which is what Python 3.7.1 produced for the hashes of
# itertools.product([0, 0.5], repeat=18). Even with a fat 64-bit
# hashcode, the highest pileup was over 16,000 - making a dict/set
# lookup on one of the colliding values thousands of times slower (on
# average) than we expect.
#
# [0, 0.5] by 18; 64-bit hash codes; \
# pileup 16,383 mean 0.0 coll 262,128 z +6073641856.9
# [0, 0.5] by 18; 32-bit lower hash codes; \
# pileup 262,143 mean 8.0 coll 262,143 z +92683.6
if __name__ == "__main__":
run_tests()
| TupleTest |
python | scipy__scipy | scipy/io/arff/tests/test_arffread.py | {
"start": 10697,
"end": 11882
} | class ____:
"""
Regression test for issue #10232:
Exception in loadarff with quoted nominal attributes.
"""
def setup_method(self):
self.data, self.meta = loadarff(test_quoted_nominal)
def test_attributes(self):
assert_equal(len(self.meta._attributes), 2)
age, smoker = self.meta._attributes.values()
assert_equal(age.name, 'age')
assert_equal(age.type_name, 'numeric')
assert_equal(smoker.name, 'smoker')
assert_equal(smoker.type_name, 'nominal')
assert_equal(smoker.values, ['yes', 'no'])
def test_data(self):
age_dtype_instance = np.float64
smoker_dtype_instance = '<S3'
age_expected = np.array([
18,
24,
44,
56,
89,
11,
], dtype=age_dtype_instance)
smoker_expected = np.array([
'no',
'yes',
'no',
'no',
'yes',
'no',
], dtype=smoker_dtype_instance)
assert_array_equal(self.data["age"], age_expected)
assert_array_equal(self.data["smoker"], smoker_expected)
| TestQuotedNominal |
python | pypa__packaging | tests/test_tags.py | {
"start": 4166,
"end": 5129
} | class ____:
def test_simple(self, example_tag: tags.Tag) -> None:
parsed_tags = tags.parse_tag(str(example_tag))
assert parsed_tags == {example_tag}
def test_multi_interpreter(self, example_tag: tags.Tag) -> None:
expected = {example_tag, tags.Tag("py2", "none", "any")}
given = tags.parse_tag("py2.py3-none-any")
assert given == expected
def test_multi_platform(self) -> None:
expected = {
tags.Tag("cp37", "cp37m", platform)
for platform in (
"macosx_10_6_intel",
"macosx_10_9_intel",
"macosx_10_9_x86_64",
"macosx_10_10_intel",
"macosx_10_10_x86_64",
)
}
given = tags.parse_tag(
"cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64."
"macosx_10_10_intel.macosx_10_10_x86_64"
)
assert given == expected
| TestParseTag |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/constant_op_eager_test.py | {
"start": 19226,
"end": 20083
} | class ____(test.TestCase):
def testOnesLike(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64
]:
numpy_dtype = dtype.as_numpy_dtype
# Creates a tensor of non-zero values with shape 2 x 3.
d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.numpy()
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
self.assertEqual([2, 3], z_var.get_shape())
| OnesLikeTest |
python | ray-project__ray | python/ray/data/preprocessors/concatenator.py | {
"start": 261,
"end": 6364
} | class ____(Preprocessor):
"""Combine numeric columns into a column of type
:class:`~ray.air.util.tensor_extensions.pandas.TensorDtype`. Only columns
specified in ``columns`` will be concatenated.
This preprocessor concatenates numeric columns and stores the result in a new
column. The new column contains
:class:`~ray.air.util.tensor_extensions.pandas.TensorArrayElement` objects of
shape :math:`(m,)`, where :math:`m` is the number of columns concatenated.
The :math:`m` concatenated columns are dropped after concatenation.
The preprocessor preserves the order of the columns provided in the ``colummns``
argument and will use that order when calling ``transform()`` and ``transform_batch()``.
Examples:
>>> import numpy as np
>>> import pandas as pd
>>> import ray
>>> from ray.data.preprocessors import Concatenator
:py:class:`Concatenator` combines numeric columns into a column of
:py:class:`~ray.air.util.tensor_extensions.pandas.TensorDtype`.
>>> df = pd.DataFrame({"X0": [0, 3, 1], "X1": [0.5, 0.2, 0.9]})
>>> ds = ray.data.from_pandas(df) # doctest: +SKIP
>>> concatenator = Concatenator(columns=["X0", "X1"])
>>> concatenator.transform(ds).to_pandas() # doctest: +SKIP
concat_out
0 [0.0, 0.5]
1 [3.0, 0.2]
2 [1.0, 0.9]
By default, the created column is called `"concat_out"`, but you can specify
a different name.
>>> concatenator = Concatenator(columns=["X0", "X1"], output_column_name="tensor")
>>> concatenator.transform(ds).to_pandas() # doctest: +SKIP
tensor
0 [0.0, 0.5]
1 [3.0, 0.2]
2 [1.0, 0.9]
>>> concatenator = Concatenator(columns=["X0", "X1"], dtype=np.float32)
>>> concatenator.transform(ds) # doctest: +SKIP
Dataset(num_rows=3, schema={Y: object, concat_out: TensorDtype(shape=(2,), dtype=float32)})
When ``flatten=True``, nested vectors in the columns will be flattened during concatenation:
>>> df = pd.DataFrame({"X0": [[1, 2], [3, 4]], "X1": [0.5, 0.2]})
>>> ds = ray.data.from_pandas(df) # doctest: +SKIP
>>> concatenator = Concatenator(columns=["X0", "X1"], flatten=True)
>>> concatenator.transform(ds).to_pandas() # doctest: +SKIP
concat_out
0 [1.0, 2.0, 0.5]
1 [3.0, 4.0, 0.2]
Args:
columns: A list of columns to concatenate. The provided order of the columns
will be retained during concatenation.
output_column_name: The desired name for the new column.
Defaults to ``"concat_out"``.
dtype: The ``dtype`` to convert the output tensors to. If unspecified,
the ``dtype`` is determined by standard coercion rules.
raise_if_missing: If ``True``, an error is raised if any
of the columns in ``columns`` don't exist.
Defaults to ``False``.
flatten: If ``True``, nested vectors in the columns will be flattened during
concatenation. Defaults to ``False``.
Raises:
ValueError: if `raise_if_missing` is `True` and a column in `columns` or
doesn't exist in the dataset.
""" # noqa: E501
_is_fittable = False
def __init__(
self,
columns: List[str],
output_column_name: str = "concat_out",
dtype: Optional[np.dtype] = None,
raise_if_missing: bool = False,
flatten: bool = False,
):
super().__init__()
self.columns = columns
self.output_column_name = output_column_name
self.dtype = dtype
self.raise_if_missing = raise_if_missing
self.flatten = flatten
def _validate(self, df: pd.DataFrame) -> None:
missing_columns = set(self.columns) - set(df)
if missing_columns:
message = (
f"Missing columns specified in '{self.columns}': {missing_columns}"
)
if self.raise_if_missing:
raise ValueError(message)
else:
logger.warning(message)
def _transform_pandas(self, df: pd.DataFrame):
self._validate(df)
if self.flatten:
concatenated = df[self.columns].to_numpy()
concatenated = [
np.concatenate(
[
np.atleast_1d(elem)
if self.dtype is None
else np.atleast_1d(elem).astype(self.dtype)
for elem in row
]
)
for row in concatenated
]
else:
concatenated = df[self.columns].to_numpy(dtype=self.dtype)
df = df.drop(columns=self.columns)
# Use a Pandas Series for column assignment to get more consistent
# behavior across Pandas versions.
df.loc[:, self.output_column_name] = pd.Series(list(concatenated))
return df
def get_input_columns(self) -> List[str]:
return self.columns
def get_output_columns(self) -> List[str]:
return [self.output_column_name]
def __repr__(self):
default_values = {
"output_column_name": "concat_out",
"columns": None,
"dtype": None,
"raise_if_missing": False,
"flatten": False,
}
non_default_arguments = []
for parameter, default_value in default_values.items():
value = getattr(self, parameter)
if value != default_value:
non_default_arguments.append(f"{parameter}={value}")
return f"{self.__class__.__name__}({', '.join(non_default_arguments)})"
def __setstate__(self, state: Dict[str, Any]) -> None:
super().__setstate__(state)
# flatten is a recent field, to ensure backwards compatibility
# assign a default in case it is missing in the serialized state
if not hasattr(self, "flatten"):
self.flatten = False
| Concatenator |
python | PrefectHQ__prefect | src/integrations/prefect-aws/prefect_aws/workers/ecs_worker.py | {
"start": 24732,
"end": 61622
} | class ____(BaseWorker[ECSJobConfiguration, ECSVariables, ECSWorkerResult]):
"""
A Prefect worker to run flow runs as ECS tasks.
"""
type: str = "ecs"
job_configuration: type[ECSJobConfiguration] = ECSJobConfiguration
job_configuration_variables: type[ECSVariables] | None = ECSVariables
_description: str = (
"Execute flow runs within containers on AWS ECS. Works with EC2 "
"and Fargate clusters. Requires an AWS account."
)
_display_name = "AWS Elastic Container Service"
_documentation_url = "https://docs.prefect.io/integrations/prefect-aws/"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/d74b16fe84ce626345adf235a47008fea2869a60-225x225.png" # noqa
async def _initiate_run(
self,
flow_run: "FlowRun",
configuration: ECSJobConfiguration,
):
"""
Initiates a flow run on AWS ECS. This method does not wait for the flow run to complete.
"""
ecs_client = await run_sync_in_worker_thread(
configuration.aws_credentials.get_client, "ecs"
)
logger = cast(logging.Logger, self.get_flow_run_logger(flow_run))
await run_sync_in_worker_thread(
self._prepare_and_create_task,
logger,
ecs_client,
configuration,
flow_run,
)
async def run(
self,
flow_run: "FlowRun",
configuration: ECSJobConfiguration,
task_status: Optional[anyio.abc.TaskStatus] = None,
) -> ECSWorkerResult:
"""
Runs a given flow run on the current worker.
"""
ecs_client = await run_sync_in_worker_thread(
configuration.aws_credentials.get_client, "ecs"
)
logger = cast(logging.Logger, self.get_flow_run_logger(flow_run))
(
task_arn,
cluster_arn,
) = await run_sync_in_worker_thread(
self._prepare_and_create_task,
logger,
ecs_client,
configuration,
flow_run,
)
# The task identifier is "{cluster}::{task}" where we use the configured cluster
# if set to preserve matching by name rather than arn
# Note "::" is used despite the Prefect standard being ":" because ARNs contain
# single colons.
identifier = (
(configuration.cluster if configuration.cluster else cluster_arn)
+ "::"
+ task_arn
)
if task_status:
task_status.started(identifier)
return ECSWorkerResult(
identifier=identifier,
# The observer will handle crash detection, so we can always return 1 if the task
# was created successfully
status_code=0,
)
def _prepare_and_create_task(
self,
logger: logging.Logger,
ecs_client: "ECSClient",
configuration: ECSJobConfiguration,
flow_run: FlowRun,
) -> Tuple[str, str, dict, bool]:
"""
Register the task definition, create the task run, and wait for it to start.
Returns a tuple of
- The task ARN
- The task's cluster ARN
- The task definition
- A bool indicating if the task definition is newly registered
"""
task_definition_arn = configuration.task_run_request.get("taskDefinition")
new_task_definition_registered = False
if not task_definition_arn:
task_definition = self._prepare_task_definition(
configuration, region=ecs_client.meta.region_name, flow_run=flow_run
)
(
task_definition_arn,
new_task_definition_registered,
) = self._get_or_register_task_definition(
logger, ecs_client, configuration, flow_run, task_definition
)
else:
task_definition = self._retrieve_task_definition(
logger, ecs_client, task_definition_arn
)
if configuration.task_definition:
template_with_placeholders = self.work_pool.base_job_template[
"job_configuration"
]["task_definition"]
placeholders = [
placeholder.name
for placeholder in find_placeholders(template_with_placeholders)
]
logger.warning(
"Skipping task definition construction since a task definition"
" ARN is provided."
)
if placeholders:
logger.warning(
"The following job variable references"
" in the task definition template will be ignored: "
+ ", ".join(placeholders)
)
# Note: _prepare_task_definition (called later) mutates the task definition so
# validation needs to account for the mutation logic
self._validate_task_definition(task_definition, configuration)
if flow_run.deployment_id:
_TASK_DEFINITION_CACHE[flow_run.deployment_id] = task_definition_arn
else:
_TASK_DEFINITION_CACHE[flow_run.flow_id] = task_definition_arn
logger.info(f"Using ECS task definition {task_definition_arn!r}...")
logger.debug(
f"Task definition {json.dumps(task_definition, indent=2, default=str)}"
)
task_run_request = self._prepare_task_run_request(
configuration,
task_definition,
task_definition_arn,
new_task_definition_registered,
)
logger.info("Creating ECS task run...")
logger.debug(
"Task run request"
f"{json.dumps(mask_api_key(task_run_request), indent=2, default=str)}"
)
try:
task = self._create_task_run(ecs_client, task_run_request)
task_arn = task["taskArn"]
cluster_arn = task["clusterArn"]
except Exception as exc:
self._report_task_run_creation_failure(configuration, task_run_request, exc)
raise
return task_arn, cluster_arn
def _report_task_run_creation_failure(
self, configuration: ECSJobConfiguration, task_run: dict, exc: Exception
) -> None:
"""
Wrap common AWS task run creation failures with nicer user-facing messages.
"""
# AWS generates exception types at runtime so they must be captured a bit
# differently than normal.
if "ClusterNotFoundException" in str(exc):
cluster = task_run.get("cluster", "default")
raise RuntimeError(
f"Failed to run ECS task, cluster {cluster!r} not found. "
"Confirm that the cluster is configured in your region."
) from exc
elif (
"No Container Instances" in str(exc) and task_run.get("launchType") == "EC2"
):
cluster = task_run.get("cluster", "default")
raise RuntimeError(
f"Failed to run ECS task, cluster {cluster!r} does not appear to "
"have any container instances associated with it. Confirm that you "
"have EC2 container instances available."
) from exc
elif (
"failed to validate logger args" in str(exc)
and "AccessDeniedException" in str(exc)
and configuration.configure_cloudwatch_logs
):
raise RuntimeError(
"Failed to run ECS task, the attached execution role does not appear"
" to have sufficient permissions. Ensure that the execution role"
f" {configuration.execution_role!r} has permissions"
" logs:CreateLogStream, logs:CreateLogGroup, and logs:PutLogEvents."
)
else:
raise
    def _get_or_register_task_definition(
        self,
        logger: logging.Logger,
        ecs_client: "ECSClient",
        configuration: ECSJobConfiguration,
        flow_run: FlowRun,
        task_definition: dict[str, Any],
    ) -> Tuple[str, bool]:
        """Get or register a task definition for the given flow run.
        Returns a tuple of the task definition ARN and a bool indicating if the task
        definition is newly registered.
        """
        # The cache maps a deployment id (or, lacking one, a flow id) to the
        # ARN last used for it, avoiding re-registration on every run.
        cached_task_definition_arn = (
            _TASK_DEFINITION_CACHE.get(flow_run.deployment_id)
            if flow_run.deployment_id
            else _TASK_DEFINITION_CACHE.get(flow_run.flow_id)
        )
        new_task_definition_registered = False
        if cached_task_definition_arn:
            # A cache hit is only usable if the registered definition is still
            # ACTIVE and semantically equal to what we would register now.
            try:
                cached_task_definition = self._retrieve_task_definition(
                    logger, ecs_client, cached_task_definition_arn
                )
                if not cached_task_definition[
                    "status"
                ] == "ACTIVE" or not self._task_definitions_equal(
                    task_definition, cached_task_definition, logger
                ):
                    cached_task_definition_arn = None
            except Exception as e:
                # Retrieval failure (e.g. deregistered ARN) just invalidates
                # the cache entry; we fall through to re-register.
                logger.warning(
                    f"Failed to retrieve task definition for cached arn {cached_task_definition_arn!r}. "
                    f"Error: {e}"
                )
                cached_task_definition_arn = None
        if (
            not cached_task_definition_arn
            and configuration.match_latest_revision_in_family
        ):
            # Optionally reuse the latest active revision in the family when it
            # matches what we would register.
            family_name = task_definition.get("family", ECS_DEFAULT_FAMILY)
            try:
                task_definition_from_family = self._retrieve_task_definition(
                    logger, ecs_client, family_name
                )
                if task_definition_from_family and self._task_definitions_equal(
                    task_definition, task_definition_from_family, logger
                ):
                    cached_task_definition_arn = task_definition_from_family[
                        "taskDefinitionArn"
                    ]
            except Exception as e:
                logger.warning(
                    f"Failed to retrieve task definition for family {family_name!r}. "
                    f"Error: {e}"
                )
                cached_task_definition_arn = None
        if not cached_task_definition_arn:
            # No reusable definition found: register a fresh revision.
            task_definition_arn = self._register_task_definition(
                logger, ecs_client, task_definition
            )
            new_task_definition_registered = True
        else:
            task_definition_arn = cached_task_definition_arn
        return task_definition_arn, new_task_definition_registered
    def _validate_task_definition(
        self, task_definition: dict, configuration: ECSJobConfiguration
    ) -> None:
        """
        Ensure that the task definition is compatible with the configuration.
        Raises `ValueError` on incompatibility. Returns `None` on success.

        Checks: execution role presence when CloudWatch logging is enabled,
        FARGATE compatibility, and network mode / launch type agreement.
        """
        # CloudWatch log configuration needs an execution role so the ECS agent
        # can create log groups/streams on the task's behalf.
        if configuration.configure_cloudwatch_logs and not task_definition.get(
            "executionRoleArn"
        ):
            raise ValueError(
                "An execution role arn must be set on the task definition to use "
                "`configure_cloudwatch_logs` or `stream_logs` but no execution role "
                "was found on the task definition."
            )
        launch_type = configuration.task_run_request.get("launchType")
        capacity_provider_strategy = configuration.task_run_request.get(
            "capacityProviderStrategy"
        )
        # Users may submit a job with a custom capacity provider strategy which requires
        # launch type to be empty.
        if not launch_type and not capacity_provider_strategy:
            launch_type = ECS_DEFAULT_LAUNCH_TYPE
        # Fargate spot requires a launch type and a capacity provider strategy
        # otherwise we're valid with a capacity provider strategy alone
        if capacity_provider_strategy and launch_type != "FARGATE_SPOT":
            return
        # Default launch type in compatibilities to maintain functionality with
        # _prepare_task_definition which sets requiresCompatibilties to FARGATE
        # which is the default launch type.
        if launch_type != "EC2" and "FARGATE" not in task_definition.get(
            "requiresCompatibilities", [ECS_DEFAULT_LAUNCH_TYPE]
        ):
            raise ValueError(
                "Task definition does not have 'FARGATE' in 'requiresCompatibilities'"
                f" and cannot be used with launch type {launch_type!r}"
            )
        if launch_type == "FARGATE" or launch_type == "FARGATE_SPOT":
            # Only the 'awsvpc' network mode is supported when using FARGATE
            # Default to 'awsvpc' if not provided to maintain functionality with
            # _prepare_task_definition which sets network mode to 'awsvpc' if not provided.
            network_mode = task_definition.get("networkMode", "awsvpc")
            if network_mode != "awsvpc":
                raise ValueError(
                    f"Found network mode {network_mode!r} which is not compatible with "
                    f"launch type {launch_type!r}. Use either the 'EC2' launch "
                    "type or the 'awsvpc' network mode."
                )
def _register_task_definition(
self,
logger: logging.Logger,
ecs_client: "ECSClient",
task_definition: dict,
) -> str:
"""
Register a new task definition with AWS.
Returns the ARN.
"""
logger.info("Registering ECS task definition...")
logger.debug(
"Task definition request"
f"{json.dumps(task_definition, indent=2, default=str)}"
)
response = ecs_client.register_task_definition(**task_definition)
return response["taskDefinition"]["taskDefinitionArn"]
def _retrieve_task_definition(
self,
logger: logging.Logger,
ecs_client: "ECSClient",
task_definition: str,
):
"""
Retrieve an existing task definition from AWS.
"""
if task_definition.startswith("arn:aws:ecs:"):
logger.info(f"Retrieving ECS task definition {task_definition!r}...")
else:
logger.info(
"Retrieving most recent active revision from "
f"ECS task family {task_definition!r}..."
)
response = ecs_client.describe_task_definition(taskDefinition=task_definition)
return response["taskDefinition"]
def _get_or_generate_family(
self, task_definition: dict[str, Any], flow_run: FlowRun
) -> str:
"""
Gets or generate a family for the task definition.
"""
family = task_definition.get("family")
if not family:
family_prefix = f"{ECS_DEFAULT_FAMILY}_{self._work_pool_name}"
if flow_run.deployment_id:
family = f"{family_prefix}_{flow_run.deployment_id}"
else:
family = f"{family_prefix}_{flow_run.flow_id}"
slugify(
family,
max_length=255,
regex_pattern=r"[^a-zA-Z0-9-_]+",
)
return family
    def _prepare_task_definition(
        self,
        configuration: ECSJobConfiguration,
        region: str,
        flow_run: FlowRun,
    ) -> dict[str, Any]:
        """
        Prepare a task definition by inferring any defaults and merging overrides.

        Works on a deep copy of the configured task definition: ensures the
        orchestration container exists with an image, applies CloudWatch log
        configuration, resolves the family, normalizes cpu/memory per launch
        type, and moves Prefect credentials into ECS secrets.
        """
        task_definition = copy.deepcopy(configuration.task_definition)
        # Configure the Prefect runtime container
        task_definition.setdefault("containerDefinitions", [])
        # Remove empty container definitions
        task_definition["containerDefinitions"] = [
            d for d in task_definition["containerDefinitions"] if d
        ]
        container_name = configuration.container_name
        if not container_name:
            container_name = (
                _container_name_from_task_definition(task_definition)
                or ECS_DEFAULT_CONTAINER_NAME
            )
        container: dict[str, Any] | None = _get_container(
            task_definition["containerDefinitions"], container_name
        )
        if container is None:
            if container_name != ECS_DEFAULT_CONTAINER_NAME:
                raise ValueError(
                    f"Container {container_name!r} not found in task definition."
                )
            # Look for a container without a name
            # (for/else: the `else` runs only if no unnamed container is found)
            for container in task_definition["containerDefinitions"]:
                if "name" not in container:
                    container["name"] = container_name
                    break
            else:
                container = {"name": container_name}
                task_definition["containerDefinitions"].append(container)
        if TYPE_CHECKING:
            assert container is not None
        # Image is required so make sure it's present
        container.setdefault("image", get_prefect_image_name())
        # Remove any keys that have been explicitly "unset"
        # NOTE(review): assumes environment entries are plain {"name", "value"}
        # pairs — an entry using only "valueFrom" would raise KeyError here;
        # confirm upstream shape. Iterate a tuple copy so removal is safe.
        unset_keys = {key for key, value in configuration.env.items() if value is None}
        for item in tuple(container.get("environment", [])):
            if item["name"] in unset_keys or item["value"] is None:
                container["environment"].remove(item)
        if configuration.configure_cloudwatch_logs:
            # Build a log stream prefix unique to the work pool and
            # deployment/flow, unless the user configured one explicitly.
            prefix = f"prefect-logs_{self._work_pool_name}"
            if flow_run.deployment_id:
                prefix = f"{prefix}_{flow_run.deployment_id}"
            else:
                prefix = f"{prefix}_{flow_run.flow_id}"
            container["logConfiguration"] = {
                "logDriver": "awslogs",
                "options": {
                    "awslogs-create-group": "true",
                    "awslogs-group": "prefect",
                    "awslogs-region": region,
                    "awslogs-stream-prefix": (
                        configuration.cloudwatch_logs_prefix or prefix
                    ),
                    **configuration.cloudwatch_logs_options,
                },
            }
        task_definition["family"] = self._get_or_generate_family(
            task_definition, flow_run
        )
        # CPU and memory are required in some cases, retrieve the value to use
        cpu = task_definition.get("cpu") or ECS_DEFAULT_CPU
        memory = task_definition.get("memory") or ECS_DEFAULT_MEMORY
        launch_type = configuration.task_run_request.get("launchType")
        if launch_type == "FARGATE" or launch_type == "FARGATE_SPOT":
            # Task level memory and cpu are required when using fargate
            task_definition["cpu"] = str(cpu)
            task_definition["memory"] = str(memory)
            # The FARGATE compatibility is required if it will be used as as launch type
            requires_compatibilities = task_definition.setdefault(
                "requiresCompatibilities", []
            )
            if "FARGATE" not in requires_compatibilities:
                task_definition["requiresCompatibilities"].append("FARGATE")
            # Only the 'awsvpc' network mode is supported when using FARGATE
            # However, we will not enforce that here if the user has set it
            task_definition.setdefault("networkMode", "awsvpc")
        else:
            # Container level memory and cpu are required when using non-FARGATE launch types
            container.setdefault("cpu", int(cpu))
            container.setdefault("memory", int(memory))
        # Ensure set values are cast to strings
        # (AWS accepts task-level cpu/memory only as strings)
        if task_definition.get("cpu"):
            task_definition["cpu"] = str(task_definition["cpu"])
        if task_definition.get("memory"):
            task_definition["memory"] = str(task_definition["memory"])
        _drop_empty_keys_from_dict(task_definition)
        # Handle secrets for both API key and auth string
        secrets = []
        if configuration.prefect_api_key_secret_arn:
            secrets.append(
                {
                    "name": "PREFECT_API_KEY",
                    "valueFrom": configuration.prefect_api_key_secret_arn,
                }
            )
            # Remove the PREFECT_API_KEY from the environment variables
            for item in tuple(container.get("environment", [])):
                if item["name"] == "PREFECT_API_KEY":
                    container["environment"].remove(item)  # type: ignore
        if configuration.prefect_api_auth_string_secret_arn:
            secrets.append(
                {
                    "name": "PREFECT_API_AUTH_STRING",
                    "valueFrom": configuration.prefect_api_auth_string_secret_arn,
                }
            )
            # Remove the PREFECT_API_AUTH_STRING from the environment variables
            for item in tuple(container.get("environment", [])):
                if item["name"] == "PREFECT_API_AUTH_STRING":
                    container["environment"].remove(item)  # type: ignore
        if secrets:
            container["secrets"] = secrets
        return task_definition
def _load_network_configuration(
self, vpc_id: Optional[str], configuration: ECSJobConfiguration
) -> dict:
"""
Load settings from a specific VPC or the default VPC and generate a task
run request's network configuration.
"""
ec2_client = configuration.aws_credentials.get_client("ec2")
vpc_message = "the default VPC" if not vpc_id else f"VPC with ID {vpc_id}"
if not vpc_id:
# Retrieve the default VPC
describe = {"Filters": [{"Name": "isDefault", "Values": ["true"]}]}
else:
describe = {"VpcIds": [vpc_id]}
vpcs = ec2_client.describe_vpcs(**describe)["Vpcs"]
if not vpcs:
help_message = (
"Pass an explicit `vpc_id` or configure a default VPC."
if not vpc_id
else "Check that the VPC exists in the current region."
)
raise ValueError(
f"Failed to find {vpc_message}. "
"Network configuration cannot be inferred. " + help_message
)
vpc_id = vpcs[0]["VpcId"]
subnets = ec2_client.describe_subnets(
Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]
)["Subnets"]
if not subnets:
raise ValueError(
f"Failed to find subnets for {vpc_message}. "
"Network configuration cannot be inferred."
)
return {
"awsvpcConfiguration": {
"subnets": [s["SubnetId"] for s in subnets],
"assignPublicIp": "ENABLED",
"securityGroups": [],
}
}
def _custom_network_configuration(
self,
vpc_id: str,
network_configuration: dict,
configuration: ECSJobConfiguration,
) -> dict:
"""
Load settings from a specific VPC or the default VPC and generate a task
run request's network configuration.
"""
ec2_client = configuration.aws_credentials.get_client("ec2")
vpc_message = f"VPC with ID {vpc_id}"
vpcs = ec2_client.describe_vpcs(VpcIds=[vpc_id]).get("Vpcs")
if not vpcs:
raise ValueError(
f"Failed to find {vpc_message}. "
+ "Network configuration cannot be inferred. "
+ "Pass an explicit `vpc_id`."
)
vpc_id = vpcs[0]["VpcId"]
subnets = ec2_client.describe_subnets(
Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]
)["Subnets"]
if not subnets:
raise ValueError(
f"Failed to find subnets for {vpc_message}. "
+ "Network configuration cannot be inferred."
)
subnet_ids = [subnet["SubnetId"] for subnet in subnets]
config_subnets = network_configuration.get("subnets", [])
if not all(conf_sn in subnet_ids for conf_sn in config_subnets):
raise ValueError(
f"Subnets {config_subnets} not found within {vpc_message}."
+ "Please check that VPC is associated with supplied subnets."
)
return {"awsvpcConfiguration": network_configuration}
    def _prepare_task_run_request(
        self,
        configuration: ECSJobConfiguration,
        task_definition: dict[str, Any] | TaskDefinitionTypeDef,
        task_definition_arn: str,
        new_task_definition_registered: bool,
    ) -> dict:
        """
        Prepare a task run request payload.

        Merges the templated task run request from the configuration with
        inferred defaults (cluster, launch type, network configuration,
        container overrides) and normalizes templated values into the shapes
        AWS ``run_task`` expects (string commands split, dict env/tags turned
        into lists, cpu/memory stringified, tags slugified).
        """
        task_run_request = deepcopy(configuration.task_run_request)
        task_run_request.setdefault("taskDefinition", task_definition_arn)
        # NOTE(review): `assert` is stripped under `python -O`, so this
        # consistency check silently disappears in optimized mode.
        assert task_run_request["taskDefinition"] == task_definition_arn, (
            f"Task definition ARN mismatch: {task_run_request['taskDefinition']!r} "
            f"!= {task_definition_arn!r}"
        )
        # Explicitly add cluster from configuration if set and not already in task_run_request
        # or if the value in task_run_request is empty/None
        # This ensures cluster is included even when template variables resolve to empty/None
        if configuration.cluster:
            existing_cluster = task_run_request.get("cluster")
            if not existing_cluster:
                task_run_request["cluster"] = configuration.cluster
        # Explicitly add launchType if missing or empty in task_run_request
        # This ensures launchType is included even when template variables resolve to empty/None
        # AWS expects camelCase "launchType" not snake_case "launch_type"
        # Default to FARGATE if not specified, which matches the default launch_type variable
        # Note: launchType may be removed later if capacityProviderStrategy is set
        existing_launch_type = task_run_request.get("launchType")
        if not existing_launch_type:
            # Default to FARGATE if launchType is missing, matching the default launch_type variable
            task_run_request["launchType"] = ECS_DEFAULT_LAUNCH_TYPE
        capacityProviderStrategy = task_run_request.get("capacityProviderStrategy")
        if capacityProviderStrategy:
            # Should not be provided at all if capacityProviderStrategy is set, see https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html#ECS-RunTask-request-capacityProviderStrategy # noqa
            self._logger.warning(
                "Found capacityProviderStrategy. "
                "Removing launchType from task run request."
            )
            task_run_request.pop("launchType", None)
        elif task_run_request.get("launchType") == "FARGATE_SPOT":
            # Should not be provided at all for FARGATE SPOT
            task_run_request.pop("launchType", None)
            # A capacity provider strategy is required for FARGATE SPOT
            task_run_request["capacityProviderStrategy"] = [
                {"capacityProvider": "FARGATE_SPOT", "weight": 1}
            ]
        overrides = task_run_request.get("overrides", {})
        container_overrides = overrides.get("containerOverrides", [])
        # Ensure the network configuration is present if using awsvpc for network mode
        if (
            task_definition.get("networkMode") == "awsvpc"
            and not task_run_request.get("networkConfiguration")
            and not configuration.network_configuration
        ):
            task_run_request["networkConfiguration"] = self._load_network_configuration(
                configuration.vpc_id, configuration
            )
        # Use networkConfiguration if supplied by user
        if (
            task_definition.get("networkMode") == "awsvpc"
            and configuration.network_configuration
            and configuration.vpc_id
        ):
            task_run_request["networkConfiguration"] = (
                self._custom_network_configuration(
                    configuration.vpc_id,
                    configuration.network_configuration,
                    configuration,
                )
            )
        # Ensure the container name is set if not provided at template time
        container_name = (
            configuration.container_name
            or _container_name_from_task_definition(task_definition)
            or ECS_DEFAULT_CONTAINER_NAME
        )
        if container_overrides and not container_overrides[0].get("name"):
            container_overrides[0]["name"] = container_name
        # Ensure configuration command is respected post-templating
        orchestration_container = _get_container(container_overrides, container_name)
        if orchestration_container:
            # Override the command if given on the configuration
            if configuration.command:
                orchestration_container["command"] = configuration.command
        # Clean up templated variable formatting
        for container in container_overrides:
            if isinstance(container.get("command"), str):
                container["command"] = shlex.split(container["command"])
            if isinstance(container.get("environment"), dict):
                container["environment"] = [
                    {"name": k, "value": v} for k, v in container["environment"].items()
                ]
            # Remove null values — they're not allowed by AWS
            container["environment"] = [
                item
                for item in container.get("environment", [])
                if item["value"] is not None
            ]
        if isinstance(task_run_request.get("tags"), dict):
            task_run_request["tags"] = [
                {"key": k, "value": v} for k, v in task_run_request["tags"].items()
            ]
        if overrides.get("cpu"):
            overrides["cpu"] = str(overrides["cpu"])
        if overrides.get("memory"):
            overrides["memory"] = str(overrides["memory"])
        # Ensure configuration tags and env are respected post-templating
        # (configuration labels win over templated tags with the same key)
        tags = [
            item
            for item in task_run_request.get("tags", [])
            if item["key"] not in configuration.labels.keys()
        ] + [
            {"key": k, "value": v}
            for k, v in configuration.labels.items()
            if v is not None
        ]
        # Slugify tags keys and values
        tags = [
            {
                "key": slugify(
                    item["key"],
                    regex_pattern=_TAG_REGEX,
                    allow_unicode=True,
                    lowercase=False,
                ),
                "value": slugify(
                    item["value"],
                    regex_pattern=_TAG_REGEX,
                    allow_unicode=True,
                    lowercase=False,
                ),
            }
            for item in tags
        ]
        if (
            new_task_definition_registered
            and configuration.auto_deregister_task_definition
        ):
            # NOTE(review): "degregister" looks like a typo for "deregister",
            # but this tag key is presumably matched elsewhere — do not change
            # it unilaterally; verify against the consumer first.
            tags.append(
                {"key": "prefect.io/degregister-task-definition", "value": "true"}
            )
        if tags:
            task_run_request["tags"] = tags
        if orchestration_container:
            # Configuration env wins over templated env with the same name.
            environment = [
                item
                for item in orchestration_container.get("environment", [])
                if item["name"] not in configuration.env.keys()
            ] + [
                {"name": k, "value": v}
                for k, v in configuration.env.items()
                if v is not None
            ]
            if environment:
                orchestration_container["environment"] = environment
        # Remove empty container overrides
        overrides["containerOverrides"] = [v for v in container_overrides if v]
        return task_run_request
    @retry(
        stop=stop_after_attempt(MAX_CREATE_TASK_RUN_ATTEMPTS),
        wait=wait_fixed(CREATE_TASK_RUN_MIN_DELAY_SECONDS)
        + wait_random(
            CREATE_TASK_RUN_MIN_DELAY_JITTER_SECONDS,
            CREATE_TASK_RUN_MAX_DELAY_JITTER_SECONDS,
        ),
        reraise=True,
    )
    def _create_task_run(self, ecs_client: "ECSClient", task_run_request: dict) -> dict:
        """
        Create a run of a task definition, retried with jittered fixed delay on
        failure.

        Returns the task description for the started task (the first entry of
        the `run_task` response's `tasks` list); callers read its `taskArn` and
        `clusterArn` keys.

        Raises:
            RuntimeError: If AWS reports failures or returns no tasks.
        """
        task = ecs_client.run_task(**task_run_request)
        # AWS reports per-task failures in-band rather than raising.
        if task["failures"]:
            raise RuntimeError(
                f"Failed to run ECS task: {task['failures'][0]['reason']}"
            )
        elif not task["tasks"]:
            raise RuntimeError(
                "Failed to run ECS task: no tasks or failures were returned."
            )
        return task["tasks"][0]
def _task_definitions_equal(
self, taskdef_1, taskdef_2, logger: logging.Logger
) -> bool:
"""
Compare two task definitions.
Since one may come from the AWS API and have populated defaults, we do our best
to homogenize the definitions without changing their meaning.
"""
if taskdef_1 == taskdef_2:
return True
if taskdef_1 is None or taskdef_2 is None:
return False
taskdef_1 = copy.deepcopy(taskdef_1)
taskdef_2 = copy.deepcopy(taskdef_2)
for taskdef in (taskdef_1, taskdef_2):
# Set defaults that AWS would set after registration
container_definitions = taskdef.get("containerDefinitions", [])
essential = any(
container.get("essential") for container in container_definitions
)
if not essential:
container_definitions[0].setdefault("essential", True)
taskdef.setdefault("networkMode", "bridge")
# Normalize ordering of lists that ECS considers unordered
# ECS stores these in unordered data structures, so order shouldn't matter for comparison
for container in container_definitions:
# Sort environment variables by name for consistent comparison
if "environment" in container:
container["environment"] = sorted(
container["environment"], key=lambda x: x.get("name", "")
)
# Sort secrets by name for consistent comparison
if "secrets" in container:
container["secrets"] = sorted(
container["secrets"], key=lambda x: x.get("name", "")
)
# Sort environmentFiles by value as they don't have names
if "environmentFiles" in container:
container["environmentFiles"] = sorted(
container["environmentFiles"], key=lambda x: x.get("value", "")
)
_drop_empty_keys_from_dict(taskdef_1)
_drop_empty_keys_from_dict(taskdef_2)
# Clear fields that change on registration for comparison
for field in ECS_POST_REGISTRATION_FIELDS:
taskdef_1.pop(field, None)
taskdef_2.pop(field, None)
# Log differences between task definitions for debugging
if taskdef_1 != taskdef_2:
logger.debug(
"The generated task definition and the retrieved task definition are not equal."
)
# Find and log differences in keys
keys1 = set(taskdef_1.keys())
keys2 = set(taskdef_2.keys())
if keys1 != keys2:
keys_only_in_1 = keys1 - keys2
keys_only_in_2 = keys2 - keys1
if keys_only_in_1:
logger.debug(
f"Keys only in generated task definition: {keys_only_in_1}"
)
if keys_only_in_2:
logger.debug(
f"Keys only in retrieved task definition: {keys_only_in_2}"
)
# Find and log differences in values for common keys
common_keys = keys1.intersection(keys2)
for key in common_keys:
if taskdef_1[key] != taskdef_2[key]:
logger.debug(f"Value differs for key '{key}':")
logger.debug(f" Generated: {taskdef_1[key]}")
logger.debug(f" Retrieved: {taskdef_2[key]}")
return taskdef_1 == taskdef_2
    async def __aenter__(self) -> Self:
        """Start the observer before entering the base worker context."""
        await start_observer()
        return await super().__aenter__()
    async def __aexit__(self, *exc_info: Any) -> None:
        """Stop the observer, then run the base worker teardown."""
        await stop_observer()
        return await super().__aexit__(*exc_info)
| ECSWorker |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 588503,
"end": 589934
} | class ____(sgqlc.types.Interface):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("repository_discussions",)
repository_discussions = sgqlc.types.Field(
sgqlc.types.non_null(DiscussionConnection),
graphql_name="repositoryDiscussions",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
DiscussionOrder,
graphql_name="orderBy",
default={"field": "CREATED_AT", "direction": "DESC"},
),
),
(
"repository_id",
sgqlc.types.Arg(ID, graphql_name="repositoryId", default=None),
),
(
"answered",
sgqlc.types.Arg(Boolean, graphql_name="answered", default=None),
),
)
),
)
| RepositoryDiscussionAuthor |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1577434,
"end": 1577804
} | class ____(sgqlc.types.Type, HovercardContext):
"""A hovercard context with a message describing how the viewer is
related.
"""
__schema__ = github_schema
__field_names__ = ("viewer",)
viewer = sgqlc.types.Field(sgqlc.types.non_null(User), graphql_name="viewer")
"""Identifies the user who is related to this context."""
| ViewerHovercardContext |
python | getsentry__sentry | src/sentry/api/authentication.py | {
"start": 2239,
"end": 5822
} | class ____(SiloLimit):
def handle_when_unavailable(
self,
original_method: Callable[..., Any],
current_mode: SiloMode,
available_modes: Iterable[SiloMode],
) -> Callable[..., Any]:
def handle(obj: Any, *args: Any, **kwargs: Any) -> Any:
mode_str = ", ".join(str(m) for m in available_modes)
message = (
f"{type(obj)} used for an endpoint in {current_mode} mode."
f"This authenticator is available only in: {mode_str}"
)
raise self.AvailabilityError(message)
return handle
def __call__(self, decorated_obj: Any) -> Any:
if isinstance(decorated_obj, type):
if issubclass(decorated_obj, BaseAuthentication):
constructor_override = self.create_override(decorated_obj.__init__)
new_class = type(
decorated_obj.__name__,
(decorated_obj,),
{
"__init__": constructor_override,
"silo_limit": self,
},
)
new_class.__module__ = decorated_obj.__module__
return new_class
raise ValueError(
"`@AuthenticationSiloLimit` can decorate only BaseAuthentication subclasses"
)
def is_internal_relay(request, public_key):
"""
Checks if the relay is trusted (authorized for all project configs)
"""
# check legacy whitelisted public_key settings
# (we can't check specific relays but we can check public keys)
if settings.DEBUG or public_key in settings.SENTRY_RELAY_WHITELIST_PK:
return True
return is_internal_ip(request)
def is_static_relay(request):
"""
Checks if the request comes from a statically configured relay
Note: Only checks the relay_id (no public key validation is done).
"""
relay_id = get_header_relay_id(request)
static_relays = options.get("relay.static_auth")
relay_info = static_relays.get(relay_id)
return relay_info is not None
def relay_from_id(request: Request, relay_id: str) -> tuple[Relay | None, bool]:
"""
Tries to find a Relay for a given id
If the id is statically registered than no DB access will be done.
If the id is not among the statically registered relays a lookup in the DB will be performed
:return: A tuple (Relay,bool) containing the Relay model and a flag True for statically configured
relays and False for Relays configured in the DB.
"""
# first see if we have a statically configured relay and therefore we don't
# need to go to the database for it
static_relays = options.get("relay.static_auth")
relay_info = static_relays.get(relay_id)
if relay_info is not None:
# we have a statically configured Relay
relay = Relay(
relay_id=relay_id,
public_key=relay_info.get("public_key"),
is_internal=relay_info.get("internal") is True,
)
return relay, True # a statically configured Relay
else:
try:
relay = Relay.objects.get(relay_id=relay_id)
return relay, False # a Relay from the database
except Relay.DoesNotExist:
return None, False # no Relay found
def update_token_access_record(auth: object):
"""
Perform updates to token models for security purposes (i.e. 'date_last_used')
"""
if is_org_auth_token_auth(auth):
update_org_auth_token_last_used(auth, [])
| AuthenticationSiloLimit |
python | viewflow__viewflow | tests/workflow/test_fields__token.py | {
"start": 792,
"end": 869
} | class ____(models.Model): # noqa: D101
token = TokenField()
| TokenTestModel |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_managed_kafka.py | {
"start": 17372,
"end": 18691
} | class ____:
@mock.patch(MANAGED_KAFKA_PATH.format("types.ConsumerGroup.to_dict"))
@mock.patch(MANAGED_KAFKA_PATH.format("ManagedKafkaHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = ManagedKafkaUpdateConsumerGroupOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
project_id=GCP_PROJECT,
location=GCP_LOCATION,
cluster_id=TEST_CLUSTER_ID,
consumer_group_id=TEST_CONSUMER_GROUP_ID,
consumer_group={},
update_mask={},
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.update_consumer_group.assert_called_once_with(
project_id=GCP_PROJECT,
location=GCP_LOCATION,
cluster_id=TEST_CLUSTER_ID,
consumer_group_id=TEST_CONSUMER_GROUP_ID,
consumer_group={},
update_mask={},
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
| TestManagedKafkaUpdateConsumerGroupOperator |
python | django__django | tests/check_framework/test_urls.py | {
"start": 9454,
"end": 13160
} | class ____(SimpleTestCase):
@override_settings(
ROOT_URLCONF="check_framework.urls.bad_function_based_error_handlers",
)
def test_bad_function_based_handlers(self):
result = check_custom_error_handlers(None)
self.assertEqual(len(result), 4)
for code, num_params, error in zip([400, 403, 404, 500], [2, 2, 2, 1], result):
with self.subTest("handler{}".format(code)):
self.assertEqual(
error,
Error(
"The custom handler{} view 'check_framework.urls."
"bad_function_based_error_handlers.bad_handler' "
"does not take the correct number of arguments "
"(request{}).".format(
code, ", exception" if num_params == 2 else ""
),
id="urls.E007",
),
)
@override_settings(
ROOT_URLCONF="check_framework.urls.bad_class_based_error_handlers",
)
def test_bad_class_based_handlers(self):
result = check_custom_error_handlers(None)
self.assertEqual(len(result), 4)
for code, num_params, error in zip([400, 403, 404, 500], [2, 2, 2, 1], result):
with self.subTest("handler%s" % code):
self.assertEqual(
error,
Error(
"The custom handler%s view 'check_framework.urls."
"bad_class_based_error_handlers.HandlerView.as_view."
"<locals>.view' does not take the correct number of "
"arguments (request%s)."
% (
code,
", exception" if num_params == 2 else "",
),
id="urls.E007",
),
)
@override_settings(
ROOT_URLCONF="check_framework.urls.bad_error_handlers_invalid_path"
)
def test_bad_handlers_invalid_path(self):
result = check_custom_error_handlers(None)
paths = [
"django.views.bad_handler",
"django.invalid_module.bad_handler",
"invalid_module.bad_handler",
"django",
]
hints = [
"Could not import '{}'. View does not exist in module django.views.",
"Could not import '{}'. Parent module django.invalid_module does not "
"exist.",
"No module named 'invalid_module'",
"Could not import '{}'. The path must be fully qualified.",
]
for code, path, hint, error in zip([400, 403, 404, 500], paths, hints, result):
with self.subTest("handler{}".format(code)):
self.assertEqual(
error,
Error(
"The custom handler{} view '{}' could not be imported.".format(
code, path
),
hint=hint.format(path),
id="urls.E008",
),
)
@override_settings(
ROOT_URLCONF="check_framework.urls.good_function_based_error_handlers",
)
def test_good_function_based_handlers(self):
result = check_custom_error_handlers(None)
self.assertEqual(result, [])
@override_settings(
ROOT_URLCONF="check_framework.urls.good_class_based_error_handlers",
)
def test_good_class_based_handlers(self):
result = check_custom_error_handlers(None)
self.assertEqual(result, [])
| CheckCustomErrorHandlersTests |
python | pennersr__django-allauth | allauth/core/exceptions.py | {
"start": 393,
"end": 522
} | class ____(Exception):
"""
Throws when attemtping to signup while signup is closed.
"""
pass
| SignupClosedException |
python | openai__openai-python | src/openai/types/chat/parsed_function_tool_call.py | {
"start": 493,
"end": 801
} | class ____(Function):
parsed_arguments: Optional[object] = None
"""
The arguments to call the function with.
If you used `openai.pydantic_function_tool()` then this will be an
instance of the given `BaseModel`.
Otherwise, this will be the parsed JSON arguments.
"""
| ParsedFunction |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 54587,
"end": 56331
} | class ____:
def test_laplace(self):
# test against Laplace (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 1)
pdf2 = stats.laplace.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_norm(self):
# test against normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 2)
pdf2 = stats.norm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
def test_rvs(self):
# 0 < beta < 1
dist = stats.gennorm(0.5)
rng = np.random.default_rng(2204049394)
rvs = dist.rvs(size=1000, random_state=rng)
assert stats.kstest(rvs, dist.cdf).pvalue > 0.1
# beta = 1
dist = stats.gennorm(1)
rvs = dist.rvs(size=1000, random_state=rng)
rvs_laplace = stats.laplace.rvs(size=1000, random_state=rng)
assert stats.ks_2samp(rvs, rvs_laplace).pvalue > 0.1
# beta = 2
dist = stats.gennorm(2)
dist.random_state = rng
rvs = dist.rvs(size=1000, random_state=rng)
rvs_norm = stats.norm.rvs(scale=1/2**0.5, size=1000, random_state=rng)
assert stats.ks_2samp(rvs, rvs_norm).pvalue > 0.1
def test_rvs_broadcasting(self):
dist = stats.gennorm([[0.5, 1.], [2., 5.]])
rng = np.random.default_rng(2204049394)
rvs = dist.rvs(size=[1000, 2, 2], random_state=rng)
assert stats.kstest(rvs[:, 0, 0], stats.gennorm(0.5).cdf)[1] > 0.1
assert stats.kstest(rvs[:, 0, 1], stats.gennorm(1.0).cdf)[1] > 0.1
assert stats.kstest(rvs[:, 1, 0], stats.gennorm(2.0).cdf)[1] > 0.1
assert stats.kstest(rvs[:, 1, 1], stats.gennorm(5.0).cdf)[1] > 0.1
| TestGennorm |
python | pypa__warehouse | tests/unit/subscriptions/test_services.py | {
"start": 15703,
"end": 28456
} | class ____:
def test_verify_service(self):
assert verifyClass(ISubscriptionService, services.StripeSubscriptionService)
def test_service_creation(self):
session = pretend.stub()
service = services.StripeSubscriptionService(session)
assert service.db is session
def test_find_subscriptionid_nonexistent_sub(self, subscription_service):
assert subscription_service.find_subscriptionid("fake_news") is None
def test_find_subscriptionid(self, subscription_service):
organization = OrganizationFactory.create()
stripe_customer = StripeCustomerFactory.create()
OrganizationStripeCustomerFactory.create(
organization=organization, customer=stripe_customer
)
subscription = StripeSubscriptionFactory.create(customer=stripe_customer)
assert (
subscription_service.find_subscriptionid(subscription.subscription_id)
== subscription.id
)
def test_add_subscription(self, billing_service, subscription_service):
organization = OrganizationFactory.create()
stripe_customer = StripeCustomerFactory.create()
OrganizationStripeCustomerFactory.create(
organization=organization, customer=stripe_customer
)
new_subscription = subscription_service.add_subscription(
customer_id=stripe_customer.customer_id,
subscription_id="sub_12345",
subscription_item_id="si_12345",
billing_email="good@day.com",
)
subscription_service.db.flush()
subscription_from_db = subscription_service.get_subscription(
new_subscription.id
)
assert (
subscription_from_db.customer.customer_id
== new_subscription.customer.customer_id
)
assert subscription_from_db.subscription_id == new_subscription.subscription_id
assert (
subscription_from_db.subscription_price_id
== new_subscription.subscription_price_id
)
assert subscription_from_db.status == StripeSubscriptionStatus.Active.value
assert stripe_customer.billing_email == "good@day.com"
def test_update_subscription_status(self, subscription_service, db_request):
organization = OrganizationFactory.create()
stripe_customer = StripeCustomerFactory.create()
OrganizationStripeCustomerFactory.create(
organization=organization, customer=stripe_customer
)
subscription = StripeSubscriptionFactory.create(customer=stripe_customer)
assert subscription.status == StripeSubscriptionStatus.Active.value
subscription_service.update_subscription_status(
subscription.id,
status=StripeSubscriptionStatus.Active.value,
)
assert subscription.status == StripeSubscriptionStatus.Active.value
def test_delete_subscription(self, subscription_service, db_request):
organization = OrganizationFactory.create()
stripe_customer = StripeCustomerFactory.create()
OrganizationStripeCustomerFactory.create(
organization=organization, customer=stripe_customer
)
subscription = StripeSubscriptionFactory.create(customer=stripe_customer)
OrganizationStripeSubscriptionFactory.create(
organization=organization, subscription=subscription
)
subscription_service.delete_subscription(subscription.id)
subscription_service.db.flush()
assert subscription_service.get_subscription(subscription.id) is None
assert not (
db_request.db.query(OrganizationStripeSubscription)
.filter_by(subscription=subscription)
.count()
)
def test_get_subscriptions_by_customer(self, subscription_service):
organization = OrganizationFactory.create()
stripe_customer = StripeCustomerFactory.create()
OrganizationStripeCustomerFactory.create(
organization=organization, customer=stripe_customer
)
subscription = StripeSubscriptionFactory.create(customer=stripe_customer)
subscription1 = StripeSubscriptionFactory.create(customer=stripe_customer)
subscriptions = subscription_service.get_subscriptions_by_customer(
stripe_customer.customer_id
)
assert subscription in subscriptions
assert subscription1 in subscriptions
def test_delete_customer(self, subscription_service, db_request):
organization = OrganizationFactory.create()
stripe_customer = StripeCustomerFactory.create()
OrganizationStripeCustomerFactory.create(
organization=organization, customer=stripe_customer
)
subscription = StripeSubscriptionFactory.create(customer=stripe_customer)
OrganizationStripeSubscriptionFactory.create(
organization=organization, subscription=subscription
)
subscription1 = StripeSubscriptionFactory.create(customer=stripe_customer)
OrganizationStripeSubscriptionFactory.create(
organization=organization, subscription=subscription1
)
subscription_service.delete_customer(stripe_customer.customer_id)
assert subscription_service.get_subscription(subscription.id) is None
assert not (
db_request.db.query(OrganizationStripeSubscription)
.filter_by(subscription=subscription)
.count()
)
assert subscription_service.get_subscription(subscription1.id) is None
assert not (
db_request.db.query(OrganizationStripeSubscription)
.filter_by(subscription=subscription1)
.count()
)
# assert not
assert not (
db_request.db.query(OrganizationStripeCustomer)
.filter_by(organization=organization)
.count()
)
def test_update_customer_email(self, subscription_service, db_request):
organization = OrganizationFactory.create()
stripe_customer = StripeCustomerFactory.create()
OrganizationStripeCustomerFactory.create(
organization=organization, customer=stripe_customer
)
subscription_service.update_customer_email(
stripe_customer.customer_id,
billing_email="great@day.com",
)
assert stripe_customer.billing_email == "great@day.com"
def test_get_subscription_products(self, subscription_service):
subscription_product = StripeSubscriptionProductFactory.create()
subscription_product_deux = StripeSubscriptionProductFactory.create()
subscription_products = subscription_service.get_subscription_products()
assert subscription_product in subscription_products
assert subscription_product_deux in subscription_products
def test_find_subscription_productid_nonexistent_prod(self, subscription_service):
assert subscription_service.find_subscription_productid("can't_see_me") is None
def test_find_subscription_productid(self, subscription_service):
subscription_product = StripeSubscriptionProductFactory.create()
assert (
subscription_service.find_subscription_productid(
subscription_product.product_name
)
== subscription_product.id
)
assert (
subscription_service.find_subscription_productid(
subscription_product.product_id
)
== subscription_product.id
)
def test_add_subscription_product(self, subscription_service):
subscription_product = StripeSubscriptionProductFactory.create()
new_subscription_product = subscription_service.add_subscription_product(
product_name=subscription_product.product_name,
description=subscription_product.description,
product_id=subscription_product.product_id,
tax_code=subscription_product.tax_code,
)
subscription_service.db.flush()
product_from_db = subscription_service.get_subscription_product(
new_subscription_product.id
)
assert product_from_db.product_name == subscription_product.product_name
assert product_from_db.description == subscription_product.description
assert product_from_db.product_id == subscription_product.product_id
assert product_from_db.tax_code == subscription_product.tax_code
assert product_from_db.is_active
def test_update_subscription_product(self, subscription_service, db_request):
subscription_product = StripeSubscriptionProductFactory.create(
product_name="original_name"
)
subscription_service.update_subscription_product(
subscription_product.id,
product_name="updated_product_name",
)
db_subscription_product = subscription_service.get_subscription_product(
subscription_product.id
)
assert db_subscription_product.product_name == "updated_product_name"
def test_delete_subscription_product(self, subscription_service):
subscription_product = StripeSubscriptionProductFactory.create()
subscription_service.delete_subscription_product(subscription_product.id)
subscription_service.db.flush()
assert (
subscription_service.get_subscription_product(subscription_product.id)
is None
)
def test_get_subscription_prices(self, subscription_service):
subscription_price = StripeSubscriptionPriceFactory.create()
subscription_price_deux = StripeSubscriptionPriceFactory.create()
subscription_prices = subscription_service.get_subscription_prices()
assert subscription_price in subscription_prices
assert subscription_price_deux in subscription_prices
def test_find_subscriptionid_nonexistent_price(self, subscription_service):
assert subscription_service.find_subscription_priceid("john_cena") is None
def test_add_subscription_price(self, subscription_service, db_request):
subscription_product = StripeSubscriptionProductFactory.create()
subscription_service.add_subscription_price(
"price_321",
"usd",
subscription_product.id,
1500,
StripeSubscriptionPriceInterval.Month.value,
"taxerrific",
)
subscription_price_id = subscription_service.find_subscription_priceid(
"price_321"
)
subscription_price = subscription_service.get_subscription_price(
subscription_price_id
)
assert subscription_price.is_active
assert subscription_price.price_id == "price_321"
assert subscription_price.currency == "usd"
assert subscription_price.subscription_product_id == subscription_product.id
assert subscription_price.unit_amount == 1500
assert (
subscription_price.recurring == StripeSubscriptionPriceInterval.Month.value
)
assert subscription_price.tax_behavior == "taxerrific"
def test_update_subscription_price(self, subscription_service, db_request):
subscription_price = StripeSubscriptionPriceFactory.create()
assert subscription_price.price_id == "price_123"
assert (
subscription_price.recurring == StripeSubscriptionPriceInterval.Month.value
)
subscription_service.update_subscription_price(
subscription_price.id,
price_id="price_321",
recurring=StripeSubscriptionPriceInterval.Year.value,
)
assert subscription_price.price_id == "price_321"
assert (
subscription_price.recurring == StripeSubscriptionPriceInterval.Year.value
)
db_subscription_price = subscription_service.get_subscription_price(
subscription_price.id
)
assert db_subscription_price.price_id == "price_321"
assert (
db_subscription_price.recurring
== StripeSubscriptionPriceInterval.Year.value
)
def test_delete_subscription_price(self, subscription_service, db_request):
"""
Delete a subscription price
"""
subscription_price = StripeSubscriptionPriceFactory.create()
assert db_request.db.get(StripeSubscriptionPrice, subscription_price.id)
subscription_service.delete_subscription_price(subscription_price.id)
subscription_service.db.flush()
assert not (db_request.db.get(StripeSubscriptionPrice, subscription_price.id))
| TestStripeSubscriptionService |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_quote_name11.py | {
"start": 314,
"end": 1508
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("quote_name11.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
sheet_name = "Sheeté"
worksheet = workbook.add_worksheet(sheet_name)
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [46720128, 46721664]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.repeat_rows(0, 1)
worksheet.set_portrait()
worksheet.vertical_dpi = 200
chart.add_series({"values": [sheet_name, 0, 0, 4, 0]})
chart.add_series({"values": [sheet_name, 0, 1, 4, 1]})
chart.add_series({"values": [sheet_name, 0, 2, 4, 2]})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | django/test/utils.py | {
"start": 29537,
"end": 29935
} | class ____(TestContextDecorator):
"""Decorator or context manager to temporary override the script prefix."""
def __init__(self, prefix):
self.prefix = prefix
super().__init__()
def enable(self):
self.old_prefix = get_script_prefix()
set_script_prefix(self.prefix)
def disable(self):
set_script_prefix(self.old_prefix)
| override_script_prefix |
python | realpython__materials | python-isinstance/balls_v2.py | {
"start": 38,
"end": 208
} | class ____(ABC):
def __init__(self, color, shape):
self.color = color
self.shape = shape
@abstractmethod
def get_state(self):
pass
| Ball |
python | crytic__slither | slither/detectors/statements/boolean_constant_misuse.py | {
"start": 646,
"end": 4487
} | class ____(AbstractDetector):
"""
Boolean constant misuse
"""
ARGUMENT = "boolean-cst"
HELP = "Misuse of Boolean constant"
IMPACT = DetectorClassification.MEDIUM
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = (
"https://github.com/crytic/slither/wiki/Detector-Documentation#misuse-of-a-boolean-constant"
)
WIKI_TITLE = "Misuse of a Boolean constant"
WIKI_DESCRIPTION = """Detects the misuse of a Boolean constant."""
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract A {
function f(uint x) public {
// ...
if (false) { // bad!
// ...
}
// ...
}
function g(bool b) public returns (bool) {
// ...
return (b || true); // bad!
// ...
}
}
```
Boolean constants in code have only a few legitimate uses.
Other uses (in complex expressions, as conditionals) indicate either an error or, most likely, the persistence of faulty code."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = """Verify and simplify the condition."""
@staticmethod
def _detect_boolean_constant_misuses(
contract: Contract,
) -> List[Tuple[Function, Set[Node]]]: # pylint: disable=too-many-branches
"""
Detects and returns all nodes which misuse a Boolean constant.
:param contract: Contract to detect assignment within.
:return: A list of misusing nodes.
"""
# Create our result set.
results: List[Tuple[Function, Set[Node]]] = []
# Loop for each function and modifier.
for function in contract.functions_declared:
f_results = set()
# Loop for every node in this function, looking for boolean constants
for node in function.nodes:
# Do not report "while(true)"
if node.type == NodeType.IFLOOP and node.irs and len(node.irs) == 1:
ir = node.irs[0]
if isinstance(ir, Condition) and ir.value == Constant(
"True", ElementaryType("bool")
):
continue
for ir in node.irs:
if isinstance(ir, (Assignment, Call, Return, InitArray)):
# It's ok to use a bare boolean constant in these contexts
continue
if isinstance(ir, Binary) and ir.type in [
BinaryType.ADDITION,
BinaryType.EQUAL,
BinaryType.NOT_EQUAL,
]:
# Comparing to a Boolean constant is dubious style, but harmless
# Equal is catch by another detector (informational severity)
continue
for r in ir.read:
if isinstance(r, Constant) and isinstance(r.value, bool):
f_results.add(node)
results.append((function, f_results))
# Return the resulting set of nodes with improper uses of Boolean constants
return results
def _detect(self) -> List[Output]:
"""
Detect Boolean constant misuses
"""
results = []
for contract in self.contracts:
boolean_constant_misuses = self._detect_boolean_constant_misuses(contract)
for (func, nodes) in boolean_constant_misuses:
for node in nodes:
info: DETECTOR_INFO = [
func,
" uses a Boolean constant improperly:\n\t-",
node,
"\n",
]
res = self.generate_result(info)
results.append(res)
return results
| BooleanConstantMisuse |
python | getsentry__sentry | tests/sentry/hybridcloud/services/test_control_organization_provisioning.py | {
"start": 4054,
"end": 7520
} | class ____(TestControlOrganizationProvisioningBase):
def test_organization_provisioning_happy_path(self) -> None:
rpc_org_slug = self.provision_organization()
self.assert_slug_reservation_and_org_exist(
rpc_org_slug=rpc_org_slug, user_id=self.provision_user.id
)
def test_organization_provisioning_before_user_provisioning(self) -> None:
provisioning_options = self.generate_provisioning_args(
name="sentry", slug="sentry", email="test-owner@sentry.io", default_team=True
)
slug = control_organization_provisioning_rpc_service.provision_organization(
region_name="us", org_provision_args=provisioning_options
)
self.assert_slug_reservation_and_org_exist(
rpc_org_slug=slug,
)
def test_organization_already_provisioned_for_different_user(self) -> None:
user = self.create_user()
conflicting_slug = self.provisioning_args.provision_options.slug
with assume_test_silo_mode(SiloMode.REGION):
owner_of_conflicting_org = self.create_user()
region_only_organization = self.create_organization(
name="conflicting_org", slug=conflicting_slug, owner=owner_of_conflicting_org
)
# De-register the conflicting organization to create the collision
with (
assume_test_silo_mode(SiloMode.CONTROL),
outbox_context(
transaction.atomic(using=router.db_for_write(OrganizationSlugReservation))
),
):
OrganizationSlugReservation.objects.filter(
organization_id=region_only_organization.id
).delete()
if SiloMode.get_current_mode() == SiloMode.REGION:
with pytest.raises(RpcRemoteException):
self.provision_organization()
else:
with pytest.raises(OrganizationSlugReservation.DoesNotExist):
self.provision_organization()
with assume_test_silo_mode(SiloMode.CONTROL):
assert not OrganizationSlugReservation.objects.filter(slug=conflicting_slug).exists()
assert not OrganizationSlugReservation.objects.filter(user_id=user.id).exists()
self.assert_organization_has_not_changed(region_only_organization)
# TODO(Gabe): Add testing for slug replica status during this failure case
# and ensure that no replica exists for the slug post-deletion
def test_generates_unique_slugs_when_conflicted(self) -> None:
previous_org_slug_reservation = self.provision_organization()
new_org_slug_reservation = self.provision_organization()
assert new_org_slug_reservation != previous_org_slug_reservation
assert self.provisioning_args.provision_options.slug in new_org_slug_reservation.slug
assert new_org_slug_reservation.slug != self.provisioning_args.provision_options.slug
def test_rewrites_numeric_slug_if_prevent_numeric_option_enabled(self) -> None:
numeric_slug = "123456"
self.provisioning_args.provision_options.slug = numeric_slug
org_slug_reservation = self.provision_organization()
assert org_slug_reservation.slug != numeric_slug
self.assert_slug_reservation_and_org_exist(
rpc_org_slug=org_slug_reservation, user_id=self.provision_user.id
)
@all_silo_test(regions=create_test_regions("us"))
| TestControlOrganizationProvisioning |
python | pytorch__pytorch | torch/distributed/fsdp/_common_utils.py | {
"start": 1823,
"end": 3548
} | class ____:
"""
This is a simple abstraction for FSDP computing devices,
which enables custom backends that implement CUDA-like
semantics to be integrated with FSDP.
"""
def __init__(self, device: torch.device, backend: Any = None):
if backend is None:
try:
self.__backend = getattr(torch, device.type)
# pyrefly: ignore [read-only]
self.__device = device
except AttributeError as exc:
raise AttributeError(
f"Device '{device}' does not have a corresponding backend registered as 'torch.{device.type}'."
) from exc
else:
self.__backend = backend
@classmethod
def from_device(cls, device: torch.device) -> "_FSDPDeviceHandle":
"""
Return a device handle corresponding to the device, and through this handle,
operations with the same semantics as CUDA can be performed on the device.
Just return torch.cuda if the device is cuda to make attribute-access faster.
Custom backend must first register a module with the same name with {device.type} on torch.
"""
if device.type == "cuda":
return cast(_FSDPDeviceHandle, torch.cuda)
elif device.type == "mtia":
return cast(_FSDPDeviceHandle, torch.mtia)
return cls(device)
def __getattr__(self, name: str, /) -> Any:
try:
return getattr(self.__backend, name)
except AttributeError as exc:
raise AttributeError(
f"Custom backend '{self.__device.type}' not implement 'torch.{self.__device.type}.{name}'"
) from exc
| _FSDPDeviceHandle |
python | numba__numba | numba/core/untyped_passes.py | {
"start": 15495,
"end": 15891
} | class ____(FunctionPass):
"""Find calls to `numba.literally()` and signal if its requirement is not
satisfied.
"""
_name = "find_literally"
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
find_literally_calls(state.func_ir, state.args)
return False
@register_pass(mutates_CFG=True, analysis_only=False)
| FindLiterallyCalls |
python | huggingface__transformers | src/transformers/models/megatron_bert/modeling_megatron_bert.py | {
"start": 19355,
"end": 20030
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->MegatronBert
| MegatronBertPooler |
python | PrefectHQ__prefect | tests/test_exceptions.py | {
"start": 4285,
"end": 4928
} | class ____:
def test_attribute_error_on_getattr(self):
import prefect
with pytest.raises(
AttributeError, match=r"module prefect has no attribute foo"
):
prefect.foo
def test_import_error_on_from_import(self):
with pytest.raises(
ImportError, match=r"cannot import name 'foo' from 'prefect' \(.*\)"
):
from prefect import foo # noqa
def test_module_not_found_erorr_on_import(self):
with pytest.raises(ModuleNotFoundError, match=r"No module named 'prefect.foo'"):
import prefect.foo # noqa
| TestPrefectModuleImportExceptions |
python | ZoranPandovski__al-go-rithms | sort/python/external-sort.py | {
"start": 119,
"end": 1103
} | class ____(object):
BLOCK_FILENAME_FORMAT = 'block_{0}.dat'
def __init__(self, filename):
self.filename = filename
self.block_filenames = []
def write_block(self, data, block_number):
filename = self.BLOCK_FILENAME_FORMAT.format(block_number)
file = open(filename, 'w')
file.write(data)
file.close()
self.block_filenames.append(filename)
def get_block_filenames(self):
return self.block_filenames
def split(self, block_size, sort_key=None):
file = open(self.filename, 'r')
i = 0
while True:
lines = file.readlines(block_size)
if lines == []:
break
if sort_key is None:
lines.sort()
else:
lines.sort(key=sort_key)
self.write_block(''.join(lines), i)
i += 1
def cleanup(self):
map(lambda f: os.remove(f), self.block_filenames)
| FileSplitter |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/distributions.py | {
"start": 1488,
"end": 2468
} | class ____(DistInstance):
def __init__(self, mean, std):
super().__init__()
self.mean = mean
self.std = std
def sample(self):
sample = self.mean + torch.randn_like(self.mean) * self.std
return sample
def deterministic_sample(self):
return self.mean
def log_prob(self, value):
var = self.std**2
log_scale = torch.log(self.std + EPSILON)
return (
-((value - self.mean) ** 2) / (2 * var + EPSILON)
- log_scale
- math.log(math.sqrt(2 * math.pi))
)
def pdf(self, value):
log_prob = self.log_prob(value)
return torch.exp(log_prob)
def entropy(self):
return torch.mean(
0.5 * torch.log(2 * math.pi * math.e * self.std**2 + EPSILON),
dim=1,
keepdim=True,
) # Use equivalent behavior to TF
def exported_model_output(self):
return self.sample()
| GaussianDistInstance |
python | tensorflow__tensorflow | tensorflow/python/ops/special_math_ops_test.py | {
"start": 7051,
"end": 8782
} | class ____(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_dawsn_boundary(self):
self.assertAllClose(0., special_math_ops.dawsn(0.))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.dawsn(np.nan))))
@parameterized.parameters(np.float32, np.float64)
def test_dawsn_odd(self, dtype):
x = np.random.uniform(-100., 100., size=int(1e4)).astype(dtype)
self.assertAllClose(
self.evaluate(special_math_ops.dawsn(x)),
self.evaluate(-special_math_ops.dawsn(-x)))
@parameterized.parameters(np.float32, np.float64)
def test_dawsn_small(self, dtype):
x = np.random.uniform(-1., 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.dawsn(x), self.evaluate(special_math_ops.dawsn(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_dawsn_larger(self, dtype):
x = np.random.uniform(1., 100., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.dawsn(x), self.evaluate(special_math_ops.dawsn(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
def test_dawsn_gradient(self):
inputs = [np.random.uniform(-50., 50., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.dawsn, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
@test_util.run_all_in_graph_and_eager_modes
| DawsnTest |
python | pytorch__pytorch | test/dynamo/test_base_hop.py | {
"start": 508,
"end": 798
} | class ____(torch._higher_order_ops.BaseHOP):
def __init__(self):
super().__init__("invoke_quant_test")
def __call__(self, subgraph, *operands, scheme):
return super().__call__(subgraph, *operands, scheme=scheme)
invoke_quant_test = InvokeQuantTest()
| InvokeQuantTest |
python | getsentry__sentry | src/sentry/integrations/jira_server/integration.py | {
"start": 4917,
"end": 5416
} | class ____(TypedDict):
name: str
type: str
label: str
help: str | str
placeholder: NotRequired[str]
choices: NotRequired[list[tuple[str, str]]]
addButtonText: NotRequired[str]
addDropdown: NotRequired[_AddDropDown]
mappedSelectors: NotRequired[_MappedSelectors]
columnLabels: NotRequired[_ColumnLabels]
mappedColumnLabel: NotRequired[str]
formatMessageValue: NotRequired[bool]
disabled: NotRequired[bool]
disabledReason: NotRequired[str]
| _Config |
python | ethereum__web3.py | web3/exceptions.py | {
"start": 4004,
"end": 4133
} | class ____(Web3Exception):
"""
Raised when a constructor function doesn't exist in contract.
"""
| ABIConstructorNotFound |
python | encode__django-rest-framework | tests/schemas/test_coreapi.py | {
"start": 1392,
"end": 1462
} | class ____:
def is_authenticated(self):
return True
| MockUser |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_qt.py | {
"start": 7996,
"end": 22053
} | class ____(FigureCanvasBase, QtWidgets.QWidget):
required_interactive_framework = "qt"
_timer_cls = TimerQT
manager_class = _api.classproperty(lambda cls: FigureManagerQT)
buttond = {
getattr(QtCore.Qt.MouseButton, k): v for k, v in [
("LeftButton", MouseButton.LEFT),
("RightButton", MouseButton.RIGHT),
("MiddleButton", MouseButton.MIDDLE),
("XButton1", MouseButton.BACK),
("XButton2", MouseButton.FORWARD),
]
}
def __init__(self, figure=None):
_create_qApp()
super().__init__(figure=figure)
self._draw_pending = False
self._is_drawing = False
self._draw_rect_callback = lambda painter: None
self._in_resize_event = False
self.setAttribute(QtCore.Qt.WidgetAttribute.WA_OpaquePaintEvent)
self.setMouseTracking(True)
self.resize(*self.get_width_height())
palette = QtGui.QPalette(QtGui.QColor("white"))
self.setPalette(palette)
@QtCore.Slot()
def _update_pixel_ratio(self):
if self._set_device_pixel_ratio(
self.devicePixelRatioF() or 1): # rarely, devicePixelRatioF=0
# The easiest way to resize the canvas is to emit a resizeEvent
# since we implement all the logic for resizing the canvas for
# that event.
event = QtGui.QResizeEvent(self.size(), self.size())
self.resizeEvent(event)
@QtCore.Slot(QtGui.QScreen)
def _update_screen(self, screen):
# Handler for changes to a window's attached screen.
self._update_pixel_ratio()
if screen is not None:
screen.physicalDotsPerInchChanged.connect(self._update_pixel_ratio)
screen.logicalDotsPerInchChanged.connect(self._update_pixel_ratio)
def eventFilter(self, source, event):
if event.type() == QtCore.QEvent.Type.DevicePixelRatioChange:
self._update_pixel_ratio()
return super().eventFilter(source, event)
def showEvent(self, event):
# Set up correct pixel ratio, and connect to any signal changes for it,
# once the window is shown (and thus has these attributes).
window = self.window().windowHandle()
current_version = tuple(int(x) for x in QtCore.qVersion().split('.', 2)[:2])
if current_version >= (6, 6):
self._update_pixel_ratio()
window.installEventFilter(self)
else:
window.screenChanged.connect(self._update_screen)
self._update_screen(window.screen())
def set_cursor(self, cursor):
# docstring inherited
self.setCursor(_api.check_getitem(cursord, cursor=cursor))
def mouseEventCoords(self, pos=None):
"""
Calculate mouse coordinates in physical pixels.
Qt uses logical pixels, but the figure is scaled to physical
pixels for rendering. Transform to physical pixels so that
all of the down-stream transforms work as expected.
Also, the origin is different and needs to be corrected.
"""
if pos is None:
pos = self.mapFromGlobal(QtGui.QCursor.pos())
elif hasattr(pos, "position"): # qt6 QtGui.QEvent
pos = pos.position()
elif hasattr(pos, "pos"): # qt5 QtCore.QEvent
pos = pos.pos()
# (otherwise, it's already a QPoint)
x = pos.x()
# flip y so y=0 is bottom of canvas
y = self.figure.bbox.height / self.device_pixel_ratio - pos.y()
return x * self.device_pixel_ratio, y * self.device_pixel_ratio
def enterEvent(self, event):
# Force querying of the modifiers, as the cached modifier state can
# have been invalidated while the window was out of focus.
mods = QtWidgets.QApplication.instance().queryKeyboardModifiers()
if self.figure is None:
return
LocationEvent("figure_enter_event", self,
*self.mouseEventCoords(event),
modifiers=self._mpl_modifiers(mods),
guiEvent=event)._process()
def leaveEvent(self, event):
QtWidgets.QApplication.restoreOverrideCursor()
if self.figure is None:
return
LocationEvent("figure_leave_event", self,
*self.mouseEventCoords(),
modifiers=self._mpl_modifiers(),
guiEvent=event)._process()
def mousePressEvent(self, event):
button = self.buttond.get(event.button())
if button is not None and self.figure is not None:
MouseEvent("button_press_event", self,
*self.mouseEventCoords(event), button,
modifiers=self._mpl_modifiers(),
guiEvent=event)._process()
def mouseDoubleClickEvent(self, event):
button = self.buttond.get(event.button())
if button is not None and self.figure is not None:
MouseEvent("button_press_event", self,
*self.mouseEventCoords(event), button, dblclick=True,
modifiers=self._mpl_modifiers(),
guiEvent=event)._process()
def mouseMoveEvent(self, event):
if self.figure is None:
return
MouseEvent("motion_notify_event", self,
*self.mouseEventCoords(event),
buttons=self._mpl_buttons(event.buttons()),
modifiers=self._mpl_modifiers(),
guiEvent=event)._process()
def mouseReleaseEvent(self, event):
button = self.buttond.get(event.button())
if button is not None and self.figure is not None:
MouseEvent("button_release_event", self,
*self.mouseEventCoords(event), button,
modifiers=self._mpl_modifiers(),
guiEvent=event)._process()
def wheelEvent(self, event):
# from QWheelEvent::pixelDelta doc: pixelDelta is sometimes not
# provided (`isNull()`) and is unreliable on X11 ("xcb").
if (event.pixelDelta().isNull()
or QtWidgets.QApplication.instance().platformName() == "xcb"):
steps = event.angleDelta().y() / 120
else:
steps = event.pixelDelta().y()
if steps and self.figure is not None:
MouseEvent("scroll_event", self,
*self.mouseEventCoords(event), step=steps,
modifiers=self._mpl_modifiers(),
guiEvent=event)._process()
def keyPressEvent(self, event):
key = self._get_key(event)
if key is not None and self.figure is not None:
KeyEvent("key_press_event", self,
key, *self.mouseEventCoords(),
guiEvent=event)._process()
def keyReleaseEvent(self, event):
key = self._get_key(event)
if key is not None and self.figure is not None:
KeyEvent("key_release_event", self,
key, *self.mouseEventCoords(),
guiEvent=event)._process()
def resizeEvent(self, event):
if self._in_resize_event: # Prevent PyQt6 recursion
return
if self.figure is None:
return
self._in_resize_event = True
try:
w = event.size().width() * self.device_pixel_ratio
h = event.size().height() * self.device_pixel_ratio
dpival = self.figure.dpi
winch = w / dpival
hinch = h / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
# pass back into Qt to let it finish
QtWidgets.QWidget.resizeEvent(self, event)
# emit our resize events
ResizeEvent("resize_event", self)._process()
self.draw_idle()
finally:
self._in_resize_event = False
def sizeHint(self):
w, h = self.get_width_height()
return QtCore.QSize(w, h)
def minimumSizeHint(self):
return QtCore.QSize(10, 10)
@staticmethod
def _mpl_buttons(buttons):
buttons = _to_int(buttons)
# State *after* press/release.
return {button for mask, button in FigureCanvasQT.buttond.items()
if _to_int(mask) & buttons}
@staticmethod
def _mpl_modifiers(modifiers=None, *, exclude=None):
if modifiers is None:
modifiers = QtWidgets.QApplication.instance().keyboardModifiers()
modifiers = _to_int(modifiers)
# get names of the pressed modifier keys
# 'control' is named 'control' when a standalone key, but 'ctrl' when a
# modifier
# bit twiddling to pick out modifier keys from modifiers bitmask,
# if exclude is a MODIFIER, it should not be duplicated in mods
return [SPECIAL_KEYS[key].replace('control', 'ctrl')
for mask, key in _MODIFIER_KEYS
if exclude != key and modifiers & mask]
def _get_key(self, event):
event_key = event.key()
mods = self._mpl_modifiers(exclude=event_key)
try:
# for certain keys (enter, left, backspace, etc) use a word for the
# key, rather than Unicode
key = SPECIAL_KEYS[event_key]
except KeyError:
# Unicode defines code points up to 0x10ffff (sys.maxunicode)
# QT will use Key_Codes larger than that for keyboard keys that are
# not Unicode characters (like multimedia keys)
# skip these
# if you really want them, you should add them to SPECIAL_KEYS
if event_key > sys.maxunicode:
return None
key = chr(event_key)
# qt delivers capitalized letters. fix capitalization
# note that capslock is ignored
if 'shift' in mods:
mods.remove('shift')
else:
key = key.lower()
return '+'.join(mods + [key])
def flush_events(self):
# docstring inherited
QtWidgets.QApplication.instance().processEvents()
def start_event_loop(self, timeout=0):
# docstring inherited
if hasattr(self, "_event_loop") and self._event_loop.isRunning():
raise RuntimeError("Event loop already running")
self._event_loop = event_loop = QtCore.QEventLoop()
if timeout > 0:
_ = QtCore.QTimer.singleShot(int(timeout * 1000), event_loop.quit)
with _allow_interrupt_qt(event_loop):
qt_compat._exec(event_loop)
def stop_event_loop(self, event=None):
# docstring inherited
if hasattr(self, "_event_loop"):
self._event_loop.quit()
def draw(self):
"""Render the figure, and queue a request for a Qt draw."""
# The renderer draw is done here; delaying causes problems with code
# that uses the result of the draw() to update plot elements.
if self._is_drawing:
return
with cbook._setattr_cm(self, _is_drawing=True):
super().draw()
self.update()
def draw_idle(self):
"""Queue redraw of the Agg buffer and request Qt paintEvent."""
# The Agg draw needs to be handled by the same thread Matplotlib
# modifies the scene graph from. Post Agg draw request to the
# current event loop in order to ensure thread affinity and to
# accumulate multiple draw requests from event handling.
# TODO: queued signal connection might be safer than singleShot
if not (getattr(self, '_draw_pending', False) or
getattr(self, '_is_drawing', False)):
self._draw_pending = True
QtCore.QTimer.singleShot(0, self._draw_idle)
def blit(self, bbox=None):
# docstring inherited
if bbox is None and self.figure:
bbox = self.figure.bbox # Blit the entire canvas if bbox is None.
# repaint uses logical pixels, not physical pixels like the renderer.
l, b, w, h = (int(pt / self.device_pixel_ratio) for pt in bbox.bounds)
t = b + h
self.repaint(l, self.rect().height() - t, w, h)
def _draw_idle(self):
with self._idle_draw_cntx():
if not self._draw_pending:
return
self._draw_pending = False
if _isdeleted(self) or self.height() <= 0 or self.width() <= 0:
return
try:
self.draw()
except Exception:
# Uncaught exceptions are fatal for PyQt5, so catch them.
traceback.print_exc()
def drawRectangle(self, rect):
# Draw the zoom rectangle to the QPainter. _draw_rect_callback needs
# to be called at the end of paintEvent.
if rect is not None:
x0, y0, w, h = (int(pt / self.device_pixel_ratio) for pt in rect)
x1 = x0 + w
y1 = y0 + h
def _draw_rect_callback(painter):
pen = QtGui.QPen(
QtGui.QColor("black"),
1 / self.device_pixel_ratio
)
pen.setDashPattern([3, 3])
for color, offset in [
(QtGui.QColor("black"), 0),
(QtGui.QColor("white"), 3),
]:
pen.setDashOffset(offset)
pen.setColor(color)
painter.setPen(pen)
# Draw the lines from x0, y0 towards x1, y1 so that the
# dashes don't "jump" when moving the zoom box.
painter.drawLine(x0, y0, x0, y1)
painter.drawLine(x0, y0, x1, y0)
painter.drawLine(x0, y1, x1, y1)
painter.drawLine(x1, y0, x1, y1)
else:
def _draw_rect_callback(painter):
return
self._draw_rect_callback = _draw_rect_callback
self.update()
| FigureCanvasQT |
python | walkccc__LeetCode | solutions/1758. Minimum Changes To Make Alternating Binary String/1758.py | {
"start": 0,
"end": 241
} | class ____:
def minOperations(self, s: str) -> int:
# the cost to make s "1010"
cost10 = sum(int(c) == i % 2 for i, c in enumerate(s))
# the cost to make s "0101"
cost01 = len(s) - cost10
return min(cost10, cost01)
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/control_flow_v2_func_graphs.py | {
"start": 1784,
"end": 1963
} | class ____(ControlFlowFuncGraph):
"""FuncGraph for the condition of tf.while_loop().
This is used to distinguish while conditions from other functions.
"""
| WhileCondFuncGraph |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/command_parser_test.py | {
"start": 14803,
"end": 15941
} | class ____(test_util.TensorFlowTestCase):
def testParseNoUnitWorks(self):
self.assertEqual(0, command_parser.parse_readable_time_str("0"))
self.assertEqual(100, command_parser.parse_readable_time_str("100 "))
self.assertEqual(25, command_parser.parse_readable_time_str(" 25 "))
def testParseSeconds(self):
self.assertEqual(1e6, command_parser.parse_readable_time_str("1 s"))
self.assertEqual(2e6, command_parser.parse_readable_time_str("2s"))
def testParseMicros(self):
self.assertEqual(2, command_parser.parse_readable_time_str("2us"))
def testParseMillis(self):
self.assertEqual(2e3, command_parser.parse_readable_time_str("2ms"))
def testParseUnsupportedUnitRaisesException(self):
with self.assertRaisesRegex(ValueError, r".*float.*2us.*"):
command_parser.parse_readable_time_str("2uss")
with self.assertRaisesRegex(ValueError, r".*float.*2m.*"):
command_parser.parse_readable_time_str("2m")
with self.assertRaisesRegex(
ValueError, r"Invalid time -1. Time value must be positive."):
command_parser.parse_readable_time_str("-1s")
| ParseReadableTimeStrTest |
python | openai__openai-python | src/openai/resources/containers/files/content.py | {
"start": 5492,
"end": 5713
} | class ____:
def __init__(self, content: Content) -> None:
self._content = content
self.retrieve = _legacy_response.to_raw_response_wrapper(
content.retrieve,
)
| ContentWithRawResponse |
python | wandb__wandb | wandb/sdk/lib/filesystem.py | {
"start": 3541,
"end": 14451
} | class ____(WriteSerializingFile):
def __init__(self, f: BinaryIO) -> None:
super().__init__(f=f)
self._buff = b""
def write(self, data) -> None: # type: ignore
lines = re.split(b"\r\n|\n", data)
ret = [] # type: ignore
for line in lines:
if line[:1] == b"\r":
if ret:
ret.pop()
elif self._buff:
self._buff = b""
line = line.split(b"\r")[-1]
if line:
ret.append(line)
if self._buff:
ret.insert(0, self._buff)
if ret:
self._buff = ret.pop()
super().write(b"\n".join(ret) + b"\n")
def close(self) -> None:
if self._buff:
super().write(self._buff)
super().close()
def copy_or_overwrite_changed(source_path: StrPath, target_path: StrPath) -> StrPath:
"""Copy source_path to target_path, unless it already exists with the same mtime.
We liberally add write permissions to deal with the case of multiple users needing
to share the same cache or run directory.
Args:
source_path: The path to the file to copy.
target_path: The path to copy the file to.
Returns:
The path to the copied file (which may be different from target_path).
"""
return_type = type(target_path)
target_path = system_preferred_path(target_path, warn=True)
need_copy = (
not os.path.isfile(target_path)
or os.stat(source_path).st_mtime != os.stat(target_path).st_mtime
)
permissions_plus_write = os.stat(source_path).st_mode
if need_copy:
dir_name, file_name = os.path.split(target_path)
target_path = os.path.join(mkdir_allow_fallback(dir_name), file_name)
try:
# Use copy2 to preserve file metadata (including modified time).
shutil.copy2(source_path, target_path)
except PermissionError:
# If the file is read-only try to make it writable.
try:
os.chmod(target_path, permissions_plus_write)
shutil.copy2(source_path, target_path)
except PermissionError as e:
raise PermissionError("Unable to overwrite '{target_path!s}'") from e
# Prevent future permissions issues by universal write permissions now.
os.chmod(target_path, permissions_plus_write)
return return_type(target_path) # type: ignore # 'os.PathLike' is abstract.
@contextlib.contextmanager
def safe_open(
path: StrPath, mode: str = "r", *args: Any, **kwargs: Any
) -> Generator[IO, None, None]:
"""Open a file, ensuring any changes only apply atomically after close.
This context manager ensures that even unsuccessful writes will not leave a "dirty"
file or overwrite good data, and that all temp data is cleaned up.
The semantics and behavior are intended to be nearly identical to the built-in
open() function. Differences:
- It creates any parent directories that don't exist, rather than raising.
- In 'x' mode, it checks at the beginning AND end of the write and fails if the
file exists either time.
"""
path = Path(path).resolve()
path.parent.mkdir(parents=True, exist_ok=True)
if "x" in mode and path.exists():
raise FileExistsError(f"{path!s} already exists")
if "r" in mode and "+" not in mode:
# This is read-only, so we can just open the original file.
# TODO (hugh): create a reflink and read from that.
with path.open(mode, *args, **kwargs) as f:
yield f
return
with tempfile.TemporaryDirectory(dir=path.parent) as tmp_dir:
tmp_path = Path(tmp_dir) / path.name
if ("r" in mode or "a" in mode) and path.exists():
# We need to copy the original file in order to support reads and appends.
# TODO (hugh): use reflinks to avoid the copy on platforms that support it.
shutil.copy2(path, tmp_path)
with tmp_path.open(mode, *args, **kwargs) as f:
yield f
f.flush()
os.fsync(f.fileno())
if "x" in mode:
# Ensure that if another process has beaten us to writing the file we raise
# rather than overwrite. os.link() atomically creates a hard link to the
# target file and will raise FileExistsError if the target already exists.
os.link(tmp_path, path)
os.unlink(tmp_path)
else:
tmp_path.replace(path)
def safe_copy(source_path: StrPath, target_path: StrPath) -> StrPath:
"""Copy a file atomically.
Copying is not usually atomic, and on operating systems that allow multiple
writers to the same file, the result can get corrupted. If two writers copy
to the same file, the contents can become interleaved.
We mitigate the issue somewhat by copying to a temporary file first and
then renaming. Renaming is atomic: if process 1 renames file A to X and
process 2 renames file B to X, then X will either contain the contents
of A or the contents of B, not some mixture of both.
"""
# TODO (hugh): check that there is enough free space.
output_path = Path(target_path).resolve()
output_path.parent.mkdir(parents=True, exist_ok=True)
with tempfile.TemporaryDirectory(dir=output_path.parent) as tmp_dir:
tmp_path = (Path(tmp_dir) / Path(source_path).name).with_suffix(".tmp")
shutil.copy2(source_path, tmp_path)
tmp_path.replace(output_path)
return target_path
def _reflink_linux(existing_path: Path, new_path: Path) -> None:
"""Create a reflink to `existing_path` at `new_path` on Linux."""
import fcntl
FICLONE = 0x40049409 # magic number from <linux/fs.h> # noqa: N806
with open(existing_path, "rb") as t_f, open(new_path, "wb+") as l_f:
fcntl.ioctl(l_f.fileno(), FICLONE, t_f.fileno())
def _reflink_macos(existing_path: Path, new_path: Path) -> None:
try:
clib = ctypes.CDLL("libc.dylib", use_errno=True)
except (FileNotFoundError, OSError) as e:
if ctypes.get_errno() != errno.ENOENT and not isinstance(e, FileNotFoundError):
raise
# Before macOS 11 (<Nov 2020) clib was in libSystem.dylib, so we can try there.
clib = ctypes.CDLL("/usr/lib/libSystem.dylib", use_errno=True)
try:
clonefile = clib.clonefile
except AttributeError:
raise OSError(errno.ENOTSUP, "'clonefile' is not available on this system")
clonefile.argtypes = (ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int)
clonefile.restype = ctypes.c_int
if clonefile(os.fsencode(existing_path), os.fsencode(new_path), ctypes.c_int(0)):
# Anything other than 0 is an error.
err = ctypes.get_errno()
raise OSError(err, os.strerror(err), existing_path)
def reflink(existing_path: StrPath, new_path: StrPath, overwrite: bool = False) -> None:
"""Create a reflink to `existing_path` at `new_path`.
A reflink (reflective link) is a copy-on-write reference to a file. Once linked, the
file and link are both "real" files (not symbolic or hard links) and each can be
modified independently without affecting the other; however, they share the same
underlying data blocks on disk so until one is modified they are "zero-cost" copies.
Reflinks have all the functionality of copies, so we should use them wherever they
are supported if we would otherwise copy a file. (This is not particularly radical--
GNU `cp` defaults to `reflink=auto`, using it whenever available) However, support
for them is limited to a small number of filesystems. They should work on:
- Linux with a Btrfs or XFS filesystem (NOT ext4)
- macOS 10.13 or later with an APFS filesystem (called clone files)
Reflinks are also supported on Solaris and Windows with ReFSv2, but we haven't
implemented support for them.
Like hard links, a reflink can only be created on the same filesystem as the target.
"""
if platform.system() == "Linux":
link_fn = _reflink_linux
elif platform.system() == "Darwin":
link_fn = _reflink_macos
else:
raise OSError(
errno.ENOTSUP, f"reflinks are not supported on {platform.system()}"
)
new_path = Path(new_path).resolve()
existing_path = Path(existing_path).resolve()
if new_path.exists():
if not overwrite:
raise FileExistsError(f"{new_path} already exists")
logger.warning(f"Overwriting existing file {new_path}.")
new_path.unlink()
# Create any missing parent directories.
new_path.parent.mkdir(parents=True, exist_ok=True)
try:
link_fn(existing_path, new_path)
except OSError as e:
base_msg = f"failed to create reflink from {existing_path} to {new_path}."
if e.errno in (errno.EPERM, errno.EACCES):
raise PermissionError(f"Insufficient permissions; {base_msg}") from e
if e.errno == errno.ENOENT:
raise FileNotFoundError(f"File not found; {base_msg}") from e
if e.errno == errno.EXDEV:
raise ValueError(f"Cannot link across filesystems; {base_msg}") from e
if e.errno == errno.EISDIR:
raise IsADirectoryError(f"Cannot reflink a directory; {base_msg}") from e
if e.errno in (errno.EOPNOTSUPP, errno.ENOTSUP):
raise OSError(
errno.ENOTSUP,
f"Filesystem does not support reflinks; {base_msg}",
) from e
if e.errno == errno.EINVAL:
raise ValueError(f"Cannot link file ranges; {base_msg}") from e
raise
def check_exists(path: StrPath) -> StrPath | None:
"""Look for variations of `path` and return the first found.
This exists to support former behavior around system-dependent paths; we used to use
':' in Artifact paths unless we were on Windows, but this has issues when e.g. a
Linux machine is accessing an NTFS filesystem; we might need to look for the
alternate path. This checks all the possible directories we would consider creating.
"""
for dest in path_fallbacks(path):
if os.path.exists(dest):
return Path(dest) if isinstance(path, Path) else dest
return None
def system_preferred_path(path: StrPath, warn: bool = False) -> StrPath:
"""Replace ':' with '-' in paths on Windows.
Args:
path: The path to convert.
warn: Whether to warn if ':' is replaced.
"""
if platform.system() != "Windows":
return path
head, tail = os.path.splitdrive(path)
if warn and ":" in tail:
logger.warning(f"Replacing ':' in {tail} with '-'")
new_path = head + tail.replace(":", "-")
return Path(new_path) if isinstance(path, Path) else new_path
@dataclasses.dataclass
| CRDedupedFile |
python | sympy__sympy | sympy/physics/quantum/trace.py | {
"start": 2025,
"end": 6397
} | class ____(Expr):
""" Generic Trace operation than can trace over:
a) SymPy matrix
b) operators
c) outer products
Parameters
==========
o : operator, matrix, expr
i : tuple/list indices (optional)
Examples
========
# TODO: Need to handle printing
a) Trace(A+B) = Tr(A) + Tr(B)
b) Trace(scalar*Operator) = scalar*Trace(Operator)
>>> from sympy.physics.quantum.trace import Tr
>>> from sympy import symbols, Matrix
>>> a, b = symbols('a b', commutative=True)
>>> A, B = symbols('A B', commutative=False)
>>> Tr(a*A,[2])
a*Tr(A)
>>> m = Matrix([[1,2],[1,1]])
>>> Tr(m)
2
"""
def __new__(cls, *args):
""" Construct a Trace object.
Parameters
==========
args = SymPy expression
indices = tuple/list if indices, optional
"""
# expect no indices,int or a tuple/list/Tuple
if (len(args) == 2):
if not isinstance(args[1], (list, Tuple, tuple)):
indices = Tuple(args[1])
else:
indices = Tuple(*args[1])
expr = args[0]
elif (len(args) == 1):
indices = Tuple()
expr = args[0]
else:
raise ValueError("Arguments to Tr should be of form "
"(expr[, [indices]])")
if isinstance(expr, Matrix):
return expr.trace()
elif hasattr(expr, 'trace') and callable(expr.trace):
#for any objects that have trace() defined e.g numpy
return expr.trace()
elif isinstance(expr, Add):
return Add(*[Tr(arg, indices) for arg in expr.args])
elif isinstance(expr, Mul):
c_part, nc_part = expr.args_cnc()
if len(nc_part) == 0:
return Mul(*c_part)
else:
obj = Expr.__new__(cls, Mul(*nc_part), indices )
#this check is needed to prevent cached instances
#being returned even if len(c_part)==0
return Mul(*c_part)*obj if len(c_part) > 0 else obj
elif isinstance(expr, Pow):
if (_is_scalar(expr.args[0]) and
_is_scalar(expr.args[1])):
return expr
else:
return Expr.__new__(cls, expr, indices)
else:
if (_is_scalar(expr)):
return expr
return Expr.__new__(cls, expr, indices)
@property
def kind(self):
expr = self.args[0]
expr_kind = expr.kind
return expr_kind.element_kind
def doit(self, **hints):
""" Perform the trace operation.
#TODO: Current version ignores the indices set for partial trace.
>>> from sympy.physics.quantum.trace import Tr
>>> from sympy.physics.quantum.operator import OuterProduct
>>> from sympy.physics.quantum.spin import JzKet, JzBra
>>> t = Tr(OuterProduct(JzKet(1,1), JzBra(1,1)))
>>> t.doit()
1
"""
if hasattr(self.args[0], '_eval_trace'):
return self.args[0]._eval_trace(indices=self.args[1])
return self
@property
def is_number(self):
# TODO : improve this implementation
return True
#TODO: Review if the permute method is needed
# and if it needs to return a new instance
def permute(self, pos):
""" Permute the arguments cyclically.
Parameters
==========
pos : integer, if positive, shift-right, else shift-left
Examples
========
>>> from sympy.physics.quantum.trace import Tr
>>> from sympy import symbols
>>> A, B, C, D = symbols('A B C D', commutative=False)
>>> t = Tr(A*B*C*D)
>>> t.permute(2)
Tr(C*D*A*B)
>>> t.permute(-2)
Tr(C*D*A*B)
"""
if pos > 0:
pos = pos % len(self.args[0].args)
else:
pos = -(abs(pos) % len(self.args[0].args))
args = list(self.args[0].args[-pos:] + self.args[0].args[0:-pos])
return Tr(Mul(*(args)))
def _hashable_content(self):
if isinstance(self.args[0], Mul):
args = _cycle_permute(_rearrange_args(self.args[0].args))
else:
args = [self.args[0]]
return tuple(args) + (self.args[1], )
| Tr |
python | walkccc__LeetCode | solutions/1866. Number of Ways to Rearrange Sticks With K Sticks Visible/1866.py | {
"start": 0,
"end": 294
} | class ____:
@functools.lru_cache(None)
def rearrangeSticks(self, n: int, k: int) -> int:
if n == k:
return 1
if k == 0:
return 0
return (self.rearrangeSticks(n - 1, k - 1) +
self.rearrangeSticks(n - 1, k) * (n - 1)) % self.MOD
MOD = 1_000_000_007
| Solution |
python | mlflow__mlflow | dev/clint/src/clint/rules/os_environ_delete_in_test.py | {
"start": 84,
"end": 1148
} | class ____(Rule):
def _message(self) -> str:
return (
"Do not delete `os.environ` in test directly (del os.environ[...] or "
"os.environ.pop(...)). Use `monkeypatch.delenv` "
"(https://docs.pytest.org/en/stable/reference/reference.html#pytest.MonkeyPatch.delenv)."
)
@staticmethod
def check(node: ast.Delete | ast.Call, resolver: Resolver) -> bool:
"""
Returns True if the operation is deletion from os.environ[...] or
a call to os.environ.pop().
"""
if isinstance(node, ast.Delete):
# Handle: del os.environ["KEY"]
if len(node.targets) == 1 and isinstance(node.targets[0], ast.Subscript):
resolved = resolver.resolve(node.targets[0].value)
return resolved == ["os", "environ"]
elif isinstance(node, ast.Call):
# Handle: os.environ.pop("KEY")
resolved = resolver.resolve(node)
return resolved == ["os", "environ", "pop"]
return False
| OsEnvironDeleteInTest |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/core/pex_builder/gitlab_context.py | {
"start": 153,
"end": 1096
} | class ____:
def __init__(self, project_dir: str):
# https://docs.gitlab.com/ee/ci/variables/predefined_variables.html
self.commit_sha = os.environ["CI_COMMIT_SHA"]
self.branch_name = os.environ.get("CI_COMMIT_REF_NAME", "")
self.job_url = os.environ["CI_JOB_URL"]
self.project_name = os.environ["CI_PROJECT_NAME"]
self.project_url = os.environ["CI_PROJECT_URL"]
self.merge_request_iid: Optional[str] = os.environ.get("CI_MERGE_REQUEST_IID")
self.branch_url = f"{self.project_url}/-/tree/{self.branch_name}"
if self.merge_request_iid:
self.merge_request_url = f"{self.project_url}/-/merge_requests/{self.merge_request_iid}"
else:
self.merge_request_url = None
self.git_metadata = git_context.get_git_commit_metadata(project_dir)
def get_gitlab_event(project_dir) -> GitlabEvent:
return GitlabEvent(project_dir)
| GitlabEvent |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 8002,
"end": 8360
} | class ____(BaseModel):
"""
Create asset events request.
"""
model_config = ConfigDict(
extra="forbid",
)
asset_id: Annotated[int, Field(title="Asset Id")]
partition_key: Annotated[str | None, Field(title="Partition Key")] = None
extra: Annotated[dict[str, Any] | None, Field(title="Extra")] = None
| CreateAssetEventsBody |
python | ray-project__ray | rllib/offline/estimators/importance_sampling.py | {
"start": 460,
"end": 4601
} | class ____(OffPolicyEstimator):
r"""The step-wise IS estimator.
Let s_t, a_t, and r_t be the state, action, and reward at timestep t.
For behavior policy \pi_b and evaluation policy \pi_e, define the
cumulative importance ratio at timestep t as:
p_t = \sum_{t'=0}^t (\pi_e(a_{t'} | s_{t'}) / \pi_b(a_{t'} | s_{t'})).
This estimator computes the expected return for \pi_e for an episode as:
V^{\pi_e}(s_0) = \sum_t \gamma ^ {t} * p_t * r_t
and returns the mean and standard deviation over episodes.
For more information refer to https://arxiv.org/pdf/1911.06854.pdf"""
@override(OffPolicyEstimator)
def estimate_on_single_episode(self, episode: SampleBatch) -> Dict[str, float]:
estimates_per_epsiode = {}
rewards, old_prob = episode["rewards"], episode["action_prob"]
new_prob = self.compute_action_probs(episode)
# calculate importance ratios
p = []
for t in range(episode.count):
if t == 0:
pt_prev = 1.0
else:
pt_prev = p[t - 1]
p.append(pt_prev * new_prob[t] / old_prob[t])
# calculate stepwise IS estimate
v_behavior = 0.0
v_target = 0.0
for t in range(episode.count):
v_behavior += rewards[t] * self.gamma**t
v_target += p[t] * rewards[t] * self.gamma**t
estimates_per_epsiode["v_behavior"] = v_behavior
estimates_per_epsiode["v_target"] = v_target
return estimates_per_epsiode
@override(OffPolicyEstimator)
def estimate_on_single_step_samples(
self, batch: SampleBatch
) -> Dict[str, List[float]]:
estimates_per_epsiode = {}
rewards, old_prob = batch["rewards"], batch["action_prob"]
new_prob = self.compute_action_probs(batch)
weights = new_prob / old_prob
v_behavior = rewards
v_target = weights * rewards
estimates_per_epsiode["v_behavior"] = v_behavior
estimates_per_epsiode["v_target"] = v_target
return estimates_per_epsiode
@override(OfflineEvaluator)
def estimate_on_dataset(
self, dataset: Dataset, *, n_parallelism: int = ...
) -> Dict[str, Any]:
"""Computes the Importance sampling estimate on the given dataset.
Note: This estimate works for both continuous and discrete action spaces.
Args:
dataset: Dataset to compute the estimate on. Each record in dataset should
include the following columns: `obs`, `actions`, `action_prob` and
`rewards`. The `obs` on each row shoud be a vector of D dimensions.
n_parallelism: The number of parallel workers to use.
Returns:
A dictionary containing the following keys:
v_target: The estimated value of the target policy.
v_behavior: The estimated value of the behavior policy.
v_gain_mean: The mean of the gain of the target policy over the
behavior policy.
v_gain_ste: The standard error of the gain of the target policy over
the behavior policy.
"""
batch_size = max(dataset.count() // n_parallelism, 1)
dataset = dataset.map_batches(
remove_time_dim, batch_size=batch_size, batch_format="pandas"
)
updated_ds = dataset.map_batches(
compute_is_weights,
batch_size=batch_size,
batch_format="pandas",
fn_kwargs={
"policy_state": self.policy.get_state(),
"estimator_class": self.__class__,
},
)
v_target = updated_ds.mean("weighted_rewards")
v_behavior = updated_ds.mean("rewards")
v_gain_mean = v_target / v_behavior
v_gain_ste = (
updated_ds.std("weighted_rewards") / v_behavior / math.sqrt(dataset.count())
)
return {
"v_target": v_target,
"v_behavior": v_behavior,
"v_gain_mean": v_gain_mean,
"v_gain_ste": v_gain_ste,
}
| ImportanceSampling |
python | pandas-dev__pandas | asv_bench/benchmarks/io/sql.py | {
"start": 3177,
"end": 4369
} | class ____:
def setup(self):
N = 10000
self.table_name = "test"
self.con = create_engine("sqlite:///:memory:")
self.df = DataFrame(
{
"float": np.random.randn(N),
"float_with_nan": np.random.randn(N),
"string": ["foo"] * N,
"bool": [True] * N,
"int": np.random.randint(0, N, size=N),
"datetime": date_range("2000-01-01", periods=N, freq="s"),
},
index=Index([f"i-{i}" for i in range(N)], dtype=object),
)
self.df.iloc[1000:3000, 1] = np.nan
self.df["date"] = self.df["datetime"].dt.date
self.df["time"] = self.df["datetime"].dt.time
self.df["datetime_string"] = self.df["datetime"].astype(str)
self.df.to_sql(self.table_name, self.con, if_exists="replace")
def time_read_sql_table_all(self):
read_sql_table(self.table_name, self.con)
def time_read_sql_table_parse_dates(self):
read_sql_table(
self.table_name,
self.con,
columns=["datetime_string"],
parse_dates=["datetime_string"],
)
| ReadSQLTable |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 40195,
"end": 41232
} | class ____(axis_line, axis_ticks, panel_grid, legend_ticks):
"""
All line elements
Parameters
----------
theme_element : element_line
"""
@property
def rcParams(self) -> dict[str, Any]:
rcParams = super().rcParams
color = self.properties.get("color")
linewidth = self.properties.get("linewidth")
linestyle = self.properties.get("linestyle")
d = {}
if color:
d["axes.edgecolor"] = color
d["xtick.color"] = color
d["ytick.color"] = color
d["grid.color"] = color
if linewidth:
d["axes.linewidth"] = linewidth
d["xtick.major.width"] = linewidth
d["xtick.minor.width"] = linewidth
d["ytick.major.width"] = linewidth
d["ytick.minor.width"] = linewidth
d["grid.linewidth"] = linewidth
if linestyle:
d["grid.linestyle"] = linestyle
rcParams.update(d)
return rcParams
# element_rect themeables
| line |
python | Textualize__textual | src/textual/strip.py | {
"start": 1233,
"end": 2006
} | class ____:
"""A renderable which renders a list of strips into lines."""
def __init__(self, strips: list[Strip], width: int | None = None) -> None:
self._strips = strips
self._width = width
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
new_line = Segment.line()
for strip in self._strips:
yield from strip
yield new_line
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
if self._width is None:
width = max(strip.cell_length for strip in self._strips)
else:
width = self._width
return Measurement(width, width)
@rich.repr.auto
| StripRenderable |
python | sqlalchemy__sqlalchemy | test/engine/test_reconnect.py | {
"start": 1416,
"end": 1460
} | class ____(MockError):
pass
| MockDisconnect |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 142115,
"end": 143293
} | class ____(Layout):
"""Is a view into the storage of another tensor"""
def __init__(self, view: Union[BaseView, TensorBox]) -> None:
layout = view.get_layout()
super().__init__(
layout.device,
layout.dtype,
layout.size,
layout.stride,
)
self.view = view
def make_indexer(self) -> Callable[[Sequence[Expr]], Expr]:
return self.as_fixed().make_indexer()
def maybe_guard_aligned(self) -> bool:
offset = self.view.get_layout().offset
if offset == 0:
return True
from .utils import ALIGNMENT
return V.graph.sizevars.statically_known_multiple_of(offset, ALIGNMENT)
@cache_on_self_and_args("NonOwningLayout")
def get_free_symbol_uses(
self, unbacked_only: bool = False
) -> OrderedSet[sympy.Symbol]:
assert isinstance(self.view, ReinterpretView)
box = self.view.data
assert isinstance(box, StorageBox), type(box)
input_buffer = box.data
assert isinstance(input_buffer, Buffer), type(box)
return input_buffer.layout.get_free_symbol_uses(unbacked_only)
| NonOwningLayout |
python | great-expectations__great_expectations | docs/docusaurus/versioned_docs/version-0.18/snippets/expect_column_values_to_equal_three.py | {
"start": 1559,
"end": 4997
} | class ____(ColumnMapMetricProvider):
# </snippet>
# This is the id string that will be used to reference your metric.
# <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py metric_name">
condition_metric_name = "column_values.equal_three"
# </snippet>
# This method implements the core logic for the PandasExecutionEngine
# <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py pandas">
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column == 3
# </snippet>
# <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py spark_definition">
@metric_partial(
engine=SparkDFExecutionEngine,
partial_fn_type=MetricPartialFunctionTypes.MAP_CONDITION_FN,
domain_type=MetricDomainTypes.COLUMN,
)
def _spark(
cls,
execution_engine: SparkDFExecutionEngine,
metric_domain_kwargs,
metric_value_kwargs,
metrics,
runtime_configuration,
):
# </snippet>
# <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py spark_selectable">
(
selectable,
compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
metric_domain_kwargs, MetricDomainTypes.COLUMN
)
column_name = accessor_domain_kwargs["column"]
column = F.col(column_name)
# </snippet>
# <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py spark_query">
query = F.when(column == 3, F.lit(False)).otherwise(F.lit(True))
return (query, compute_domain_kwargs, accessor_domain_kwargs)
# </snippet>
# <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py sqlalchemy">
@column_condition_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(cls, column, **kwargs):
return column.in_([3])
# </snippet>
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[Dict] = None,
):
"""Returns a dictionary of given metric names and their corresponding configuration, specifying the metric
types and their respective domains"""
dependencies: Dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
table_domain_kwargs: Dict = {
k: v for k, v in metric.metric_domain_kwargs.items() if k != "column"
}
dependencies["table.column_types"] = MetricConfiguration(
metric_name="table.column_types",
metric_domain_kwargs=table_domain_kwargs,
metric_value_kwargs={
"include_nested": True,
},
)
return dependencies
# This class defines the Expectation itself
# <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py ExpectColumnValuesToEqualThree class_def">
| ColumnValuesEqualThree |
python | PrefectHQ__prefect | tests/server/models/test_task_run_states.py | {
"start": 713,
"end": 8511
} | class ____:
async def test_create_task_run_state_succeeds(self, task_run, session):
task_run_state = (
await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=Running(),
)
).state
assert task_run_state.name == "Running"
assert task_run_state.type == StateType.RUNNING
assert task_run_state.state_details.task_run_id == task_run.id
async def test_run_details_are_updated_entering_running(self, task_run, session):
await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=Scheduled(),
)
await session.refresh(task_run)
assert task_run.start_time is None
assert task_run.run_count == 0
dt = now("UTC")
await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=Running(timestamp=dt),
)
await session.refresh(task_run)
assert task_run.start_time == dt
assert task_run.run_count == 1
assert task_run.total_run_time == datetime.timedelta(0)
dt2 = now("UTC")
await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=Running(timestamp=dt2),
)
await session.commit()
await session.refresh(task_run)
assert task_run.start_time == dt
assert task_run.run_count == 2
assert task_run.total_run_time == (dt2 - dt)
async def test_failed_becomes_awaiting_retry(
self, task_run: TaskRun, client, session
):
# first ensure the task run's flow run is in a running state
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=task_run.flow_run_id,
state=Running(),
force=True,
)
# set max retries to 1
# copy to trigger ORM updates
task_run.empirical_policy = task_run.empirical_policy.model_copy()
task_run.empirical_policy.retries = 1
await session.flush()
(
await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=Running(),
task_policy=await provide_task_policy(),
)
).state
new_state = (
await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=Failed(),
task_policy=await provide_task_policy(),
)
).state
assert new_state.name == "AwaitingRetry"
assert new_state.type == StateType.SCHEDULED
async def test_failed_doesnt_retry_if_flag_set(self, task_run, client, session):
# set max retries to 1
# copy to trigger ORM updates
task_run.empirical_policy = task_run.empirical_policy.model_copy()
task_run.empirical_policy.retries = 1
await session.flush()
(
await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=Running(),
)
).state
new_state = (
await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=Failed(),
force=True,
)
).state
assert new_state.type == StateType.FAILED
async def test_database_is_not_updated_when_no_transition_takes_place(
self, task_run, session
):
# first ensure the task run's flow run is in a running state
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=task_run.flow_run_id,
state=Running(),
force=True,
)
# place the run in a scheduled state in the future
trs = await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=Scheduled(scheduled_time=now("UTC") + datetime.timedelta(days=30)),
task_policy=await provide_task_policy(),
)
# attempt to put the run in a pending state, which will tell the transition to WAIT
trs2 = await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=Running(),
task_policy=await provide_task_policy(),
)
assert trs2.status == schemas.responses.SetStateStatus.WAIT
# the original state remains in place
await session.refresh(task_run)
assert task_run.state.id == trs.state.id
async def test_no_orchestration_with_injected_empty_policy(self, task_run, session):
class EmptyPolicy(BaseOrchestrationPolicy):
@staticmethod
def priority():
return []
with temporary_task_policy(EmptyPolicy):
# place the run in a scheduled state in the future
trs = await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=Scheduled(
scheduled_time=now("UTC") + datetime.timedelta(days=30)
),
task_policy=await provide_task_policy(),
)
# put the run in a pending state, which succeeds due to injected orchestration
trs2 = await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=Running(),
task_policy=await provide_task_policy(),
)
assert trs2.status == schemas.responses.SetStateStatus.ACCEPT
# the original state remains in place
await session.refresh(task_run)
assert task_run.state.id != trs.state.id
async def test_orchestration_with_injected_parameters(self, task_run, session):
class AbortingRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
# this rule mutates the proposed state type, but won't fizzle itself upon exiting
if context.parameters.get("special-signal") == "abort":
await self.abort_transition("wow, aborting this transition")
class AbortingPolicy(BaseOrchestrationPolicy):
@staticmethod
def priority():
return [AbortingRule]
with temporary_task_orchestration_parameters({"special-signal": "abort"}):
with temporary_task_policy(AbortingPolicy):
trs = await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=Scheduled(
scheduled_time=now("UTC") + datetime.timedelta(days=30)
),
task_policy=await provide_task_policy(),
orchestration_parameters=await provide_task_orchestration_parameters(),
)
assert trs.status == schemas.responses.SetStateStatus.ABORT
async def test_object_not_found_if_id_not_found(self, session):
with pytest.raises(ObjectNotFoundError):
await models.task_runs.set_task_run_state(
session=session,
task_run_id=uuid4(),
state=Running(),
)
| TestCreateTaskRunState |
python | plotly__plotly.py | tests/test_optional/test_utils/test_utils.py | {
"start": 14683,
"end": 15578
} | class ____(TestCase):
def test_numpy_integer_import(self):
# should generate a figure with subplots of array and not throw a ValueError
import numpy as np
from plotly.subplots import make_subplots
indices_rows = np.array([1], dtype=int)
indices_cols = np.array([1], dtype=int)
fig = make_subplots(rows=1, cols=1)
fig.add_trace(go.Scatter(y=[1]), row=indices_rows[0], col=indices_cols[0])
data_path = ("data", 0, "y")
value = get_by_path(fig, data_path)
expected_value = (1,)
self.assertEqual(value, expected_value)
def test_get_numpy_int_type(self):
import numpy as np
from _plotly_utils.utils import _get_int_type
int_type_tuple = _get_int_type()
expected_tuple = (int, np.integer)
self.assertEqual(int_type_tuple, expected_tuple)
| TestNumpyIntegerBaseType |
python | doocs__leetcode | solution/2500-2599/2540.Minimum Common Value/Solution.py | {
"start": 0,
"end": 358
} | class ____:
def getCommon(self, nums1: List[int], nums2: List[int]) -> int:
i = j = 0
m, n = len(nums1), len(nums2)
while i < m and j < n:
if nums1[i] == nums2[j]:
return nums1[i]
if nums1[i] < nums2[j]:
i += 1
else:
j += 1
return -1
| Solution |
python | pypa__setuptools | setuptools/_vendor/wheel/cli/__init__.py | {
"start": 151,
"end": 4402
} | class ____(Exception):
pass
def unpack_f(args: argparse.Namespace) -> None:
from .unpack import unpack
unpack(args.wheelfile, args.dest)
def pack_f(args: argparse.Namespace) -> None:
from .pack import pack
pack(args.directory, args.dest_dir, args.build_number)
def convert_f(args: argparse.Namespace) -> None:
from .convert import convert
convert(args.files, args.dest_dir, args.verbose)
def tags_f(args: argparse.Namespace) -> None:
from .tags import tags
names = (
tags(
wheel,
args.python_tag,
args.abi_tag,
args.platform_tag,
args.build,
args.remove,
)
for wheel in args.wheel
)
for name in names:
print(name)
def version_f(args: argparse.Namespace) -> None:
from .. import __version__
print(f"wheel {__version__}")
def parse_build_tag(build_tag: str) -> str:
if build_tag and not build_tag[0].isdigit():
raise ArgumentTypeError("build tag must begin with a digit")
elif "-" in build_tag:
raise ArgumentTypeError("invalid character ('-') in build tag")
return build_tag
TAGS_HELP = """\
Make a new wheel with given tags. Any tags unspecified will remain the same.
Starting the tags with a "+" will append to the existing tags. Starting with a
"-" will remove a tag (use --option=-TAG syntax). Multiple tags can be
separated by ".". The original file will remain unless --remove is given. The
output filename(s) will be displayed on stdout for further processing.
"""
def parser():
p = argparse.ArgumentParser()
s = p.add_subparsers(help="commands")
unpack_parser = s.add_parser("unpack", help="Unpack wheel")
unpack_parser.add_argument(
"--dest", "-d", help="Destination directory", default="."
)
unpack_parser.add_argument("wheelfile", help="Wheel file")
unpack_parser.set_defaults(func=unpack_f)
repack_parser = s.add_parser("pack", help="Repack wheel")
repack_parser.add_argument("directory", help="Root directory of the unpacked wheel")
repack_parser.add_argument(
"--dest-dir",
"-d",
default=os.path.curdir,
help="Directory to store the wheel (default %(default)s)",
)
repack_parser.add_argument(
"--build-number", help="Build tag to use in the wheel name"
)
repack_parser.set_defaults(func=pack_f)
convert_parser = s.add_parser("convert", help="Convert egg or wininst to wheel")
convert_parser.add_argument("files", nargs="*", help="Files to convert")
convert_parser.add_argument(
"--dest-dir",
"-d",
default=os.path.curdir,
help="Directory to store wheels (default %(default)s)",
)
convert_parser.add_argument("--verbose", "-v", action="store_true")
convert_parser.set_defaults(func=convert_f)
tags_parser = s.add_parser(
"tags", help="Add or replace the tags on a wheel", description=TAGS_HELP
)
tags_parser.add_argument("wheel", nargs="*", help="Existing wheel(s) to retag")
tags_parser.add_argument(
"--remove",
action="store_true",
help="Remove the original files, keeping only the renamed ones",
)
tags_parser.add_argument(
"--python-tag", metavar="TAG", help="Specify an interpreter tag(s)"
)
tags_parser.add_argument("--abi-tag", metavar="TAG", help="Specify an ABI tag(s)")
tags_parser.add_argument(
"--platform-tag", metavar="TAG", help="Specify a platform tag(s)"
)
tags_parser.add_argument(
"--build", type=parse_build_tag, metavar="BUILD", help="Specify a build tag"
)
tags_parser.set_defaults(func=tags_f)
version_parser = s.add_parser("version", help="Print version and exit")
version_parser.set_defaults(func=version_f)
help_parser = s.add_parser("help", help="Show this help")
help_parser.set_defaults(func=lambda args: p.print_help())
return p
def main():
p = parser()
args = p.parse_args()
if not hasattr(args, "func"):
p.print_help()
else:
try:
args.func(args)
return 0
except WheelError as e:
print(e, file=sys.stderr)
return 1
| WheelError |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vectorizers.py | {
"start": 15101,
"end": 15317
} | class ____(_VectorizerConfigCreate):
vectorizer: Union[Vectorizers, _EnumLikeStr] = Field(
default=Vectorizers.IMG2VEC_NEURAL, frozen=True, exclude=True
)
imageFields: List[str]
| _Img2VecNeuralConfig |
python | pydata__xarray | xarray/tests/test_duck_array_wrapping.py | {
"start": 6954,
"end": 17942
} | class ____(_BaseTest):
@pytest.fixture(autouse=True)
def setUp(self, request, namespace):
self.setup_for_test(request, namespace)
self.x = self.get_test_dataarray()
def test_loc(self):
result = self.x.loc[{"x": slice(1, 3)}]
assert isinstance(result.data, self.Array)
def test_isel(self):
result = self.x.isel(x=slice(1, 3))
assert isinstance(result.data, self.Array)
def test_sel(self):
result = self.x.sel(x=slice(1, 3))
assert isinstance(result.data, self.Array)
def test_squeeze(self):
result = self.x.squeeze("y")
assert isinstance(result.data, self.Array)
@pytest.mark.xfail(reason="interp uses numpy and scipy")
def test_interp(self):
# TODO: some cases could be made to work
result = self.x.interp(x=2.5)
assert isinstance(result.data, self.Array)
def test_isnull(self):
result = self.x.isnull()
assert isinstance(result.data, self.Array)
def test_notnull(self):
result = self.x.notnull()
assert isinstance(result.data, self.Array)
def test_count(self):
result = self.x.count()
assert isinstance(result.data, self.Array)
def test_dropna(self):
result = self.x.dropna(dim="x")
assert isinstance(result.data, self.Array)
def test_fillna(self):
result = self.x.fillna(0)
assert isinstance(result.data, self.Array)
@pytest.mark.xfail(reason="ffill uses bottleneck or numbagg")
def test_ffill(self):
result = self.x.ffill()
assert isinstance(result.data, self.Array)
@pytest.mark.xfail(reason="bfill uses bottleneck or numbagg")
def test_bfill(self):
result = self.x.bfill()
assert isinstance(result.data, self.Array)
@pytest.mark.xfail(reason="interpolate_na uses numpy and scipy")
def test_interpolate_na(self):
result = self.x.interpolate_na()
assert isinstance(result.data, self.Array)
def test_where(self):
result = self.x.where(self.x > 2)
assert isinstance(result.data, self.Array)
def test_isin(self):
test_elements = self.constructor(np.asarray([1]))
result = self.x.isin(test_elements)
assert isinstance(result.data, self.Array)
def test_groupby(self):
result = self.x.groupby("x").mean()
assert isinstance(result.data, self.Array)
def test_groupby_bins(self):
result = self.x.groupby_bins("x", bins=[0, 2, 4, 6]).mean()
assert isinstance(result.data, self.Array)
def test_rolling_iter(self):
result = self.x.rolling(x=3)
elem = next(iter(result))[1]
assert isinstance(elem.data, self.Array)
def test_rolling_construct(self):
result = self.x.rolling(x=3).construct(x="window")
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_rolling_reduce(self, skipna):
result = self.x.rolling(x=3).mean(skipna=skipna)
assert isinstance(result.data, self.Array)
@pytest.mark.xfail(reason="rolling_exp uses numbagg")
def test_rolling_exp_reduce(self):
result = self.x.rolling_exp(x=3).mean()
assert isinstance(result.data, self.Array)
def test_cumulative_iter(self):
result = self.x.cumulative("x")
elem = next(iter(result))[1]
assert isinstance(elem.data, self.Array)
def test_cumulative_construct(self):
result = self.x.cumulative("x").construct(x="window")
assert isinstance(result.data, self.Array)
def test_cumulative_reduce(self):
result = self.x.cumulative("x").sum()
assert isinstance(result.data, self.Array)
def test_weighted(self):
result = self.x.weighted(self.x.fillna(0)).mean()
assert isinstance(result.data, self.Array)
def test_coarsen_construct(self):
result = self.x.coarsen(x=2, boundary="pad").construct(x=["a", "b"])
assert isinstance(result.data, self.Array)
def test_coarsen_reduce(self):
result = self.x.coarsen(x=2, boundary="pad").mean()
assert isinstance(result.data, self.Array)
def test_resample(self):
time_coord = pd.date_range("2000-01-01", periods=5)
result = self.x.assign_coords(x=time_coord).resample(x="D").mean()
assert isinstance(result.data, self.Array)
def test_diff(self):
result = self.x.diff("x")
assert isinstance(result.data, self.Array)
def test_dot(self):
result = self.x.dot(self.x)
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_quantile(self, skipna):
result = self.x.quantile(0.5, skipna=skipna)
assert isinstance(result.data, self.Array)
def test_differentiate(self):
# edge_order is not implemented in jax, and only supports passing None
edge_order = None if self.namespace == "jax.numpy" else 1
result = self.x.differentiate("x", edge_order=edge_order)
assert isinstance(result.data, self.Array)
def test_integrate(self):
result = self.x.integrate("x")
assert isinstance(result.data, self.Array)
@pytest.mark.xfail(reason="polyfit uses numpy linalg")
def test_polyfit(self):
# TODO: this could work, there are just a lot of different linalg calls
result = self.x.polyfit("x", 1)
assert isinstance(result.polyfit_coefficients.data, self.Array)
def test_map_blocks(self):
result = self.x.map_blocks(lambda x: x + 1)
assert isinstance(result.data, self.Array)
def test_all(self):
result = self.x.all(dim="x")
assert isinstance(result.data, self.Array)
def test_any(self):
result = self.x.any(dim="x")
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_argmax(self, skipna):
result = self.x.argmax(dim="x", skipna=skipna)
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_argmin(self, skipna):
result = self.x.argmin(dim="x", skipna=skipna)
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_idxmax(self, skipna):
result = self.x.idxmax(dim="x", skipna=skipna)
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_idxmin(self, skipna):
result = self.x.idxmin(dim="x", skipna=skipna)
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_max(self, skipna):
result = self.x.max(dim="x", skipna=skipna)
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_min(self, skipna):
result = self.x.min(dim="x", skipna=skipna)
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_mean(self, skipna):
result = self.x.mean(dim="x", skipna=skipna)
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_median(self, skipna):
result = self.x.median(dim="x", skipna=skipna)
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_prod(self, skipna):
result = self.x.prod(dim="x", skipna=skipna)
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_sum(self, skipna):
result = self.x.sum(dim="x", skipna=skipna)
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_std(self, skipna):
result = self.x.std(dim="x", skipna=skipna)
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_var(self, skipna):
result = self.x.var(dim="x", skipna=skipna)
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_cumsum(self, skipna):
result = self.x.cumsum(dim="x", skipna=skipna)
assert isinstance(result.data, self.Array)
@pytest.mark.parametrize("skipna", [True, False])
def test_cumprod(self, skipna):
result = self.x.cumprod(dim="x", skipna=skipna)
assert isinstance(result.data, self.Array)
def test_argsort(self):
result = self.x.argsort()
assert isinstance(result.data, self.Array)
def test_astype(self):
result = self.x.astype(int)
assert isinstance(result.data, self.Array)
def test_clip(self):
result = self.x.clip(min=2.0, max=4.0)
assert isinstance(result.data, self.Array)
def test_conj(self):
result = self.x.conj()
assert isinstance(result.data, self.Array)
def test_conjugate(self):
result = self.x.conjugate()
assert isinstance(result.data, self.Array)
def test_imag(self):
result = self.x.imag
assert isinstance(result.data, self.Array)
def test_searchsorted(self):
v = self.constructor(np.asarray([3]))
result = self.x.squeeze().searchsorted(v)
assert isinstance(result, self.Array)
def test_round(self):
result = self.x.round()
assert isinstance(result.data, self.Array)
def test_real(self):
result = self.x.real
assert isinstance(result.data, self.Array)
def test_T(self):
result = self.x.T
assert isinstance(result.data, self.Array)
@pytest.mark.xfail(reason="rank uses bottleneck")
def test_rank(self):
# TODO: scipy has rankdata, as does jax, so this can work
result = self.x.rank()
assert isinstance(result.data, self.Array)
def test_transpose(self):
result = self.x.transpose()
assert isinstance(result.data, self.Array)
def test_stack(self):
result = self.x.stack(z=("x", "y"))
assert isinstance(result.data, self.Array)
def test_unstack(self):
result = self.x.stack(z=("x", "y")).unstack("z")
assert isinstance(result.data, self.Array)
def test_shift(self):
result = self.x.shift(x=1)
assert isinstance(result.data, self.Array)
def test_roll(self):
result = self.x.roll(x=1)
assert isinstance(result.data, self.Array)
def test_pad(self):
result = self.x.pad(x=1)
assert isinstance(result.data, self.Array)
def test_sortby(self):
result = self.x.sortby("x")
assert isinstance(result.data, self.Array)
def test_broadcast_like(self):
result = self.x.broadcast_like(self.x)
assert isinstance(result.data, self.Array)
| TestDataArrayMethods |
python | realpython__materials | simulation-with-simpy/simulate.py | {
"start": 228,
"end": 3099
} | class ____(object):
def __init__(self, env, num_cashiers, num_servers, num_ushers):
self.env = env
self.cashier = simpy.Resource(env, num_cashiers)
self.server = simpy.Resource(env, num_servers)
self.usher = simpy.Resource(env, num_ushers)
def purchase_ticket(self, moviegoer):
yield self.env.timeout(random.randint(1, 3))
def check_ticket(self, moviegoer):
yield self.env.timeout(3 / 60)
def sell_food(self, moviegoer):
yield self.env.timeout(random.randint(1, 5))
def go_to_movies(env, moviegoer, theater):
# Moviegoer arrives at the theater
arrival_time = env.now
with theater.cashier.request() as request:
yield request
yield env.process(theater.purchase_ticket(moviegoer))
with theater.usher.request() as request:
yield request
yield env.process(theater.check_ticket(moviegoer))
if random.choice([True, False]):
with theater.server.request() as request:
yield request
yield env.process(theater.sell_food(moviegoer))
# Moviegoer heads into the theater
wait_times.append(env.now - arrival_time)
def run_theater(env, num_cashiers, num_servers, num_ushers):
theater = Theater(env, num_cashiers, num_servers, num_ushers)
for moviegoer in range(3):
env.process(go_to_movies(env, moviegoer, theater))
while True:
yield env.timeout(0.20) # Wait a bit before generating a new person
moviegoer += 1
env.process(go_to_movies(env, moviegoer, theater))
def get_average_wait_time(wait_times):
average_wait = statistics.mean(wait_times)
# Pretty print the results
minutes, frac_minutes = divmod(average_wait, 1)
seconds = frac_minutes * 60
return round(minutes), round(seconds)
def get_user_input():
num_cashiers = input("Input # of cashiers working: ")
num_servers = input("Input # of servers working: ")
num_ushers = input("Input # of ushers working: ")
params = [num_cashiers, num_servers, num_ushers]
if all(str(i).isdigit() for i in params): # Check input is valid
params = [int(x) for x in params]
else:
print(
"Could not parse input. Simulation will use default values:",
"\n1 cashier, 1 server, 1 usher.",
)
params = [1, 1, 1]
return params
def main():
# Setup
random.seed(42)
num_cashiers, num_servers, num_ushers = get_user_input()
# Run the simulation
env = simpy.Environment()
env.process(run_theater(env, num_cashiers, num_servers, num_ushers))
env.run(until=90)
# View the results
mins, secs = get_average_wait_time(wait_times)
print(
"Running simulation...",
f"\nThe average wait time is {mins} minutes and {secs} seconds.",
)
if __name__ == "__main__":
main()
| Theater |
python | pypa__setuptools | setuptools/_vendor/tomli/_parser.py | {
"start": 4177,
"end": 6204
} | class ____:
"""Flags that map to parsed keys/namespaces."""
# Marks an immutable namespace (inline array or inline table).
FROZEN = 0
# Marks a nest that has been explicitly created and can no longer
# be opened using the "[table]" syntax.
EXPLICIT_NEST = 1
def __init__(self) -> None:
self._flags: dict[str, dict] = {}
self._pending_flags: set[tuple[Key, int]] = set()
def add_pending(self, key: Key, flag: int) -> None:
self._pending_flags.add((key, flag))
def finalize_pending(self) -> None:
for key, flag in self._pending_flags:
self.set(key, flag, recursive=False)
self._pending_flags.clear()
def unset_all(self, key: Key) -> None:
cont = self._flags
for k in key[:-1]:
if k not in cont:
return
cont = cont[k]["nested"]
cont.pop(key[-1], None)
def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003
cont = self._flags
key_parent, key_stem = key[:-1], key[-1]
for k in key_parent:
if k not in cont:
cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
cont = cont[k]["nested"]
if key_stem not in cont:
cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}}
cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag)
def is_(self, key: Key, flag: int) -> bool:
if not key:
return False # document root has no flags
cont = self._flags
for k in key[:-1]:
if k not in cont:
return False
inner_cont = cont[k]
if flag in inner_cont["recursive_flags"]:
return True
cont = inner_cont["nested"]
key_stem = key[-1]
if key_stem in cont:
cont = cont[key_stem]
return flag in cont["flags"] or flag in cont["recursive_flags"]
return False
| Flags |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/microsoft/tests.py | {
"start": 407,
"end": 2255
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = MicrosoftGraphProvider.id
def get_mocked_response(self):
response_data = """
{
"@odata.context": "https://graph.microsoft.com/v1.0/$metadata#users/$entity",
"id": "16f5a7b6-5a15-4568-aa5a-31bb117e9967",
"businessPhones": [],
"displayName": "Anne Weiler",
"givenName": "Anne",
"jobTitle": "Manufacturing Lead",
"mail": "annew@CIE493742.onmicrosoft.com",
"mobilePhone": "+1 3528700812",
"officeLocation": null,
"preferredLanguage": "en-US",
"surname": "Weiler",
"userPrincipalName": "annew@CIE493742.onmicrosoft.com",
"mailNickname": "annew"
}
""" # noqa
return MockedResponse(HTTPStatus.OK, response_data)
def get_expected_to_str(self):
return "annew@CIE493742.onmicrosoft.com"
def test_invalid_data(self):
response = MockedResponse(HTTPStatus.OK, json.dumps({}))
with self.assertRaises(OAuth2Error):
# No id, raises
_check_errors(response)
def test_profile_invalid_response(self):
data = {
"error": {
"code": "InvalidAuthenticationToken",
"message": "Access token validation failure. Invalid audience.",
}
}
response = MockedResponse(HTTPStatus.UNAUTHORIZED, json.dumps(data))
with self.assertRaises(OAuth2Error):
# no id, 4xx code, raises with message
_check_errors(response)
def test_invalid_response(self):
response = MockedResponse(HTTPStatus.OK, "invalid json data")
with self.assertRaises(OAuth2Error):
# bad json, raises
_check_errors(response)
| MicrosoftGraphTests |
python | jd__tenacity | tenacity/wait.py | {
"start": 7079,
"end": 8361
} | class ____(wait_exponential):
"""Random wait with exponentially widening window.
An exponential backoff strategy used to mediate contention between multiple
uncoordinated processes for a shared resource in distributed systems. This
is the sense in which "exponential backoff" is meant in e.g. Ethernet
networking, and corresponds to the "Full Jitter" algorithm described in
this blog post:
https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
Each retry occurs at a random time in a geometrically expanding interval.
It allows for a custom multiplier and an ability to restrict the upper
limit of the random interval to some maximum value.
Example::
wait_random_exponential(multiplier=0.5, # initial window 0.5s
max=60) # max 60s timeout
When waiting for an unavailable resource to become available again, as
opposed to trying to resolve contention for a shared resource, the
wait_exponential strategy (which uses a fixed interval) may be preferable.
"""
def __call__(self, retry_state: "RetryCallState") -> float:
high = super().__call__(retry_state=retry_state)
return random.uniform(self.min, high)
| wait_random_exponential |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 314794,
"end": 315915
} | class ____(Request):
"""
Get the list of task hyper parameters
:param tasks: Task IDs
:type tasks: Sequence[str]
"""
_service = "tasks"
_action = "get_hyper_params"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"tasks": {
"description": "Task IDs",
"items": {"type": "string"},
"type": "array",
}
},
"required": ["tasks"],
"type": "object",
}
def __init__(self, tasks: List[str], **kwargs: Any) -> None:
super(GetHyperParamsRequest, self).__init__(**kwargs)
self.tasks = tasks
@schema_property("tasks")
def tasks(self) -> List[str]:
return self._property_tasks
@tasks.setter
def tasks(self, value: List[str]) -> None:
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
self._property_tasks = value
| GetHyperParamsRequest |
python | pallets__quart | src/quart/typing.py | {
"start": 10005,
"end": 10094
} | class ____(Protocol):
def is_set(self) -> bool: ...
def set(self) -> None: ...
| Event |
python | ray-project__ray | python/ray/serve/_private/benchmarks/streaming/_grpc/test_server_pb2_grpc.py | {
"start": 330,
"end": 2087
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Unary = channel.unary_unary(
"/GRPCTestServer/Unary",
request_serializer=backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Request.SerializeToString,
response_deserializer=backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Response.FromString,
)
self.ClientStreaming = channel.stream_unary(
"/GRPCTestServer/ClientStreaming",
request_serializer=backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Request.SerializeToString,
response_deserializer=backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Response.FromString,
)
self.ServerStreaming = channel.unary_stream(
"/GRPCTestServer/ServerStreaming",
request_serializer=backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Request.SerializeToString,
response_deserializer=backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Response.FromString,
)
self.BidiStreaming = channel.stream_stream(
"/GRPCTestServer/BidiStreaming",
request_serializer=backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Request.SerializeToString,
response_deserializer=backend_dot_server_dot_common_dot_clients_dot_grpc_dot_proto_dot_test__server__pb2.Response.FromString,
)
| GRPCTestServerStub |
python | optuna__optuna | optuna/cli.py | {
"start": 17843,
"end": 19510
} | class ____(_BaseCommand):
"""Show the best trial."""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"--study-name",
type=str,
required=True,
help="The name of the study to get the best trial.",
)
parser.add_argument(
"-f",
"--format",
type=str,
choices=("value", "json", "table", "yaml"),
default="table",
help="Output format.",
)
parser.add_argument(
"--flatten",
default=False,
action="store_true",
help="Flatten nested columns such as params and user_attrs.",
)
def take_action(self, parsed_args: Namespace) -> int:
optuna_warn(
"'best-trial' is an experimental CLI command. The interface can change in the future.",
ExperimentalWarning,
)
storage = _get_storage(parsed_args.storage, parsed_args.storage_class)
study = optuna.load_study(storage=storage, study_name=parsed_args.study_name)
attrs = (
"number",
"value" if not study._is_multi_objective() else "values",
"datetime_start",
"datetime_complete",
"duration",
"params",
"user_attrs",
"state",
)
records, columns = _dataframe._create_records_and_aggregate_column(study, attrs)
print(
_format_output(
records[study.best_trial.number], columns, parsed_args.format, parsed_args.flatten
)
)
return 0
| _BestTrial |
python | django__django | django/contrib/gis/db/backends/spatialite/operations.py | {
"start": 1026,
"end": 8608
} | class ____(BaseSpatialOperations, DatabaseOperations):
name = "spatialite"
spatialite = True
Adapter = SpatiaLiteAdapter
collect = "Collect"
extent = "Extent"
makeline = "MakeLine"
unionagg = "GUnion"
from_text = "GeomFromText"
gis_operators = {
# Binary predicates
"equals": SpatialiteNullCheckOperator(func="Equals"),
"disjoint": SpatialiteNullCheckOperator(func="Disjoint"),
"touches": SpatialiteNullCheckOperator(func="Touches"),
"crosses": SpatialiteNullCheckOperator(func="Crosses"),
"within": SpatialiteNullCheckOperator(func="Within"),
"overlaps": SpatialiteNullCheckOperator(func="Overlaps"),
"contains": SpatialiteNullCheckOperator(func="Contains"),
"intersects": SpatialiteNullCheckOperator(func="Intersects"),
"relate": SpatialiteNullCheckOperator(func="Relate"),
"coveredby": SpatialiteNullCheckOperator(func="CoveredBy"),
"covers": SpatialiteNullCheckOperator(func="Covers"),
# Returns true if B's bounding box completely contains A's bounding
# box.
"contained": SpatialOperator(func="MbrWithin"),
# Returns true if A's bounding box completely contains B's bounding
# box.
"bbcontains": SpatialOperator(func="MbrContains"),
# Returns true if A's bounding box overlaps B's bounding box.
"bboverlaps": SpatialOperator(func="MbrOverlaps"),
# These are implemented here as synonyms for Equals
"same_as": SpatialiteNullCheckOperator(func="Equals"),
"exact": SpatialiteNullCheckOperator(func="Equals"),
# Distance predicates
"dwithin": SpatialOperator(func="PtDistWithin"),
}
disallowed_aggregates = (models.Extent3D,)
select = "CAST (AsEWKB(%s) AS BLOB)"
function_names = {
"AsWKB": "St_AsBinary",
"BoundingCircle": "GEOSMinimumBoundingCircle",
"ForcePolygonCW": "ST_ForceLHR",
"FromWKB": "ST_GeomFromWKB",
"FromWKT": "ST_GeomFromText",
"IsEmpty": "ST_IsEmpty",
"Length": "ST_Length",
"LineLocatePoint": "ST_Line_Locate_Point",
"NumDimensions": "ST_NDims",
"NumPoints": "ST_NPoints",
"Reverse": "ST_Reverse",
"Scale": "ScaleCoords",
"Translate": "ST_Translate",
"Union": "ST_Union",
}
@cached_property
def unsupported_functions(self):
unsupported = {"GeometryDistance", "MemSize", "Rotate"}
if not self.geom_lib_version():
unsupported |= {"Azimuth", "GeoHash", "MakeValid"}
if self.spatial_version < (5, 1):
unsupported |= {"BoundingCircle"}
return unsupported
@cached_property
def spatial_version(self):
"""Determine the version of the SpatiaLite library."""
try:
version = self.spatialite_version_tuple()[1:]
except Exception as exc:
raise ImproperlyConfigured(
'Cannot determine the SpatiaLite version for the "%s" database. '
"Was the SpatiaLite initialization SQL loaded on this database?"
% (self.connection.settings_dict["NAME"],)
) from exc
if version < (4, 3, 0):
raise ImproperlyConfigured("GeoDjango supports SpatiaLite 4.3.0 and above.")
return version
def convert_extent(self, box):
"""
Convert the polygon data received from SpatiaLite to min/max values.
"""
if box is None:
return None
shell = GEOSGeometry(box).shell
xmin, ymin = shell[0][:2]
xmax, ymax = shell[2][:2]
return (xmin, ymin, xmax, ymax)
def geo_db_type(self, f):
"""
Return None because geometry columns are added via the
`AddGeometryColumn` stored procedure on SpatiaLite.
"""
return None
def get_distance(self, f, value, lookup_type):
"""
Return the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
if lookup_type == "dwithin":
raise ValueError(
"Only numeric values of degree units are allowed on "
"geographic DWithin queries."
)
dist_param = value.m
else:
dist_param = getattr(
value, Distance.unit_attname(f.units_name(self.connection))
)
else:
dist_param = value
return [dist_param]
def _get_spatialite_func(self, func):
"""
Helper routine for calling SpatiaLite functions and returning
their result.
Any error occurring in this method should be handled by the caller.
"""
cursor = self.connection._cursor()
try:
cursor.execute("SELECT %s" % func)
row = cursor.fetchone()
finally:
cursor.close()
return row[0]
def geos_version(self):
"Return the version of GEOS used by SpatiaLite as a string."
return self._get_spatialite_func("geos_version()")
def proj_version(self):
"""Return the version of the PROJ library used by SpatiaLite."""
return self._get_spatialite_func("proj4_version()")
def lwgeom_version(self):
"""Return the version of LWGEOM library used by SpatiaLite."""
return self._get_spatialite_func("lwgeom_version()")
def rttopo_version(self):
"""Return the version of RTTOPO library used by SpatiaLite."""
return self._get_spatialite_func("rttopo_version()")
def geom_lib_version(self):
"""
Return the version of the version-dependant geom library used by
SpatiaLite.
"""
if self.spatial_version >= (5,):
return self.rttopo_version()
else:
return self.lwgeom_version()
def spatialite_version(self):
"Return the SpatiaLite library version as a string."
return self._get_spatialite_func("spatialite_version()")
def spatialite_version_tuple(self):
"""
Return the SpatiaLite version as a tuple (version string, major,
minor, subminor).
"""
version = self.spatialite_version()
return (version, *get_version_tuple(version))
def spatial_aggregate_name(self, agg_name):
"""
Return the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = "unionagg" if agg_name.lower() == "union" else agg_name.lower()
return getattr(self, agg_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.spatialite.models import (
SpatialiteGeometryColumns,
)
return SpatialiteGeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.spatialite.models import (
SpatialiteSpatialRefSys,
)
return SpatialiteSpatialRefSys
def get_geometry_converter(self, expression):
geom_class = expression.output_field.geom_class
read = wkb_r().read
def converter(value, expression, connection):
return None if value is None else GEOSGeometryBase(read(value), geom_class)
return converter
| SpatiaLiteOperations |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_assets.py | {
"start": 26652,
"end": 27916
} | class ____(TestAssetAliases):
@pytest.mark.parametrize(
("url", "expected_asset_aliases"),
[
# Limit test data
("/assets/aliases?limit=1", ["simple1"]),
("/assets/aliases?limit=100", [f"simple{i}" for i in range(1, 101)]),
# Offset test data
("/assets/aliases?offset=1", [f"simple{i}" for i in range(2, 52)]),
("/assets/aliases?offset=3", [f"simple{i}" for i in range(4, 54)]),
# Limit and offset test data
("/assets/aliases?offset=3&limit=3", ["simple4", "simple5", "simple6"]),
],
)
def test_limit_and_offset(self, test_client, url, expected_asset_aliases):
self.create_asset_aliases(num=110)
response = test_client.get(url)
assert response.status_code == 200
alias_names = [asset["name"] for asset in response.json()["asset_aliases"]]
assert alias_names == expected_asset_aliases
def test_should_respect_page_size_limit_default(self, test_client):
self.create_asset_aliases(num=110)
response = test_client.get("/assets/aliases")
assert response.status_code == 200
assert len(response.json()["asset_aliases"]) == 50
| TestGetAssetAliasesEndpointPagination |
python | explosion__spaCy | spacy/schemas.py | {
"start": 9577,
"end": 9783
} | class ____(str, Enum):
plus: StrictStr = StrictStr("+")
star: StrictStr = StrictStr("*")
question: StrictStr = StrictStr("?")
exclamation: StrictStr = StrictStr("!")
| TokenPatternOperatorSimple |
python | run-llama__llama_index | llama-index-core/llama_index/core/ingestion/transformations.py | {
"start": 10583,
"end": 11921
} | class ____(BaseModel, Generic[T]):
"""
A class containing metadata & implementation for a transformation in a pipeline.
"""
name: str
component: SerializeAsAny[T] = Field(
description="Component that implements the transformation"
)
@classmethod
def from_component(cls, component: BaseComponent) -> "ConfiguredTransformation":
"""
Build a ConfiguredTransformation from a component.
This should be the preferred way to build a ConfiguredTransformation
as it will ensure that the component is supported as indicated by having a
corresponding enum value in ConfigurableTransformations.
This has the added bonus that you don't need to specify the generic type
like ConfiguredTransformation[SentenceSplitter]. The return value of
this ConfiguredTransformation.from_component(simple_node_parser) will be
ConfiguredTransformation[SentenceSplitter] if simple_node_parser is
a SentenceSplitter.
"""
return ConfigurableTransformations.from_component(
component
).build_configured_transformation(component)
@property
def configurable_transformation_type(self) -> ConfigurableComponent:
return ConfigurableTransformations.from_component(self.component)
| ConfiguredTransformation |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/constant_op_test.py | {
"start": 19616,
"end": 24237
} | class ____(test.TestCase):
def _compareZeros(self, dtype, fully_defined_shape, use_gpu):
with self.cached_session(use_gpu=use_gpu):
# Creates a tensor of non-zero values with shape 2 x 3.
# NOTE(kearnes): The default numpy dtype associated with tf.string is
# np.object_ (and can't be changed without breaking a lot things), which
# causes a TypeError in constant_op.constant below. Here we catch the
# special case of tf.string and set the numpy dtype appropriately.
if dtype == dtypes_lib.string:
numpy_dtype = np.bytes_
else:
numpy_dtype = dtype.as_numpy_dtype
if fully_defined_shape:
d = constant_op.constant(
np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
else:
d = array_ops.placeholder(dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.zeros_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
# Test that the shape is correct
if fully_defined_shape:
self.assertEqual([2, 3], z_var.get_shape())
# Test that the value is correct
feed_dict = {}
if not fully_defined_shape:
feed_dict[d] = np.ones((2, 3), dtype=numpy_dtype)
z_value = z_var.eval(feed_dict=feed_dict)
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
@test_util.run_deprecated_v1
def testZerosLikeCPU(self):
for dtype in [
dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.int8, dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.uint16,
dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.bool,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.string
]:
self._compareZeros(dtype, fully_defined_shape=False, use_gpu=False)
self._compareZeros(dtype, fully_defined_shape=True, use_gpu=False)
@test_util.run_deprecated_v1
def testZerosLikeGPU(self):
for dtype in [
dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.complex64,
dtypes_lib.complex128, dtypes_lib.bool
]:
self._compareZeros(dtype, fully_defined_shape=False, use_gpu=True)
self._compareZeros(dtype, fully_defined_shape=True, use_gpu=True)
@test_util.run_deprecated_v1
def testZerosLikePartialShape(self):
d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
z = array_ops.zeros_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())
@test_util.run_deprecated_v1
def testZerosLikeDtype(self):
# Make sure zeros_like works even for dtypes that cannot be cast between
with self.cached_session():
shape = (3, 5)
dtypes = np.float32, np.complex64
for in_type in dtypes:
x = np.arange(15).astype(in_type).reshape(*shape)
for out_type in dtypes:
y = array_ops.zeros_like(x, dtype=out_type).eval()
self.assertEqual(y.dtype, out_type)
self.assertEqual(y.shape, shape)
self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
@test_util.run_deprecated_v1
def testZerosLikeVariant(self):
# TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant
# copying between CPU and GPU is supported AND we register a
# ZerosLike callback for GPU for Variant storing primitive types
# in variant_op_registry.cc.
with self.session(use_gpu=False):
variant_tensor = tensor_pb2.TensorProto(
dtype=dtypes_lib.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(1, dtype=np.int32).tobytes())
])
const_variant = constant_op.constant(variant_tensor)
zeros_like = array_ops.zeros_like(const_variant)
zeros_like_op = logging_ops.Print(
zeros_like, [const_variant, zeros_like],
message="Variant storing an int, input and output of zeros_like:").op
# Smoke test -- ensure this executes without trouble.
# Right now, non-numpy-compatible objects cannot be returned from a
# session.run call; similarly, objects that can't be converted to
# native numpy types cannot be passed to ops.convert_to_tensor.
# TODO(ebrevdo): Add registration mechanism for
# ops.convert_to_tensor and for session.run output.
zeros_like_op.run()
| ZerosLikeTest |
python | getsentry__sentry | src/sentry/grouping/variants.py | {
"start": 2063,
"end": 2465
} | class ____(BaseVariant):
"""A checksum variant returns a single hardcoded hash."""
type = "checksum"
description = "legacy checksum"
def __init__(self, checksum: str):
self.checksum = checksum
def get_hash(self) -> str | None:
return self.checksum
def _get_metadata_as_dict(self) -> Mapping[str, str]:
return {"checksum": self.checksum}
| ChecksumVariant |
python | wandb__wandb | wandb/vendor/pygments/lexers/robotframework.py | {
"start": 14252,
"end": 18744
} | class ____:
def __init__(self, string, identifiers):
self.identifier = None
self.base = None
self.index = None
self.start = -1
self.end = -1
self._identifiers = identifiers
self._may_have_internal_variables = False
try:
self._split(string)
except ValueError:
pass
else:
self._finalize()
def get_replaced_base(self, variables):
if self._may_have_internal_variables:
return variables.replace_string(self.base)
return self.base
def _finalize(self):
self.identifier = self._variable_chars[0]
self.base = ''.join(self._variable_chars[2:-1])
self.end = self.start + len(self._variable_chars)
if self._has_list_or_dict_variable_index():
self.index = ''.join(self._list_and_dict_variable_index_chars[1:-1])
self.end += len(self._list_and_dict_variable_index_chars)
def _has_list_or_dict_variable_index(self):
return self._list_and_dict_variable_index_chars\
and self._list_and_dict_variable_index_chars[-1] == ']'
def _split(self, string):
start_index, max_index = self._find_variable(string)
self.start = start_index
self._open_curly = 1
self._state = self._variable_state
self._variable_chars = [string[start_index], '{']
self._list_and_dict_variable_index_chars = []
self._string = string
start_index += 2
for index, char in enumerate(string[start_index:]):
index += start_index # Giving start to enumerate only in Py 2.6+
try:
self._state(char, index)
except StopIteration:
return
if index == max_index and not self._scanning_list_variable_index():
return
def _scanning_list_variable_index(self):
return self._state in [self._waiting_list_variable_index_state,
self._list_variable_index_state]
def _find_variable(self, string):
max_end_index = string.rfind('}')
if max_end_index == -1:
raise ValueError('No variable end found')
if self._is_escaped(string, max_end_index):
return self._find_variable(string[:max_end_index])
start_index = self._find_start_index(string, 1, max_end_index)
if start_index == -1:
raise ValueError('No variable start found')
return start_index, max_end_index
def _find_start_index(self, string, start, end):
index = string.find('{', start, end) - 1
if index < 0:
return -1
if self._start_index_is_ok(string, index):
return index
return self._find_start_index(string, index+2, end)
def _start_index_is_ok(self, string, index):
return string[index] in self._identifiers\
and not self._is_escaped(string, index)
def _is_escaped(self, string, index):
escaped = False
while index > 0 and string[index-1] == '\\':
index -= 1
escaped = not escaped
return escaped
def _variable_state(self, char, index):
self._variable_chars.append(char)
if char == '}' and not self._is_escaped(self._string, index):
self._open_curly -= 1
if self._open_curly == 0:
if not self._is_list_or_dict_variable():
raise StopIteration
self._state = self._waiting_list_variable_index_state
elif char in self._identifiers:
self._state = self._internal_variable_start_state
def _is_list_or_dict_variable(self):
return self._variable_chars[0] in ('@','&')
def _internal_variable_start_state(self, char, index):
self._state = self._variable_state
if char == '{':
self._variable_chars.append(char)
self._open_curly += 1
self._may_have_internal_variables = True
else:
self._variable_state(char, index)
def _waiting_list_variable_index_state(self, char, index):
if char != '[':
raise StopIteration
self._list_and_dict_variable_index_chars.append(char)
self._state = self._list_variable_index_state
def _list_variable_index_state(self, char, index):
self._list_and_dict_variable_index_chars.append(char)
if char == ']':
raise StopIteration
| VariableSplitter |
python | huggingface__transformers | src/transformers/models/deberta/modeling_deberta.py | {
"start": 20378,
"end": 21455
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.attention = DebertaAttention(config)
self.intermediate = DebertaIntermediate(config)
self.output = DebertaOutput(config)
def forward(
self,
hidden_states,
attention_mask,
query_states=None,
relative_pos=None,
rel_embeddings=None,
output_attentions: bool = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
attention_output, att_matrix = self.attention(
hidden_states,
attention_mask,
output_attentions=output_attentions,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
if output_attentions:
return (layer_output, att_matrix)
else:
return (layer_output, None)
| DebertaLayer |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vector_index.py | {
"start": 6286,
"end": 6894
} | class ____(_ConfigUpdateModel):
type_: Optional[PQEncoderType]
distribution: Optional[PQEncoderDistribution]
def merge_with_existing(self, schema: Dict[str, Any]) -> Dict[str, Any]:
"""Must be done manually since Pydantic does not work well with type and type_.
Errors shadowing type occur if we want to use type as a field name.
"""
if self.type_ is not None:
schema["type"] = str(self.type_.value)
if self.distribution is not None:
schema["distribution"] = str(self.distribution.value)
return schema
| _PQEncoderConfigUpdate |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.