language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_axis44.py
|
{
"start": 315,
"end": 1790
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis44.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [108176896, 108178816]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_x_axis(
{
"name": "XXX",
"name_font": {"rotation": -45, "baseline": -1},
"num_font": {"rotation": -45, "baseline": -1},
}
)
chart.set_y_axis(
{
"name": "YYY",
"name_font": {"rotation": -45, "baseline": -1},
"num_font": {"rotation": -45, "baseline": -1},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
PyCQA__pyflakes
|
pyflakes/test/test_api.py
|
{
"start": 673,
"end": 837
}
|
class ____:
"""
Mock an AST node.
"""
def __init__(self, lineno, col_offset=0):
self.lineno = lineno
self.col_offset = col_offset
|
Node
|
python
|
huggingface__transformers
|
src/transformers/utils/hp_naming.py
|
{
"start": 631,
"end": 4979
}
|
class ____:
PREFIX = "hp"
DEFAULTS = {}
NAMING_INFO = None
@classmethod
def set_defaults(cls, prefix, defaults):
cls.PREFIX = prefix
cls.DEFAULTS = defaults
cls.build_naming_info()
@staticmethod
def shortname_for_word(info, word):
if len(word) == 0:
return ""
short_word = None
if any(char.isdigit() for char in word):
raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(word) + 1):
prefix = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
short_word = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(integer):
s = ""
while integer != 0:
s = chr(ord("A") + integer % 10) + s
integer //= 10
return s
i = 0
while True:
sword = word + "#" + int_to_alphabetic(i)
if sword in info["reverse_short_word"]:
continue
else:
short_word = sword
break
info["short_word"][word] = short_word
info["reverse_short_word"][short_word] = word
return short_word
@staticmethod
def shortname_for_key(info, param_name):
words = param_name.split("_")
shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
separators = ["", "_"]
for separator in separators:
shortname = separator.join(shortname_parts)
if shortname not in info["reverse_short_param"]:
info["short_param"][param_name] = shortname
info["reverse_short_param"][shortname] = param_name
return shortname
return param_name
@staticmethod
def add_new_param_name(info, param_name):
short_name = TrialShortNamer.shortname_for_key(info, param_name)
info["short_param"][param_name] = short_name
info["reverse_short_param"][short_name] = param_name
@classmethod
def build_naming_info(cls):
if cls.NAMING_INFO is not None:
return
info = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
field_keys = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(info, k)
cls.NAMING_INFO = info
@classmethod
def shortname(cls, params):
cls.build_naming_info()
assert cls.PREFIX is not None
name = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"You should provide a default value for the param name {k} with value {v}")
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
key = cls.NAMING_INFO["short_param"][k]
if isinstance(v, bool):
v = 1 if v else 0
sep = "" if isinstance(v, (int, float)) else "-"
e = f"{key}{sep}{v}"
name.append(e)
return "_".join(name)
@classmethod
def parse_repr(cls, repr):
repr = repr[len(cls.PREFIX) + 1 :]
if repr == "":
values = []
else:
values = repr.split("_")
parameters = {}
for value in values:
if "-" in value:
p_k, p_v = value.split("-")
else:
p_k = re.sub("[0-9.]", "", value)
p_v = float(re.sub("[^0-9.]", "", value))
key = cls.NAMING_INFO["reverse_short_param"][p_k]
parameters[key] = p_v
for k in cls.DEFAULTS:
if k not in parameters:
parameters[k] = cls.DEFAULTS[k]
return parameters
|
TrialShortNamer
|
python
|
google__pytype
|
pytype/blocks/block_serializer.py
|
{
"start": 821,
"end": 877
}
|
class ____:
blocks: list[SerializedBlock]
|
SerializedCode
|
python
|
kamyu104__LeetCode-Solutions
|
Python/longest-zigzag-path-in-a-binary-tree.py
|
{
"start": 191,
"end": 663
}
|
class ____(object):
def longestZigZag(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def dfs(node, result):
if not node:
return [-1, -1]
left, right = dfs(node.left, result), dfs(node.right, result)
result[0] = max(result[0], left[1]+1, right[0]+1)
return [left[1]+1, right[0]+1]
result = [0]
dfs(root, result)
return result[0]
|
Solution
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 142004,
"end": 142403
}
|
class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(SponsorsTierOrderField), graphql_name="field"
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
|
SponsorsTierOrder
|
python
|
PrefectHQ__prefect
|
src/prefect/server/models/flow_runs.py
|
{
"start": 11693,
"end": 22347
}
|
class ____(PrefectBaseModel):
id: UUID
name: str
upstream_dependencies: List[TaskRunResult]
state: Optional[State]
expected_start_time: Optional[datetime.datetime]
start_time: Optional[datetime.datetime]
end_time: Optional[datetime.datetime]
total_run_time: Optional[datetime.timedelta]
estimated_run_time: Optional[datetime.timedelta]
untrackable_result: bool
async def read_task_run_dependencies(
session: AsyncSession,
flow_run_id: UUID,
) -> List[DependencyResult]:
"""
Get a task run dependency map for a given flow run.
"""
flow_run = await models.flow_runs.read_flow_run(
session=session, flow_run_id=flow_run_id
)
if not flow_run:
raise ObjectNotFoundError(f"Flow run with id {flow_run_id} not found")
task_runs = await models.task_runs.read_task_runs(
session=session,
flow_run_filter=schemas.filters.FlowRunFilter(
id=schemas.filters.FlowRunFilterId(any_=[flow_run_id])
),
)
dependency_graph = []
for task_run in task_runs:
inputs = list(set(chain(*task_run.task_inputs.values())))
untrackable_result_status = (
False
if task_run.state is None
else task_run.state.state_details.untrackable_result
)
dependency_graph.append(
DependencyResult(
id=task_run.id,
upstream_dependencies=inputs,
state=task_run.state,
expected_start_time=task_run.expected_start_time,
name=task_run.name,
start_time=task_run.start_time,
end_time=task_run.end_time,
total_run_time=task_run.total_run_time,
estimated_run_time=task_run.estimated_run_time,
untrackable_result=untrackable_result_status,
)
)
return dependency_graph
@db_injector
async def count_flow_runs(
db: PrefectDBInterface,
session: AsyncSession,
flow_filter: Optional[schemas.filters.FlowFilter] = None,
flow_run_filter: Optional[schemas.filters.FlowRunFilter] = None,
task_run_filter: Optional[schemas.filters.TaskRunFilter] = None,
deployment_filter: Optional[schemas.filters.DeploymentFilter] = None,
work_pool_filter: Optional[schemas.filters.WorkPoolFilter] = None,
work_queue_filter: Optional[schemas.filters.WorkQueueFilter] = None,
) -> int:
"""
Count flow runs.
Args:
session: a database session
flow_filter: only count flow runs whose flows match these filters
flow_run_filter: only count flow runs that match these filters
task_run_filter: only count flow runs whose task runs match these filters
deployment_filter: only count flow runs whose deployments match these filters
Returns:
int: count of flow runs
"""
query = select(sa.func.count(None)).select_from(db.FlowRun)
query = await _apply_flow_run_filters(
db,
query,
flow_filter=flow_filter,
flow_run_filter=flow_run_filter,
task_run_filter=task_run_filter,
deployment_filter=deployment_filter,
work_pool_filter=work_pool_filter,
work_queue_filter=work_queue_filter,
)
result = await session.execute(query)
return result.scalar_one()
@db_injector
async def delete_flow_run(
db: PrefectDBInterface, session: AsyncSession, flow_run_id: UUID
) -> bool:
"""
Delete a flow run by flow_run_id, handling concurrency limits if applicable.
Args:
session: A database session
flow_run_id: a flow run id
Returns:
bool: whether or not the flow run was deleted
"""
flow_run = await read_flow_run(session, flow_run_id)
if not flow_run:
return False
deployment_id = flow_run.deployment_id
if deployment_id:
await cleanup_flow_run_concurrency_slots(session=session, flow_run=flow_run)
# Delete the flow run
result = await session.execute(
delete(db.FlowRun).where(db.FlowRun.id == flow_run_id)
)
return result.rowcount > 0
async def set_flow_run_state(
session: AsyncSession,
flow_run_id: UUID,
state: schemas.states.State,
force: bool = False,
flow_policy: Optional[Type[FlowRunOrchestrationPolicy]] = None,
orchestration_parameters: Optional[Dict[str, Any]] = None,
client_version: Optional[str] = None,
) -> OrchestrationResult:
"""
Creates a new orchestrated flow run state.
Setting a new state on a run is the one of the principal actions that is governed by
Prefect's orchestration logic. Setting a new run state will not guarantee creation,
but instead trigger orchestration rules to govern the proposed `state` input. If
the state is considered valid, it will be written to the database. Otherwise, a
it's possible a different state, or no state, will be created. A `force` flag is
supplied to bypass a subset of orchestration logic.
Args:
session: a database session
flow_run_id: the flow run id
state: a flow run state model
force: if False, orchestration rules will be applied that may alter or prevent
the state transition. If True, orchestration rules are not applied.
Returns:
OrchestrationResult object
"""
# load the flow run
run = await models.flow_runs.read_flow_run(
session=session,
flow_run_id=flow_run_id,
# Lock the row to prevent orchestration race conditions
for_update=True,
)
if not run:
raise ObjectNotFoundError(f"Flow run with id {flow_run_id} not found")
initial_state = run.state.as_state() if run.state else None
initial_state_type = initial_state.type if initial_state else None
proposed_state_type = state.type if state else None
intended_transition = (initial_state_type, proposed_state_type)
if force or flow_policy is None:
flow_policy = MinimalFlowPolicy
orchestration_rules = flow_policy.compile_transition_rules(*intended_transition) # type: ignore
global_rules = GlobalFlowPolicy.compile_transition_rules(*intended_transition)
context = FlowOrchestrationContext(
session=session,
run=run,
initial_state=initial_state,
proposed_state=state,
client_version=client_version,
)
if orchestration_parameters is not None:
context.parameters = orchestration_parameters
# apply orchestration rules and create the new flow run state
async with contextlib.AsyncExitStack() as stack:
for rule in orchestration_rules:
context = await stack.enter_async_context(
rule(context, *intended_transition)
)
for rule in global_rules:
context = await stack.enter_async_context(
rule(context, *intended_transition)
)
await context.validate_proposed_state()
if context.orchestration_error is not None:
raise context.orchestration_error
result = OrchestrationResult(
state=context.validated_state,
status=context.response_status,
details=context.response_details,
)
return result
@db_injector
async def read_flow_run_graph(
db: PrefectDBInterface,
session: AsyncSession,
flow_run_id: UUID,
since: datetime.datetime = earliest_possible_datetime(),
) -> Graph:
"""Given a flow run, return the graph of it's task and subflow runs. If a `since`
datetime is provided, only return items that may have changed since that time."""
if isinstance(since, str):
since = DateTime.fromisoformat(since)
return await db.queries.flow_run_graph_v2(
session=session,
flow_run_id=flow_run_id,
since=since,
max_nodes=PREFECT_API_MAX_FLOW_RUN_GRAPH_NODES.value(),
max_artifacts=PREFECT_API_MAX_FLOW_RUN_GRAPH_ARTIFACTS.value(),
)
async def with_system_labels_for_flow_run(
session: AsyncSession,
flow_run: Union[schemas.core.FlowRun, schemas.actions.FlowRunCreate],
) -> schemas.core.KeyValueLabels:
"""Augment user supplied labels with system default labels for a flow
run."""
user_supplied_labels = flow_run.labels or {}
# `deployment_id` is deprecated on `schemas.actions.FlowRunCreate`. Only
# check `deployment_id` if given an instance of a `schemas.core.FlowRun`.
if isinstance(flow_run, schemas.core.FlowRun) and flow_run.deployment_id:
deployment = await models.deployments.read_deployment(
session, deployment_id=flow_run.deployment_id
)
if deployment:
# Use the deployment flow run utility for consistent label generation
return await models.deployments.with_system_labels_for_deployment_flow_run(
session=session,
deployment=deployment,
user_supplied_labels=user_supplied_labels,
)
# If the flow run is not part of a deployment, generate basic flow labels
default_labels = cast(
schemas.core.KeyValueLabels,
{
"prefect.flow.id": str(flow_run.flow_id),
},
)
parent_labels = await models.flows.read_flow_labels(session, flow_run.flow_id) or {}
return parent_labels | default_labels | user_supplied_labels
@db_injector
async def update_flow_run_labels(
db: PrefectDBInterface,
session: AsyncSession,
flow_run_id: UUID,
labels: KeyValueLabels,
) -> bool:
"""
Update flow run labels by patching existing labels with new values.
Args:
session: A database session
flow_run_id: the flow run id to update
labels: the new labels to patch into existing labels
Returns:
bool: whether the update was successful
"""
# First read the existing flow run to get current labels
flow_run: Optional[orm_models.FlowRun] = await read_flow_run(session, flow_run_id)
if not flow_run:
raise ObjectNotFoundError(f"Flow run with id {flow_run_id} not found")
# Merge existing labels with new labels
current_labels = flow_run.labels or {}
updated_labels = {**current_labels, **labels}
try:
# Update the flow run with merged labels
result = await session.execute(
sa.update(db.FlowRun)
.where(db.FlowRun.id == flow_run_id)
.values(labels=updated_labels)
)
success = result.rowcount > 0
if success:
await session.commit() # Explicitly commit
return success
except Exception:
raise
|
DependencyResult
|
python
|
django__django
|
django/db/models/fields/__init__.py
|
{
"start": 67298,
"end": 69040
}
|
class ____(Field):
"""
Store timedelta objects.
Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint
of microseconds on other databases.
"""
empty_strings_allowed = False
default_error_messages = {
"invalid": _(
"“%(value)s” value has an invalid format. It must be in "
"[DD] [[HH:]MM:]ss[.uuuuuu] format."
)
}
description = _("Duration")
def get_internal_type(self):
return "DurationField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.timedelta):
return value
try:
parsed = parse_duration(value)
except ValueError:
pass
else:
if parsed is not None:
return parsed
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
def get_db_prep_value(self, value, connection, prepared=False):
return connection.ops.adapt_durationfield_value(value)
def get_db_converters(self, connection):
converters = []
if not connection.features.has_native_duration_field:
converters.append(connection.ops.convert_durationfield_value)
return converters + super().get_db_converters(connection)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return "" if val is None else duration_string(val)
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.DurationField,
**kwargs,
}
)
|
DurationField
|
python
|
kamyu104__LeetCode-Solutions
|
Python/fraction-addition-and-subtraction.py
|
{
"start": 71,
"end": 617
}
|
class ____(object):
def fractionAddition(self, expression):
"""
:type expression: str
:rtype: str
"""
def gcd(a, b):
while b:
a, b = b, a%b
return a
ints = map(int, re.findall('[+-]?\d+', expression))
A, B = 0, 1
for i in xrange(0, len(ints), 2):
a, b = ints[i], ints[i+1]
A = A * b + a * B
B *= b
g = gcd(A, B)
A //= g
B //= g
return '%d/%d' % (A, B)
|
Solution
|
python
|
python__mypy
|
mypyc/ir/ops.py
|
{
"start": 15572,
"end": 16520
}
|
class ____(ControlOp):
"""Mark the end of basic block as unreachable.
This is sometimes necessary when the end of a basic block is never
reached. This can also be explicitly added to the end of non-None
returning functions (in None-returning function we can just return
None).
Mypy statically guarantees that the end of the function is not
unreachable if there is not a return statement.
This prevents the block formatter from being confused due to lack
of a leave and also leaves a nifty note in the IR. It is not
generally processed by visitors.
"""
error_kind = ERR_NEVER
def __init__(self, line: int = -1) -> None:
super().__init__(line)
def sources(self) -> list[Value]:
return []
def set_sources(self, new: list[Value]) -> None:
assert not new
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_unreachable(self)
|
Unreachable
|
python
|
huggingface__transformers
|
tests/models/d_fine/test_modeling_d_fine.py
|
{
"start": 10980,
"end": 27966
}
|
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (DFineModel, DFineForObjectDetection) if is_torch_available() else ()
pipeline_model_mapping = (
{"image-feature-extraction": DFineModel, "object-detection": DFineForObjectDetection}
if is_torch_available()
else {}
)
is_encoder_decoder = True
test_missing_keys = False
test_torch_exportable = True
# special case for head models
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "DFineForObjectDetection":
labels = []
for i in range(self.model_tester.batch_size):
target = {}
target["class_labels"] = torch.ones(
size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
)
target["boxes"] = torch.ones(
self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
)
labels.append(target)
inputs_dict["labels"] = labels
return inputs_dict
def setUp(self):
self.model_tester = DFineModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=DFineConfig,
has_text_modality=False,
common_properties=["hidden_size", "num_attention_heads"],
)
def test_config(self):
self.config_tester.run_common_tests()
def test_d_fine_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_d_fine_model(*config_and_inputs)
def test_d_fine_object_detection_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_d_fine_object_detection_head_model(*config_and_inputs)
@unittest.skip(reason="DFine doesn't work well with `nn.DataParallel")
def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip(reason="DFine does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="DFine does not use test_inputs_embeds_matches_input_ids")
def test_inputs_embeds_matches_input_ids(self):
pass
@unittest.skip(reason="DFine does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="DFine does not support input and output embeddings")
def test_model_common_attributes(self):
pass
@unittest.skip(reason="DFine does not use token embeddings")
def test_resize_tokens_embeddings(self):
pass
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions
self.assertEqual(len(attentions), self.model_tester.encoder_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions
self.assertEqual(len(attentions), self.model_tester.encoder_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[
self.model_tester.encoder_attention_heads,
self.model_tester.encoder_seq_length,
self.model_tester.encoder_seq_length,
],
)
out_len = len(outputs)
correct_outlen = 15
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
# Object Detection model returns pred_logits and pred_boxes
if model_class.__name__ == "DFineForObjectDetection":
correct_outlen += 2
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.decoder_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[
self.model_tester.decoder_attention_heads,
self.model_tester.num_queries,
self.model_tester.num_queries,
],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.decoder_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_queries,
self.model_tester.decoder_attention_heads,
self.model_tester.decoder_n_levels * self.model_tester.decoder_n_points
if isinstance(self.model_tester.decoder_n_points, int)
else sum(self.model_tester.decoder_n_points),
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
else:
# DFine should maintin encoder_hidden_states output
added_hidden_states = 2
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions
self.assertEqual(len(self_attentions), self.model_tester.encoder_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[
self.model_tester.encoder_attention_heads,
self.model_tester.encoder_seq_length,
self.model_tester.encoder_seq_length,
],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.encoder_in_channels) - 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[1].shape[-2:]),
[
self.model_tester.image_size // self.model_tester.feat_strides[-1],
self.model_tester.image_size // self.model_tester.feat_strides[-1],
],
)
if config.is_encoder_decoder:
hidden_states = outputs.decoder_hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.decoder_layers + 1
)
self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.num_queries, self.model_tester.d_model],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
# we take the first output since last_hidden_state is the first item
output = outputs[0]
encoder_hidden_states = outputs.encoder_hidden_states[0]
encoder_attentions = outputs.encoder_attentions[0]
encoder_hidden_states.retain_grad()
encoder_attentions.retain_grad()
decoder_attentions = outputs.decoder_attentions[0]
decoder_attentions.retain_grad()
cross_attentions = outputs.cross_attentions[0]
cross_attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(encoder_attentions.grad)
self.assertIsNotNone(decoder_attentions.grad)
self.assertIsNotNone(cross_attentions.grad)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_different_timm_backbone(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# let's pick a random timm backbone
config.encoder_in_channels = [24, 40, 432]
config.backbone = "tf_mobilenetv3_small_075"
config.backbone_config = None
config.use_timm_backbone = True
config.backbone_kwargs = {"out_indices": [2, 3, 4]}
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if model_class.__name__ == "DFineForObjectDetection":
expected_shape = (
self.model_tester.batch_size,
self.model_tester.num_queries,
self.model_tester.num_labels,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3)
else:
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3)
self.assertTrue(outputs)
def test_hf_backbone(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Load a pretrained HF checkpoint as backbone
config.backbone = "microsoft/resnet-18"
config.backbone_config = None
config.use_timm_backbone = False
config.use_pretrained_backbone = True
config.backbone_kwargs = {"out_indices": [2, 3, 4]}
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if model_class.__name__ == "DFineForObjectDetection":
expected_shape = (
self.model_tester.batch_size,
self.model_tester.num_queries,
self.model_tester.num_labels,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3)
else:
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3)
self.assertTrue(outputs)
@parameterized.expand(["float32", "float16", "bfloat16"])
@require_torch_accelerator
@slow
def test_inference_with_different_dtypes(self, dtype_str):
dtype = {
"float32": torch.float32,
"float16": torch.float16,
"bfloat16": torch.bfloat16,
}[dtype_str]
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device).to(dtype)
model.eval()
for key, tensor in inputs_dict.items():
if tensor.dtype == torch.float32:
inputs_dict[key] = tensor.to(dtype)
with torch.no_grad():
_ = model(**self._prepare_for_class(inputs_dict, model_class))
@parameterized.expand(["float32", "float16", "bfloat16"])
@require_torch_accelerator
@slow
def test_inference_equivalence_for_static_and_dynamic_anchors(self, dtype_str):
dtype = {
"float32": torch.float32,
"float16": torch.float16,
"bfloat16": torch.bfloat16,
}[dtype_str]
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
h, w = inputs_dict["pixel_values"].shape[-2:]
# convert inputs to the desired dtype
for key, tensor in inputs_dict.items():
if tensor.dtype == torch.float32:
inputs_dict[key] = tensor.to(dtype)
for model_class in self.all_model_classes:
with tempfile.TemporaryDirectory() as tmpdirname:
model_class(config).save_pretrained(tmpdirname)
model_static = model_class.from_pretrained(
tmpdirname, anchor_image_size=[h, w], device_map=torch_device, dtype=dtype
).eval()
model_dynamic = model_class.from_pretrained(
tmpdirname, anchor_image_size=None, device_map=torch_device, dtype=dtype
).eval()
self.assertIsNotNone(model_static.config.anchor_image_size)
self.assertIsNone(model_dynamic.config.anchor_image_size)
with torch.no_grad():
outputs_static = model_static(**self._prepare_for_class(inputs_dict, model_class))
outputs_dynamic = model_dynamic(**self._prepare_for_class(inputs_dict, model_class))
torch.testing.assert_close(
outputs_static.last_hidden_state, outputs_dynamic.last_hidden_state, rtol=1e-4, atol=1e-4
)
TOLERANCE = 1e-4
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
@slow
|
DFineModelTest
|
python
|
huggingface__transformers
|
src/transformers/models/poolformer/modeling_poolformer.py
|
{
"start": 9509,
"end": 10167
}
|
class ____(PreTrainedModel):
config: PoolFormerConfig
base_model_prefix = "poolformer"
main_input_name = "pixel_values"
input_modalities = ("image",)
_no_split_modules = ["PoolFormerLayer"]
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, PoolFormerLayer):
if hasattr(module, "layer_scale_1"):
init.constant_(module.layer_scale_1, self.config.layer_scale_init_value)
init.constant_(module.layer_scale_2, self.config.layer_scale_init_value)
@auto_docstring
|
PoolFormerPreTrainedModel
|
python
|
huggingface__transformers
|
tests/quantization/bnb/test_4bit.py
|
{
"start": 21181,
"end": 23365
}
|
class ____(Base4bitTest):
def setUp(self):
super().setUp()
def test_multi_accelerator_loading(self):
r"""
This tests that the model has been loaded and can be used correctly on a multi-accelerator setup.
Let's just try to load a model on 2 accelerators and see if it works. The model we test has ~2GB of total, 3GB should suffice
"""
device_map = {
"transformer.word_embeddings": 0,
"transformer.word_embeddings_layernorm": 0,
"lm_head": 0,
"transformer.h.0": 0,
"transformer.h.1": 0,
"transformer.h.2": 0,
"transformer.h.3": 0,
"transformer.h.4": 0,
"transformer.h.5": 0,
"transformer.h.6": 0,
"transformer.h.7": 0,
"transformer.h.8": 0,
"transformer.h.9": 0,
"transformer.h.10": 1,
"transformer.h.11": 1,
"transformer.h.12": 1,
"transformer.h.13": 1,
"transformer.h.14": 1,
"transformer.h.15": 1,
"transformer.h.16": 1,
"transformer.h.17": 0,
"transformer.h.18": 0,
"transformer.h.19": 0,
"transformer.h.20": 0,
"transformer.h.21": 0,
"transformer.h.22": 0,
"transformer.h.23": 1,
"transformer.ln_f": 0,
}
model_parallel = AutoModelForCausalLM.from_pretrained(
self.model_name, quantization_config=BitsAndBytesConfig(load_in_4bit=True), device_map=device_map
)
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
# Second real batch
output_parallel = model_parallel.generate(
input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10
)
self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
@apply_skip_if_not_implemented
|
Bnb4bitTestMultiAccelerator
|
python
|
falconry__falcon
|
falcon/errors.py
|
{
"start": 96817,
"end": 99022
}
|
class ____(HTTPBadRequest):
"""400 Bad Request.
A parameter is missing from the request. This error may refer to a
parameter in a query string, form, or document that was submitted
with the request.
`param_name` is the only positional argument allowed,
the other arguments are defined as keyword-only.
Args:
param_name (str): The name of the missing parameter.
Keyword Args:
headers (dict or list): A ``dict`` of header names and values
to set, or a ``list`` of (*name*, *value*) tuples. Both *name* and
*value* must be of type ``str`` or ``StringType``, and only
character values 0x00 through 0xFF may be used on platforms that
use wide characters.
Note:
The Content-Type header, if present, will be overridden. If
you wish to return custom error messages, you can create
your own HTTP error class, and install an error handler
to convert it into an appropriate HTTP response for the
client
Note:
Falcon can process a list of ``tuple`` slightly faster
than a ``dict``.
href (str): A URL someone can visit to find out more information
(default ``None``). Unicode characters are percent-encoded.
href_text (str): If href is given, use this as the friendly
title/description for the link (default 'API documentation
for this error').
code (int): An internal code that customers can reference in their
support request or to help them when searching for knowledge
base articles related to this error (default ``None``).
"""
def __init__(
self,
param_name: str,
*,
headers: HeaderArg | None = None,
**kwargs: HTTPErrorKeywordArguments,
) -> None:
description = 'The "{0}" parameter is required.'
description = description.format(param_name)
super().__init__(
title='Missing parameter',
description=description,
headers=headers,
**kwargs,
)
|
HTTPMissingParam
|
python
|
oauthlib__oauthlib
|
tests/openid/connect/core/grant_types/test_implicit.py
|
{
"start": 681,
"end": 6762
}
|
class ____(TestCase):
def setUp(self):
self.request = Request('http://a.b/path')
self.request.scopes = ('hello', 'openid')
self.request.expires_in = 1800
self.request.client_id = 'abcdef'
self.request.response_type = 'id_token token'
self.request.redirect_uri = 'https://a.b/cb'
self.request.state = 'abc'
self.request.nonce = 'xyz'
self.mock_validator = mock.MagicMock()
self.mock_validator.get_id_token.side_effect = get_id_token_mock
self.auth = ImplicitGrant(request_validator=self.mock_validator)
token = 'MOCKED_TOKEN'
self.url_query = 'https://a.b/cb?state=abc&token_type=Bearer&expires_in=3600&scope=hello+openid&access_token=abc&id_token=%s' % token
self.url_fragment = 'https://a.b/cb#state=abc&token_type=Bearer&expires_in=3600&scope=hello+openid&access_token=abc&id_token=%s' % token
@mock.patch('oauthlib.common.generate_token')
def test_authorization(self, generate_token):
scope, info = self.auth.validate_authorization_request(self.request)
generate_token.return_value = 'abc'
bearer = BearerToken(self.mock_validator)
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertURLEqual(h['Location'], self.url_fragment, parse_fragment=True)
self.assertIsNone(b)
self.assertEqual(s, 302)
self.request.response_type = 'id_token'
token = 'MOCKED_TOKEN'
url = 'https://a.b/cb#state=abc&id_token=%s' % token
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertURLEqual(h['Location'], url, parse_fragment=True)
self.assertIsNone(b)
self.assertEqual(s, 302)
@mock.patch('oauthlib.common.generate_token')
def test_no_prompt_authorization(self, generate_token):
generate_token.return_value = 'abc'
self.request.prompt = 'none'
bearer = BearerToken(self.mock_validator)
self.request.response_mode = 'query'
self.request.id_token_hint = 'me@email.com'
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertURLEqual(h['Location'], self.url_query)
self.assertIsNone(b)
self.assertEqual(s, 302)
# Test alternative response modes
self.request.response_mode = 'fragment'
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertURLEqual(h['Location'], self.url_fragment, parse_fragment=True)
# Ensure silent authentication and authorization is done
self.mock_validator.validate_silent_login.return_value = False
self.mock_validator.validate_silent_authorization.return_value = True
self.assertRaises(errors.LoginRequired,
self.auth.validate_authorization_request,
self.request)
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertIn('error=login_required', h['Location'])
self.mock_validator.validate_silent_login.return_value = True
self.mock_validator.validate_silent_authorization.return_value = False
self.assertRaises(errors.ConsentRequired,
self.auth.validate_authorization_request,
self.request)
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertIn('error=consent_required', h['Location'])
# ID token hint must match logged in user
self.mock_validator.validate_silent_authorization.return_value = True
self.mock_validator.validate_user_match.return_value = False
self.assertRaises(errors.LoginRequired,
self.auth.validate_authorization_request,
self.request)
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertIn('error=login_required', h['Location'])
def test_none_multi_prompt(self):
bearer = BearerToken(self.mock_validator)
self.request.prompt = 'none login'
self.assertRaises(errors.InvalidRequestError,
self.auth.validate_authorization_request,
self.request)
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertIn('error=invalid_request', h['Location'])
self.request.prompt = 'none consent'
self.assertRaises(errors.InvalidRequestError,
self.auth.validate_authorization_request,
self.request)
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertIn('error=invalid_request', h['Location'])
self.request.prompt = 'none select_account'
self.assertRaises(errors.InvalidRequestError,
self.auth.validate_authorization_request,
self.request)
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertIn('error=invalid_request', h['Location'])
self.request.prompt = 'consent none login'
self.assertRaises(errors.InvalidRequestError,
self.auth.validate_authorization_request,
self.request)
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertIn('error=invalid_request', h['Location'])
@mock.patch('oauthlib.common.generate_token')
def test_required_nonce(self, generate_token):
generate_token.return_value = 'abc'
self.request.nonce = None
self.assertRaises(errors.InvalidRequestError, self.auth.validate_authorization_request, self.request)
bearer = BearerToken(self.mock_validator)
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertIn('error=invalid_request', h['Location'])
self.assertIsNone(b)
self.assertEqual(s, 302)
|
OpenIDImplicitTest
|
python
|
sphinx-doc__sphinx
|
sphinx/ext/autodoc/_sentinels.py
|
{
"start": 1018,
"end": 1262
}
|
class ____(_Sentinel):
"""A special value for :*-members: that matches to any member."""
def __contains__(self, item: object) -> Literal[True]:
return True
def append(self, item: object) -> None:
pass # nothing
|
_All
|
python
|
gevent__gevent
|
src/gevent/tests/test__monkey_queue.py
|
{
"start": 3423,
"end": 8519
}
|
class ____(unittest.TestCase, BlockingTestMixin):
type2test = Queue.Queue
def setUp(self):
self.cum = 0
self.cumlock = threading.Lock()
def simple_queue_test(self, q):
if not q.empty():
raise RuntimeError("Call this function with an empty queue")
# I guess we better check things actually queue correctly a little :)
q.put(111)
q.put(333)
q.put(222)
q.put(444)
target_first_items = dict(
Queue=111,
LifoQueue=444,
PriorityQueue=111)
actual_first_item = (q.peek(), q.get())
self.assertEqual(actual_first_item,
(target_first_items[q.__class__.__name__],
target_first_items[q.__class__.__name__]),
"q.peek() and q.get() are not equal!")
target_order = dict(Queue=[333, 222, 444],
LifoQueue=[222, 333, 111],
PriorityQueue=[222, 333, 444])
actual_order = [q.get(), q.get(), q.get()]
self.assertEqual(actual_order, target_order[q.__class__.__name__],
"Didn't seem to queue the correct data!")
for i in range(QUEUE_SIZE-1):
q.put(i)
self.assertFalse(q.empty(), "Queue should not be empty")
self.assertFalse(q.full(), "Queue should not be full")
q.put(999)
self.assertTrue(q.full(), "Queue should be full")
try:
q.put(888, block=0)
self.fail("Didn't appear to block with a full queue")
except Queue.Full:
pass
try:
q.put(888, timeout=0.01)
self.fail("Didn't appear to time-out with a full queue")
except Queue.Full:
pass
self.assertEqual(q.qsize(), QUEUE_SIZE)
# Test a blocking put
self.do_blocking_test(q.put, (888,), q.get, ())
self.do_blocking_test(q.put, (888, True, 10), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(q.empty(), "Queue should be empty")
try:
q.get(block=0)
self.fail("Didn't appear to block with an empty queue")
except Queue.Empty:
pass
try:
q.get(timeout=0.01)
self.fail("Didn't appear to time-out with an empty queue")
except Queue.Empty:
pass
# Test a blocking get
self.do_blocking_test(q.get, (), q.put, ('empty',))
self.do_blocking_test(q.get, (True, 10), q.put, ('empty',))
def worker(self, q):
while True:
x = q.get()
if x is None:
q.task_done()
return
#with self.cumlock:
self.cum += x
q.task_done()
def queue_join_test(self, q):
self.cum = 0
for i in (0, 1):
threading.Thread(target=self.worker, args=(q,)).start()
for i in range(100):
q.put(i)
q.join()
self.assertEqual(self.cum, sum(range(100)),
"q.join() did not block until all tasks were done")
for i in (0, 1):
q.put(None) # instruct the threads to close
q.join() # verify that you can join twice
def test_queue_task_done(self):
# Test to make sure a queue task completed successfully.
q = Queue.JoinableQueue() # self.type2test()
# XXX the same test in subclasses
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_queue_join(self):
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
q = Queue.JoinableQueue() # self.type2test()
# XXX the same test in subclass
self.queue_join_test(q)
self.queue_join_test(q)
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_queue_task_done_with_items(self):
# Passing items to the constructor allows for as
# many task_done calls. Joining before all the task done
# are called returns false
# XXX the same test in subclass
l = [1, 2, 3]
q = Queue.JoinableQueue(items=l)
for i in l:
self.assertFalse(q.join(timeout=0.001))
self.assertEqual(i, q.get())
q.task_done()
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
self.assertTrue(q.join(timeout=0.001))
def test_simple_queue(self):
# Do it a couple of times on the same queue.
# Done twice to make sure works with same instance reused.
q = self.type2test(QUEUE_SIZE)
self.simple_queue_test(q)
self.simple_queue_test(q)
|
BaseQueueTest
|
python
|
huggingface__transformers
|
src/transformers/models/gemma3/modular_gemma3.py
|
{
"start": 37117,
"end": 42037
}
|
class ____(PaliGemmaModel):
# we are filtering the logits/labels so we shouldn't divide the loss based on num_items_in_batch
accepts_loss_kwargs = False
def __init__(self, config: Gemma3Config):
super().__init__(config)
del self.text_config_dtype
def get_image_features(self, pixel_values: torch.Tensor) -> torch.Tensor:
"""
Projects the last hidden state from the vision model into language model space.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)
The tensors corresponding to the input images.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
"""
vision_outputs = self.vision_tower(pixel_values=pixel_values).last_hidden_state
image_features = self.multi_modal_projector(vision_outputs)
return image_features
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
token_type_ids: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**lm_kwargs,
) -> Union[tuple, Gemma3ModelOutputWithPast]:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Replace image id with PAD if the image token if OOV, to avoid index-errors
if input_ids is not None and self.config.image_token_id >= self.vocab_size:
special_image_mask = input_ids == self.config.image_token_id
llm_input_ids = input_ids.clone()
llm_input_ids[special_image_mask] = 0
else:
llm_input_ids = input_ids
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(llm_input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
# Merge text and images
if pixel_values is not None:
image_features = self.get_image_features(pixel_values)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
causal_mask_mapping = create_causal_mask_mapping(
self.config,
inputs_embeds,
attention_mask,
cache_position,
past_key_values,
position_ids,
token_type_ids,
pixel_values,
is_training=self.training,
)
outputs = self.language_model(
attention_mask=causal_mask_mapping,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**lm_kwargs,
)
return Gemma3ModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values if use_cache else None,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
|
Gemma3Model
|
python
|
pytorch__pytorch
|
torch/_inductor/remote_cache.py
|
{
"start": 2971,
"end": 4219
}
|
class ____(RemoteCacheSerde[_T, _T]):
def encode(self, data: _T) -> _T:
return data
def decode(self, data: _T) -> _T:
return data
# This class is the top of a RemoteCache. A RemoteCache is fundamentally made of
# three parts:
#
# 1. The controller (this class).
# 2. A serializer/deserializer (instance of RemoteCacheSerde).
# 3. A backend (instance of RemoteCacheBackend).
#
# To write (`put`), the RemoteCache takes data, uses the RemoteCacheSerde to
# convert it for the backend and passes it to the backend.
#
# Conversely when reading (`get`), the RemoteCache takes data from the backend,
# uses the RemoteCacheSerde to convert it and returns it.
#
# The RemoteCacheBackend is generic on _U - which is the type of data the
# backend can directly cache (usually `bytes`).
#
# The RemoteCacheSerde is responsible for converting between _T (the type of
# data the RemoteCache accepts in `put` and returns in `get`) and _U.
#
# When instantiating a RemoteCache you should override, not directly create a
# RemoteCache. The reason is that when logging cache use (`TORCH_LOGS=cache`) we
# use the concrete type of the RemoteCache as the reported cache. See
# RemoteFxGraphCache below as an example.
|
RemoteCachePassthroughSerde
|
python
|
cython__cython
|
Cython/Debugger/libpython.py
|
{
"start": 20535,
"end": 20708
}
|
class ____:
def __init__(self, ml_name):
self.ml_name = ml_name
def __repr__(self):
return "<built-in function %s>" % self.ml_name
|
BuiltInFunctionProxy
|
python
|
pytorch__pytorch
|
torch/ao/nn/quantized/reference/modules/conv.py
|
{
"start": 355,
"end": 1703
}
|
class ____(torch.nn.modules.conv._ConvNd, ReferenceQuantizedModule):
"""A reference version of nn.quantized.Conv2d
we will not pack the parameters in this module, since weight packing is an
optimization for quantized backends supported in PyTorch (fbgemm/qnnpack),
this is useful when user want to use this module in other backends like Glow.
"""
__annotations__ = {"bias": Optional[torch.Tensor]}
_IS_REFERENCE = True
@staticmethod
def from_float(cls, float_conv, weight_qparams):
qref_conv = cls(
float_conv.in_channels,
float_conv.out_channels,
float_conv.kernel_size, # type: ignore[arg-type]
float_conv.stride, # type: ignore[arg-type]
float_conv.padding, # type: ignore[arg-type]
float_conv.dilation, # type: ignore[arg-type]
float_conv.groups,
float_conv.bias is not None, # type: ignore[arg-type]
float_conv.padding_mode,
device=float_conv.weight.device,
dtype=float_conv.weight.dtype,
weight_qparams=weight_qparams,
)
qref_conv.weight = torch.nn.Parameter(float_conv.weight.detach())
if float_conv.bias is not None:
qref_conv.bias = torch.nn.Parameter(float_conv.bias.detach())
return qref_conv
|
_ConvNd
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/array_grad_test.py
|
{
"start": 1137,
"end": 5840
}
|
class ____(test.TestCase):
def _testGrad(self, f, x):
max_error = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(f, [x]))
self.assertLess(max_error, 1e-4)
def test_gather_v2_simple(self):
x = constant_op.constant([1., 2., 3., 4., 5.], dtype=dtypes.float64)
def f(x):
return array_ops.gather_v2(
x, constant_op.constant([2, 0, 2, 4], dtype=dtypes.int32))
self._testGrad(f, x)
def test_gather_v2_more_index_dims(self):
x = constant_op.constant([1., 2., 3., 4., 5.], dtype=dtypes.float64)
def f(x):
return array_ops.gather_v2(
x, constant_op.constant([[2, 0], [2, 4]], dtype=dtypes.int32))
self._testGrad(f, x)
def test_gather_v2_more_param_dims(self):
x = constant_op.constant([[1., 2.], [3., 4.]], dtype=dtypes.float64)
def f(x):
return array_ops.gather_v2(
x, constant_op.constant([1, 0], dtype=dtypes.int32))
self._testGrad(f, x)
def test_gather_v2_axis(self):
x = constant_op.constant([[1., 2.], [3., 4.]], dtype=dtypes.float64)
def f(x):
return array_ops.gather_v2(
x, constant_op.constant([1, 0], dtype=dtypes.int32), axis=1)
self._testGrad(f, x)
def test_gather_v2_batch_dims(self):
x = constant_op.constant([[1., 2.], [3., 4.]], dtype=dtypes.float64)
def f(x):
return array_ops.gather_v2(
x,
constant_op.constant([[1, 0], [0, 0]], dtype=dtypes.int32),
axis=1,
batch_dims=1)
self._testGrad(f, x)
def test_gather_v2_2batch_dims(self):
x = constant_op.constant([[[1., 2.], [3., 4.]]], dtype=dtypes.float64)
def f(x):
return array_ops.gather_v2(
x,
constant_op.constant([[[1, 0], [0, 0]]], dtype=dtypes.int32),
axis=2,
batch_dims=2)
self._testGrad(f, x)
def test_gather_v2_batch_dims_with_axis(self):
x = constant_op.constant([[[1., 2.]], [[3., 4.]]], dtype=dtypes.float64)
def f(x):
return array_ops.gather_v2(
x,
constant_op.constant([[0], [0]], dtype=dtypes.int32),
axis=2,
batch_dims=1)
self._testGrad(f, x)
def test_gather_v2_zero_bsize_grad_has_matching_shapes(self):
params = array_ops.zeros(shape=[0, 1, 8, 16], dtype=dtypes.float64)
indices = array_ops.zeros(shape=[0, 1, 3], dtype=dtypes.int32)
def f(params):
return array_ops.gather_v2(params, indices, axis=2, batch_dims=2)
grads = backprop.gradients_function(f)(params)
self.assertLen(grads, 1)
self.assertAllEqual(params.shape, grads[0].shape)
def test_broadcast_to(self):
x = constant_op.constant([1., 2., 3.], dtype=dtypes.float64)
y = constant_op.constant([2, 3], dtype=dtypes.int32)
def f(x):
return array_ops.broadcast_to(
x,
y)
self._testGrad(f, x)
def test_broadcast_to_int64(self):
x = constant_op.constant([1., 2., 3.], dtype=dtypes.float64)
y = constant_op.constant([2, 3], dtype=dtypes.int64)
def f(x):
return array_ops.broadcast_to(
x,
y)
self._testGrad(f, x)
def test_slice_int64(self):
x = constant_op.constant([1., 2., 3.], dtype=dtypes.float64)
begin = constant_op.constant([1], dtype=dtypes.int64)
size = constant_op.constant([1], dtype=dtypes.int64)
def f(x):
return array_ops.slice(x, begin, size)
self._testGrad(f, x)
def test_reshape_simple(self):
x = constant_op.constant([1., 2., 3.], dtype=dtypes.float64)
y = constant_op.constant([3, 1], dtype=dtypes.int64)
def f(x):
return array_ops.reshape(x, y)
self._testGrad(f, x)
def test_reshape_one_unknown_dim(self):
def f(x):
x_without_shape = array_ops.placeholder_with_default(x, shape=[None, 2])
return array_ops.reshape(x_without_shape, [3, 2])
x = constant_op.constant([[1., 2.], [3., 4.], [5., 6.]],
dtype=dtypes.float64)
self._testGrad(f, x)
def test_reshape_two_unknown_dims(self):
def f(x):
x_without_shape = array_ops.placeholder_with_default(x,
shape=[None, None])
return array_ops.reshape(x_without_shape, [6])
x = constant_op.constant([[1., 2., 3.], [4., 5., 6.]], dtype=dtypes.float64)
self._testGrad(f, x)
def test_reshape_one_unknown_dim_and_zero_elements(self):
def f(x):
x_without_shape = array_ops.placeholder_with_default(x, shape=[None, 0])
return array_ops.reshape(x_without_shape, [0])
x = constant_op.constant([], shape=[3, 0], dtype=dtypes.float64)
self._testGrad(f, x)
if __name__ == "__main__":
test.main()
|
ArrayGradTest
|
python
|
huggingface__transformers
|
src/transformers/models/groupvit/modeling_groupvit.py
|
{
"start": 43558,
"end": 45401
}
|
class ____(GroupViTPreTrainedModel):
config: GroupViTTextConfig
input_modalities = ("text",)
def __init__(self, config: GroupViTTextConfig):
super().__init__(config)
self.text_model = GroupViTTextTransformer(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.text_model.embeddings.token_embedding
def set_input_embeddings(self, value):
self.text_model.embeddings.token_embedding = value
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
r"""
Examples:
```python
>>> from transformers import CLIPTokenizer, GroupViTTextModel
>>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> model = GroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled (EOS token) states
```"""
return self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
|
GroupViTTextModel
|
python
|
bottlepy__bottle
|
test/test_wsgi.py
|
{
"start": 14820,
"end": 17544
}
|
class ____(ServerTestBase):
''' Tests Decorators '''
def test_view(self):
""" WSGI: Test view-decorator (should override autojson) """
with chdir(__file__):
@bottle.route('/tpl')
@bottle.view('stpl_t2main')
def test():
return dict(content='1234')
result = '+base+\n+main+\n!1234!\n+include+\n-main-\n+include+\n-base-\n'
self.assertHeader('Content-Type', 'text/html; charset=UTF-8', '/tpl')
self.assertBody(result, '/tpl')
def test_view_error(self):
""" WSGI: Test if view-decorator reacts on non-dict return values correctly."""
@bottle.route('/tpl')
@bottle.view('stpl_t2main')
def test():
return bottle.HTTPError(401, 'The cake is a lie!')
self.assertInBody('The cake is a lie!', '/tpl')
self.assertInBody('401 Unauthorized', '/tpl')
self.assertStatus(401, '/tpl')
def test_truncate_body(self):
""" WSGI: Some HTTP status codes must not be used with a response-body """
@bottle.route('/test/<code>')
def test(code):
bottle.response.status = int(code)
return 'Some body content'
self.assertBody('Some body content', '/test/200')
self.assertBody('', '/test/100')
self.assertBody('', '/test/101')
self.assertBody('', '/test/204')
self.assertBody('', '/test/304')
def test_routebuild(self):
""" WSGI: Test route builder """
def foo(): pass
bottle.route('/a/<b>/c', name='named')(foo)
bottle.request.environ['SCRIPT_NAME'] = ''
self.assertEqual('/a/xxx/c', bottle.url('named', b='xxx'))
self.assertEqual('/a/xxx/c', bottle.app().get_url('named', b='xxx'))
bottle.request.environ['SCRIPT_NAME'] = '/app'
self.assertEqual('/app/a/xxx/c', bottle.url('named', b='xxx'))
bottle.request.environ['SCRIPT_NAME'] = '/app/'
self.assertEqual('/app/a/xxx/c', bottle.url('named', b='xxx'))
bottle.request.environ['SCRIPT_NAME'] = 'app/'
self.assertEqual('/app/a/xxx/c', bottle.url('named', b='xxx'))
def test_autoroute(self):
app = bottle.Bottle()
def a(): pass
def b(x): pass
def c(x, y): pass
def d(x, y=5): pass
def e(x=5, y=6): pass
self.assertEqual(['/a'],list(bottle.yieldroutes(a)))
self.assertEqual(['/b/<x>'],list(bottle.yieldroutes(b)))
self.assertEqual(['/c/<x>/<y>'],list(bottle.yieldroutes(c)))
self.assertEqual(['/d/<x>','/d/<x>/<y>'],list(bottle.yieldroutes(d)))
self.assertEqual(['/e','/e/<x>','/e/<x>/<y>'],list(bottle.yieldroutes(e)))
|
TestDecorators
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/sqltypes.py
|
{
"start": 25241,
"end": 25539
}
|
class ____(Float[_N]):
"""A type for double ``FLOAT`` floating point types.
Typically generates a ``DOUBLE`` or ``DOUBLE_PRECISION`` in DDL,
and otherwise acts like a normal :class:`.Float` on the Python
side.
.. versionadded:: 2.0
"""
__visit_name__ = "double"
|
Double
|
python
|
readthedocs__readthedocs.org
|
readthedocs/builds/migrations/0024_status_code_choices.py
|
{
"start": 149,
"end": 806
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0023_add_status_code"),
]
operations = [
migrations.RemoveField(
model_name="build",
name="status_code",
),
migrations.AddField(
model_name="build",
name="status",
field=models.CharField(
blank=True,
choices=[("normal", "Normal"), ("duplicated", "Duplicated")],
default=None,
max_length=32,
null=True,
verbose_name="Status",
),
),
]
|
Migration
|
python
|
numba__numba
|
numba/cuda/tests/cudapy/test_gufunc.py
|
{
"start": 873,
"end": 12471
}
|
class ____(CUDATestCase):
def test_gufunc_small(self):
gufunc = _get_matmulcore_gufunc()
matrix_ct = 2
A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2,
4)
B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4,
5)
C = gufunc(A, B)
Gold = np.matmul(A, B)
self.assertTrue(np.allclose(C, Gold))
def test_gufunc_auto_transfer(self):
gufunc = _get_matmulcore_gufunc()
matrix_ct = 2
A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2,
4)
B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4,
5)
dB = cuda.to_device(B)
C = gufunc(A, dB).copy_to_host()
Gold = np.matmul(A, B)
self.assertTrue(np.allclose(C, Gold))
def test_gufunc(self):
gufunc = _get_matmulcore_gufunc()
matrix_ct = 1001 # an odd number to test thread/block division in CUDA
A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2,
4)
B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4,
5)
C = gufunc(A, B)
Gold = np.matmul(A, B)
self.assertTrue(np.allclose(C, Gold))
def test_gufunc_hidim(self):
gufunc = _get_matmulcore_gufunc()
matrix_ct = 100 # an odd number to test thread/block division in CUDA
A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(4, 25, 2, 4)
B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(4, 25, 4, 5)
C = gufunc(A, B)
Gold = np.matmul(A, B)
self.assertTrue(np.allclose(C, Gold))
def test_gufunc_new_axis(self):
gufunc = _get_matmulcore_gufunc(dtype=float64)
X = np.random.randn(10, 3, 3)
Y = np.random.randn(3, 3)
gold = np.matmul(X, Y)
res1 = gufunc(X, Y)
np.testing.assert_allclose(gold, res1)
res2 = gufunc(X, np.tile(Y, (10, 1, 1)))
np.testing.assert_allclose(gold, res2)
def test_gufunc_stream(self):
gufunc = _get_matmulcore_gufunc()
#cuda.driver.flush_pending_free()
matrix_ct = 1001 # an odd number to test thread/block division in CUDA
A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2,
4)
B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4,
5)
stream = cuda.stream()
dA = cuda.to_device(A, stream)
dB = cuda.to_device(B, stream)
dC = cuda.device_array(shape=(1001, 2, 5), dtype=A.dtype, stream=stream)
dC = gufunc(dA, dB, out=dC, stream=stream)
C = dC.copy_to_host(stream=stream)
stream.synchronize()
Gold = np.matmul(A, B)
self.assertTrue(np.allclose(C, Gold))
def test_copy(self):
@guvectorize([void(float32[:], float32[:])],
'(x)->(x)',
target='cuda')
def copy(A, B):
for i in range(B.size):
B[i] = A[i]
A = np.arange(10, dtype=np.float32) + 1
B = np.zeros_like(A)
copy(A, out=B)
np.testing.assert_allclose(A, B)
def test_copy_unspecified_return(self):
# Ensure that behaviour is correct when the return type is not
# specified in the signature.
@guvectorize([(float32[:], float32[:])],
'(x)->(x)',
target='cuda')
def copy(A, B):
for i in range(B.size):
B[i] = A[i]
A = np.arange(10, dtype=np.float32) + 1
B = np.zeros_like(A)
copy(A, out=B)
self.assertTrue(np.allclose(A, B))
def test_copy_odd(self):
@guvectorize([void(float32[:], float32[:])],
'(x)->(x)',
target='cuda')
def copy(A, B):
for i in range(B.size):
B[i] = A[i]
A = np.arange(11, dtype=np.float32) + 1
B = np.zeros_like(A)
copy(A, out=B)
self.assertTrue(np.allclose(A, B))
def test_copy2d(self):
@guvectorize([void(float32[:, :], float32[:, :])],
'(x, y)->(x, y)',
target='cuda')
def copy2d(A, B):
for x in range(B.shape[0]):
for y in range(B.shape[1]):
B[x, y] = A[x, y]
A = np.arange(30, dtype=np.float32).reshape(5, 6) + 1
B = np.zeros_like(A)
copy2d(A, out=B)
self.assertTrue(np.allclose(A, B))
def test_not_supported_call_from_jit(self):
# not supported
@guvectorize([void(int32[:], int32[:])],
'(n)->(n)', target='cuda')
def gufunc_copy(A, b):
for i in range(A.shape[0]):
b[i] = A[i]
@cuda.jit
def cuda_jit(A, b):
return gufunc_copy(A, b)
A = np.arange(1024 * 32).astype('int32')
b = np.zeros_like(A)
msg = "Untyped global name 'gufunc_copy'.*"
with self.assertRaisesRegex(TypingError, msg):
cuda_jit[1, 1](A, b)
# Test inefficient use of the GPU where the inputs are all mapped onto a
# single thread in a single block.
def test_inefficient_launch_configuration(self):
@guvectorize(['void(float32[:], float32[:], float32[:])'],
'(n),(n)->(n)', target='cuda')
def numba_dist_cuda(a, b, dist):
len = a.shape[0]
for i in range(len):
dist[i] = a[i] * b[i]
a = np.random.rand(1024 * 32).astype('float32')
b = np.random.rand(1024 * 32).astype('float32')
dist = np.zeros(a.shape[0]).astype('float32')
with override_config('CUDA_LOW_OCCUPANCY_WARNINGS', 1):
with warnings.catch_warnings(record=True) as w:
numba_dist_cuda(a, b, dist)
self.assertEqual(w[0].category, NumbaPerformanceWarning)
self.assertIn('Grid size', str(w[0].message))
self.assertIn('low occupancy', str(w[0].message))
def test_efficient_launch_configuration(self):
@guvectorize(['void(float32[:], float32[:], float32[:])'],
'(n),(n)->(n)', nopython=True, target='cuda')
def numba_dist_cuda2(a, b, dist):
len = a.shape[0]
for i in range(len):
dist[i] = a[i] * b[i]
a = np.random.rand(524288 * 2).astype('float32').\
reshape((524288, 2))
b = np.random.rand(524288 * 2).astype('float32').\
reshape((524288, 2))
dist = np.zeros_like(a)
with override_config('CUDA_LOW_OCCUPANCY_WARNINGS', 1):
with warnings.catch_warnings(record=True) as w:
numba_dist_cuda2(a, b, dist)
self.assertEqual(len(w), 0)
def test_nopython_flag(self):
def foo(A, B):
pass
# nopython = True is fine
guvectorize([void(float32[:], float32[:])], '(x)->(x)', target='cuda',
nopython=True)(foo)
# nopython = False is bad
with self.assertRaises(TypeError) as raises:
guvectorize([void(float32[:], float32[:])], '(x)->(x)',
target='cuda', nopython=False)(foo)
self.assertEqual("nopython flag must be True", str(raises.exception))
def test_invalid_flags(self):
# Check invalid flags
def foo(A, B):
pass
with self.assertRaises(TypeError) as raises:
guvectorize([void(float32[:], float32[:])], '(x)->(x)',
target='cuda', what1=True, ever2=False)(foo)
head = "The following target options are not supported:"
msg = str(raises.exception)
self.assertEqual(msg[:len(head)], head)
items = msg[len(head):].strip().split(',')
items = [i.strip("'\" ") for i in items]
self.assertEqual(set(['what1', 'ever2']), set(items))
def test_duplicated_output(self):
@guvectorize([void(float32[:], float32[:])], '(x)->(x)', target='cuda')
def foo(inp, out):
pass # intentionally empty; never executed
inp = out = np.zeros(10, dtype=np.float32)
with self.assertRaises(ValueError) as raises:
foo(inp, out, out=out)
msg = "cannot specify argument 'out' as both positional and keyword"
self.assertEqual(str(raises.exception), msg)
def check_tuple_arg(self, a, b):
@guvectorize([(float64[:], float64[:], float64[:])], '(n),(n)->()',
target='cuda')
def gu_reduce(x, y, r):
s = 0
for i in range(len(x)):
s += x[i] * y[i]
r[0] = s
r = gu_reduce(a, b)
expected = np.sum(np.asarray(a) * np.asarray(b), axis=1)
np.testing.assert_equal(expected, r)
def test_tuple_of_tuple_arg(self):
a = ((1.0, 2.0, 3.0),
(4.0, 5.0, 6.0))
b = ((1.5, 2.5, 3.5),
(4.5, 5.5, 6.5))
self.check_tuple_arg(a, b)
def test_tuple_of_namedtuple_arg(self):
Point = namedtuple('Point', ('x', 'y', 'z'))
a = (Point(x=1.0, y=2.0, z=3.0),
Point(x=4.0, y=5.0, z=6.0))
b = (Point(x=1.5, y=2.5, z=3.5),
Point(x=4.5, y=5.5, z=6.5))
self.check_tuple_arg(a, b)
def test_tuple_of_array_arg(self):
a = (np.asarray((1.0, 2.0, 3.0)),
np.asarray((4.0, 5.0, 6.0)))
b = (np.asarray((1.5, 2.5, 3.5)),
np.asarray((4.5, 5.5, 6.5)))
self.check_tuple_arg(a, b)
def test_gufunc_name(self):
gufunc = _get_matmulcore_gufunc()
self.assertEqual(gufunc.__name__, 'matmulcore')
def test_bad_return_type(self):
with self.assertRaises(TypeError) as te:
@guvectorize([int32(int32[:], int32[:])], '(m)->(m)', target='cuda')
def f(x, y):
pass
msg = str(te.exception)
self.assertIn('guvectorized functions cannot return values', msg)
self.assertIn('specifies int32 return type', msg)
def test_incorrect_number_of_pos_args(self):
@guvectorize([(int32[:], int32[:], int32[:])],
'(m),(m)->(m)', target='cuda')
def f(x, y, z):
pass
arr = np.arange(5)
# Inputs only, too few
with self.assertRaises(TypeError) as te:
f(arr)
msg = str(te.exception)
self.assertIn('gufunc accepts 2 positional arguments', msg)
self.assertIn('or 3 positional arguments', msg)
self.assertIn('Got 1 positional argument.', msg)
# Inputs and outputs, too many
with self.assertRaises(TypeError) as te:
f(arr, arr, arr, arr)
msg = str(te.exception)
self.assertIn('gufunc accepts 2 positional arguments', msg)
self.assertIn('or 3 positional arguments', msg)
self.assertIn('Got 4 positional arguments.', msg)
@skip_on_cudasim('ufunc API unsupported in the simulator')
|
TestCUDAGufunc
|
python
|
great-expectations__great_expectations
|
great_expectations/core/batch_spec.py
|
{
"start": 5615,
"end": 6132
}
|
class ____(BatchSpec):
_id_ignore_keys = set("batch_data")
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
if self.batch_data is None:
raise InvalidBatchSpecError("RuntimeDataBatchSpec batch_data cannot be None") # noqa: TRY003 # FIXME CoP
@property
def batch_data(self):
return self.get("batch_data")
@batch_data.setter
def batch_data(self, batch_data) -> None:
self["batch_data"] = batch_data
|
RuntimeDataBatchSpec
|
python
|
facelessuser__pymdown-extensions
|
pymdownx/fancylists.py
|
{
"start": 1758,
"end": 14462
}
|
class ____(BlockProcessor):
"""Process fancy ordered list blocks."""
TAG = 'ol'
SIBLING_TAGS = ['ol']
OL_TYPES = {
'dot-decimal': '1',
'paren-decimal': '1',
'dot-roman': 'i',
'paren-roman': 'i',
'dot-ROMAN': 'I',
'paren-ROMAN': 'I',
'dot-alpha': 'a',
'paren-alpha': 'a',
'dot-ALPHA': 'A',
'paren-ALPHA': 'A'
}
def __init__(self, parser, config):
"""Initialize."""
super().__init__(parser)
list_types = config['additional_ordered_styles']
self.alpha_enabled = 'alpha' in list_types
self.roman_enabled = 'roman' in list_types
self.inject_style = config['inject_style']
self.inject_class = config['inject_class']
formats = ''
if 'generic' in list_types:
formats += r'| \#'
if 'roman' in list_types:
# Rules are similar to https://projecteuler.net/about=roman_numerals
# We do not follow the "rule of 3": repeated values should not occur more than 3 times.
# The above link suggests that repeats should be restricted such that lower denominations
# do not equal or exceed X, C or M. We alter this to allow equaling to help mitigate
# conflicts with alphabetical lists.
formats += r'''
| (?=[IVXLCDM]{2})
M*
(?:C[MD]|D(?:C{0,4}|C{5}\b)|(?:C{0,9}|C{10}\b))
(?:X[CL]|L(?:X{0,4}|X{5}\b)|(?:X{0,9}|X{10}\b))
(?:I[XV]|V(?:I{0,4}|I{5}\b)|(?:I{0,9}|I{10}\b))
| (?=[ivxlcdm])
m*
(?:c[md]|d(?:c{0,4}|c{5}\b)|(?:c{0,9}|c{10}\b))
(?:x[cl]|l(?:x{0,4}|x{5}\b)|(?:x{0,9}|x{10}\b))
(?:i[xv]|v(?:i{0,4}|i{5}\b)|(?:i{0,9}|i{10}\b))
'''
if 'alpha' not in list_types:
formats += r'''
| [IVXLCDM](?=\)|\.[ ]{2})
'''
if 'alpha' in list_types:
formats += r'''
| [a-z]
| [A-Z](?=\)|\.[ ]{2})
'''
# Detect an item list item.
self.list_re = re.compile(
r'^[ ]{0,%d}(?:(?:\d+%s)[).])[ ]+(.*)' % (self.tab_length - 1, formats),
re.VERBOSE
)
# Detect items on secondary lines which can be of any list type.
self.child_re = re.compile(
r'^[ ]{0,%d}((?:(?:\d+%s)[).]|[-*+]))[ ]+(.*)' % (self.tab_length - 1, formats),
re.VERBOSE
)
# Detect indented (nested) list items of any type.
self.indent_re = re.compile(
r'^[ ]{%d,%d}(?:(?:\d+%s)[).]|[-*+])[ ]+.*' % (self.tab_length, self.tab_length * 2 - 1, formats),
re.VERBOSE
)
self.startswith = "1"
def test(self, parent, block):
"""Test to see if block starts with a list."""
return bool(self.list_re.match(block))
def run(self, parent, blocks):
"""Process list items."""
sibling = self.lastChild(parent)
# Check for multiple items in one block and get the ordered list fancy type.
items, fancy_type = self.get_items(sibling, blocks.pop(0), blocks)
# Append list items that are under the sibling list if the list type matches
if (
sibling is not None and sibling.tag in self.SIBLING_TAGS and
sibling.attrib.get('__fancylist', '') == fancy_type
):
# Previous block was a list item, so set that as parent
lst = sibling
# Make sure previous item is in a `p` - if the item has text,
# then it isn't in a `p`.
if lst[-1].text:
# Since it's possible there are other children for this
# sibling, we can't just `SubElement` the `p`, we need to
# insert it as the first item.
p = etree.Element('p')
p.text = lst[-1].text
lst[-1].text = ''
lst[-1].insert(0, p)
# If the last item has a tail, then the tail needs to be put in a `p`
# likely only when a header is not followed by a blank line.
lch = self.lastChild(lst[-1])
if lch is not None and lch.tail:
p = etree.SubElement(lst[-1], 'p')
p.text = lch.tail.lstrip()
lch.tail = ''
# Parse first block differently as it gets wrapped in a `p`.
li = etree.SubElement(lst, 'li')
self.parser.state.set('looselist')
firstitem = items.pop(0)
self.parser.parseBlocks(li, [firstitem])
self.parser.state.reset()
# This catches the edge case of a multi-item indented list whose
# first item is in a blank parent-list item:
# ```
# * * subitem1
# * subitem2
# ```
# see also `ListIndentProcessor`
elif parent.tag in ['ol', 'ul']:
lst = parent
# This is a new, unique list so create parent with appropriate tag.
else:
if self.TAG == 'ol':
# Correct the metadata of a forced list to now represent the actual content
if sibling is not None and sibling.attrib.get('__fancylist', '').startswith('force'):
sibling.attrib['__fancylist'] = fancy_type
lst = sibling
else:
attrib = {'type': self.OL_TYPES[fancy_type], '__fancylist': fancy_type}
if self.inject_style:
attrib['style'] = f"list-style-type: {OL_STYLE[attrib['type']]};"
if self.inject_class:
attrib['class'] = f"fancylists-{OL_STYLE[attrib['type']]}"
lst = etree.SubElement(
parent,
self.TAG,
attrib
)
else:
lst = etree.SubElement(parent, self.TAG)
# Check if a custom start integer is set
if self.startswith != '1' and not lst.attrib.get('start', ''):
lst.attrib['start'] = self.startswith
# Set the parse set to list
self.parser.state.set('list')
# Loop through items in block, recursively parsing each with the appropriate parent.
for item in items:
# Item is indented. Parse with last item as parent
if item.startswith(' '*self.tab_length):
self.parser.parseBlocks(lst[-1], [item])
# New item. Create `li` and parse with it as parent
else:
li = etree.SubElement(lst, 'li')
self.parser.parseBlocks(li, [item])
# Reset the parse state
self.parser.state.reset()
def get_start(self, fancy_type, m):
"""Translate list convention into a logical start."""
# Generic marker
if m.group(1).startswith('#'):
return '1'
t = fancy_type.split('-')[1].lower()
if t == 'decimal':
return m.group(1)[:-1].lstrip('(')
elif t == 'roman':
return str(roman2int(m.group(1)[:-1]))
elif t == 'alpha':
return str(ord(m.group(1)[:-1].upper()) - 64)
def get_fancy_type(self, m, first, fancy_type):
"""Get the fancy type for a given list item."""
value = m.group(1)[:-1]
sep = m.group(1)[-1]
list_type = ''
# Determine list type convention: _., _), (_)
if sep == '.':
list_type += 'dot-'
elif sep == ')':
list_type += 'paren-'
else:
return list_type, fancy_type
# The first item will be forced to assume the sibling list's type
if fancy_type.startswith('force'):
ltype = fancy_type.split('-', 1)[1]
# Make sure we aren't forcing an impossible scenario.
# If everything looks sound, return the types
if value == '#' or (
(ltype.lower() == 'decimal' and value.isdigit()) or
(
ltype.lower() == 'roman' and
self.roman_enabled and
value.isalpha() and
(len(value) > 2 or value.lower() in 'ivxlcdm')
) or
(ltype.lower() == 'alpha' and self.alpha_enabled and len(value) == 1 and value.isalpha())
):
fancy_type = list_type + fancy_type.split('-', 1)[1] if list_type else list_type
return fancy_type, fancy_type
# Ignore the force as it cannot be done
fancy_type = ''
# Determine numbering: numerical, roman numerical, alphabetic, or `#` numerical placeholder.
if value == '#':
list_type += fancy_type.split('-', 1)[1] if fancy_type else 'decimal'
elif value.isdigit():
list_type += 'decimal'
elif len(value) == 1 and value.isalpha():
if value.islower():
in_roman = value in 'ivxlcdm'
if (
self.alpha_enabled and (
not self.roman_enabled or (
first and (not in_roman or ((list_type + 'roman') != fancy_type and value != 'i'))
)
)
):
list_type += 'alpha'
elif self.alpha_enabled and not first and ((list_type + 'alpha') == fancy_type or not in_roman):
list_type += 'alpha'
else:
list_type += 'roman'
elif value.isupper():
in_roman = value in 'IVXLCDM'
if (
self.alpha_enabled and (
not self.roman_enabled or (
first and (not in_roman or ((list_type + 'ROMAN') != fancy_type and value != 'I'))
)
)
):
list_type += 'ALPHA'
elif self.alpha_enabled and not first and ((list_type + 'ALPHA') == fancy_type or not in_roman):
list_type += 'ALPHA'
else:
list_type += 'ROMAN'
elif value.isupper():
list_type += 'ROMAN'
elif value.islower():
list_type += 'roman'
return list_type, fancy_type
def get_items(self, sibling, block, blocks):
"""Break a block into list items."""
# Get ordered list fancy type
fancy_type = ''
if self.TAG == 'ol':
if sibling is not None and sibling.tag in self.SIBLING_TAGS:
fancy_type = sibling.attrib.get('__fancylist', '')
fancy = fancy_type
items = []
rest = []
for line in block.split('\n'):
# We've found a list type that differs form the our current,
# so gather the rest to be processed separately.
if rest:
rest.append(line)
continue
# Child list items
m = self.child_re.match(line)
if m:
# This is a new list item check first item for the start index.
# Also check for list items that differ from the first.
fancy, fancy_type = self.get_fancy_type(m, not items, fancy)
# We found a different fancy type, so handle these separately
if items and fancy != fancy_type:
rest.append(line)
continue
# Detect the integer value of first list item.
# If we are already in a list, just grab that.
if not items and self.TAG == 'ol':
self.startswith = self.get_start(fancy, m)
fancy_type = fancy
# Append to the list
items.append(m.group(2))
# Indented, possibly nested content
elif self.indent_re.match(line):
# Previous item was indented. Append to that item.
if items[-1].startswith(' ' * self.tab_length):
items[-1] = '{}\n{}'.format(items[-1], line)
# Other indented content
else:
items.append(line)
# Append non list items to previous list item.
else:
items[-1] = '{}\n{}'.format(items[-1], line)
# Insert non-list items back into the blocks to be parsed later
if rest:
blocks.insert(0, '\n'.join(rest))
return items, fancy_type
|
FancyOListProcessor
|
python
|
wandb__wandb
|
tools/local_wandb_server.py
|
{
"start": 9071,
"end": 12841
}
|
class ____:
base_port: int
fixture_port: int
def apply_ports(self, server: _ServerInfo) -> None:
server.base_port = self.base_port
server.fixture_port = self.fixture_port
def _start_container(*, name: str) -> _WandbContainerPorts:
"""Start the local-testcontainer.
This issues the `docker run` command and returns immediately.
Args:
name: The container name to use.
"""
registry = click.prompt("Registry", default="us-central1-docker.pkg.dev")
repository = click.prompt(
"Repository", default="wandb-production/images/local-testcontainer"
)
tag = click.prompt("Tag", default="master")
pull = click.prompt(
"--pull",
default="always",
type=click.Choice(["always", "never", "missing"]),
)
docker_flags = [
"--rm",
"--detach",
*["--pull", pull],
*["-e", "WANDB_ENABLE_TEST_CONTAINER=true"],
*["--name", name],
*["--volume", f"{name}-vol:/vol"],
# Expose ports to the host.
*["--publish", "8080"], # base port
*["--publish", "9015"], # fixture port
# Only this platform is available for now. Without specifying it,
# Docker defaults to the host's platform and fails if it's not
# supported.
*["--platform", "linux/amd64"],
]
image = f"{registry}/{repository}:{tag}"
command = ["docker", "run", *docker_flags, image]
_echo_info(f"Running command: {shlex.join(command)}")
subprocess.check_call(command, stdout=sys.stderr)
return _get_ports_retrying(name)
def _stop_container(name: str) -> None:
subprocess.check_call(["docker", "rm", "-f", name], stdout=sys.stderr)
def _get_ports_retrying(name: str) -> _WandbContainerPorts:
"""Returns the local-testcontainer's ports.
Retries up to one second before failing.
"""
ports = None
ports_start_time = time.monotonic()
while not ports and time.monotonic() - ports_start_time < 1:
ports = _get_ports(name)
if not ports:
time.sleep(0.1)
if not ports:
_echo_bad("Failed to get ports from container.")
sys.exit(1)
return ports
def _get_ports(name: str) -> _WandbContainerPorts | None:
"""Query the container's ports.
Returns None if the container's ports are not available yet. On occasion,
`docker port` doesn't return all ports if it happens too soon after
`docker run`.
"""
ports_str = subprocess.check_output(["docker", "port", name]).decode()
port_line_re = re.compile(r"(\d+)(\/\w+)? -> [^:]*:(\d+)")
base_port = 0
fixture_port = 0
for line in ports_str.splitlines():
match = port_line_re.fullmatch(line)
if not match:
continue
internal_port = match.group(1)
external_port = match.group(3)
if internal_port == "8080":
base_port = int(external_port)
elif internal_port == "9015":
fixture_port = int(external_port)
if not base_port:
return None
if not fixture_port:
return None
return _WandbContainerPorts(
base_port=base_port,
fixture_port=fixture_port,
)
def _echo_good(msg: str) -> None:
msg = click.style(msg, fg="green")
prefix = click.style("local_wandb_server.py", bold=True)
click.echo(f"{prefix}: {msg}", err=True)
def _echo_info(msg: str) -> None:
prefix = click.style("local_wandb_server.py", bold=True)
click.echo(f"{prefix}: {msg}", err=True)
def _echo_bad(msg: str) -> None:
msg = click.style(msg, fg="red")
prefix = click.style("local_wandb_server.py", bold=True)
click.echo(f"{prefix}: {msg}", err=True)
if __name__ == "__main__":
main()
|
_WandbContainerPorts
|
python
|
ray-project__ray
|
python/ray/_private/gc_collect_manager.py
|
{
"start": 132,
"end": 1604
}
|
class ____(threading.Thread):
"""A background thread that triggers Python garbage collection.
This thread waits for GC events from CoreWorker and triggers `gc.collect()` when
when requested."""
def __init__(self, *, gc_collect_func: Optional[Callable] = None):
logger.debug("Starting Python GC thread")
super().__init__(name="PythonGCThread", daemon=True)
self._should_exit = False
self._gc_event = threading.Event()
# Sets the gc_collect_func (only for testing), defaults to gc.collect
self._gc_collect_func = gc_collect_func or gc.collect
def trigger_gc(self) -> None:
self._gc_event.set()
def run(self):
while not self._should_exit:
self._gc_event.wait()
self._gc_event.clear()
if self._should_exit:
break
try:
start = time.monotonic()
num_freed = self._gc_collect_func()
if num_freed > 0:
logger.debug(
"gc.collect() freed {} refs in {} seconds".format(
num_freed, time.monotonic() - start
)
)
except Exception as e:
logger.error(f"Error during GC: {e}")
def stop(self):
logger.debug("Stopping Python GC thread")
self._should_exit = True
self._gc_event.set()
self.join()
|
PythonGCThread
|
python
|
run-llama__llama_index
|
llama-index-integrations/agent/llama-index-agent-azure/tests/test_azure_foundry_agent.py
|
{
"start": 745,
"end": 18705
}
|
class ____:
def __init__(self, items):
self._items = items
def __aiter__(self):
self._iter = iter(self._items)
return self
async def __anext__(self):
try:
return next(self._iter)
except StopIteration:
raise StopAsyncIteration
def test_azure_foundry_agent_constructor():
"""Test the constructor of AzureFoundryAgent."""
endpoint = "https://test-endpoint.com"
model = "gpt-4o"
name = "test-azure-agent"
instructions = "You are a test agent."
thread_id = "test-thread-123"
verbose = True
run_retrieve_sleep_time = 0.5
mock_project_client_instance = MagicMock(spec=AIProjectClient)
mock_azure_agent_instance = MagicMock(spec=AzureAgent)
mock_azure_agent_instance.id = "mock_agent_id_123"
mock_thread_instance = MagicMock(spec=AgentThread)
mock_thread_instance.id = thread_id
# Patch async methods with AsyncMock
mock_project_client_instance.agents.create_agent = AsyncMock(
return_value=mock_azure_agent_instance
)
mock_project_client_instance.agents.threads.create = AsyncMock(
return_value=mock_thread_instance
)
# Mock DefaultAzureCredential to avoid actual credential loading
with patch(
"llama_index.agent.azure_foundry_agent.base.DefaultAzureCredential", MagicMock()
) as mock_default_credential:
# Mock AIProjectClient constructor to return our mock instance
with patch(
"llama_index.agent.azure_foundry_agent.base.AIProjectClient",
return_value=mock_project_client_instance,
) as mock_ai_project_client_constructor:
# Mock the create_agent call
mock_project_client_instance.agents.create_agent.return_value = (
mock_azure_agent_instance
)
# Mock the threads.create call for when thread_id is None
mock_project_client_instance.agents.threads.create.return_value = (
mock_thread_instance
)
# Test case 1: Initialize with a specific thread_id
agent_with_thread = AzureFoundryAgent(
endpoint=endpoint,
model=model,
name=name,
instructions=instructions,
thread_id=thread_id,
verbose=verbose,
run_retrieve_sleep_time=run_retrieve_sleep_time,
)
mock_ai_project_client_constructor.assert_called_once_with(
endpoint=endpoint, credential=mock_default_credential.return_value
)
# Ensure threads.create was NOT called because thread_id was provided
mock_project_client_instance.agents.threads.create.assert_not_called()
assert isinstance(agent_with_thread, AzureFoundryAgent)
assert agent_with_thread._endpoint == endpoint
assert agent_with_thread._model == model
assert agent_with_thread.name == name
assert agent_with_thread._instructions == instructions
assert agent_with_thread._thread_id == thread_id
assert agent_with_thread._verbose == verbose
assert agent_with_thread._run_retrieve_sleep_time == run_retrieve_sleep_time
assert agent_with_thread._client == mock_project_client_instance
# Reset mocks for the next instantiation test
mock_ai_project_client_constructor.reset_mock()
mock_project_client_instance.reset_mock()
mock_default_credential.reset_mock()
# Mock the threads.create call for when thread_id is None
# Re-assign thread_id for the new mock thread instance if it's different
new_mock_thread_id = "new-mock-thread-456"
mock_thread_instance_new = MagicMock(spec=AgentThread)
mock_thread_instance_new.id = new_mock_thread_id
mock_project_client_instance.agents.threads.create = AsyncMock(
return_value=mock_thread_instance_new
)
# Test case 2: Initialize without a specific thread_id (should create one)
agent_new_thread = AzureFoundryAgent(
endpoint=endpoint,
model=model,
name=name,
instructions=instructions,
thread_id=None, # Test thread creation
verbose=verbose,
run_retrieve_sleep_time=run_retrieve_sleep_time,
)
assert agent_new_thread.name == name
assert agent_new_thread._client == mock_project_client_instance
# At this point, thread should not be created yet
mock_project_client_instance.agents.threads.create.assert_not_called()
# Now, trigger thread creation by calling _ensure_agent
import asyncio
asyncio.run(agent_new_thread._ensure_agent([]))
mock_project_client_instance.agents.threads.create.assert_called_once()
assert agent_new_thread._thread_id == new_mock_thread_id
@patch("azure.identity.aio.DefaultAzureCredential")
@patch("azure.ai.projects.aio.AIProjectClient")
@pytest.mark.asyncio # Added decorator
async def test_azure_foundry_agent_constructor_defaults( # Added async and mock arguments
mock_project_client_class: MagicMock, mock_credential_class: MagicMock
):
"""Test the constructor of AzureFoundryAgent with default values."""
endpoint = "https://test-endpoint.com"
model = "gpt-4o"
name = "test-azure-agent-defaults"
instructions = "You are a test agent. (defaults)"
thread_id = None
verbose = False
run_retrieve_sleep_time = 1.0
mock_project_client_instance = MagicMock(spec=AIProjectClient)
mock_azure_agent_instance = MagicMock(spec=AzureAgent)
mock_azure_agent_instance.id = "mock_agent_id_defaults"
mock_thread_instance = MagicMock(spec=AgentThread)
mock_thread_instance.id = "mock_thread_id_defaults"
# Patch async methods with AsyncMock
mock_project_client_instance.agents.create_agent = AsyncMock(
return_value=mock_azure_agent_instance
)
mock_project_client_instance.agents.threads.create = AsyncMock(
return_value=mock_thread_instance
)
with patch(
"llama_index.agent.azure_foundry_agent.base.DefaultAzureCredential", MagicMock()
):
with patch(
"llama_index.agent.azure_foundry_agent.base.AIProjectClient",
return_value=mock_project_client_instance,
):
# Test initialization with defaults
agent_defaults = AzureFoundryAgent(
endpoint=endpoint,
model=model,
name=name,
instructions=instructions,
thread_id=thread_id,
verbose=verbose,
run_retrieve_sleep_time=run_retrieve_sleep_time,
)
assert agent_defaults.name == name
assert agent_defaults._endpoint == endpoint
assert agent_defaults._model == model
assert agent_defaults._instructions == instructions
assert agent_defaults._thread_id is None
assert agent_defaults._verbose is False
assert agent_defaults._run_retrieve_sleep_time == run_retrieve_sleep_time
assert agent_defaults._client == mock_project_client_instance
# Ensure that create_agent and threads.create are called only after _ensure_agent
await agent_defaults._ensure_agent([])
print(
f"create_agent call count: {mock_project_client_instance.agents.create_agent.call_count}"
)
print(
f"threads.create call count: {mock_project_client_instance.agents.threads.create.call_count}"
)
mock_project_client_instance.agents.create_agent.assert_called_once()
mock_project_client_instance.agents.threads.create.assert_called_once()
# Check that the thread_id was set to the created thread's ID
assert agent_defaults._thread_id == mock_thread_instance.id
# Tests for _llama_to_azure_content_blocks
@pytest.mark.parametrize(
("desc", "chat_messages", "expected_types", "expected_values"),
[
(
"empty input",
[],
[],
[],
),
(
"text only",
[ChatMessage(role="user", blocks=[TextBlock(text="Hello")])],
[MessageInputTextBlock],
["Hello"],
),
(
"image url",
[
ChatMessage(
role="user",
blocks=[
ImageBlock(url="http://example.com/image.png", detail="low")
],
)
],
[MessageInputImageUrlBlock],
["http://example.com/image.png"],
),
(
"no blocks, just content",
[ChatMessage(role="user", content="Just text content, no blocks attr")],
[MessageInputTextBlock],
["Just text content, no blocks attr"],
),
(
"empty blocks",
[ChatMessage(role="user", blocks=[])],
[],
[],
),
(
"image block no path no url",
[
ChatMessage(
role="user",
blocks=[ImageBlock(image=b"some_image_data", detail="high")],
)
],
[],
[],
),
],
)
def test_llama_to_azure_content_blocks_param(
desc, chat_messages, expected_types, expected_values
):
agent = AzureFoundryAgent(endpoint="dummy_endpoint")
result = agent._llama_to_azure_content_blocks(chat_messages)
assert len(result) == len(expected_types)
for r, t, v in zip(result, expected_types, expected_values):
assert isinstance(r, t)
# Check value for text or url
if isinstance(r, MessageInputTextBlock):
assert r.text == v
elif isinstance(r, MessageInputImageUrlBlock):
assert r.image_url.url == v
def test_llama_to_azure_content_blocks_image_path_and_mixed():
agent = AzureFoundryAgent(endpoint="dummy_endpoint")
with tempfile.NamedTemporaryFile(suffix=".png") as tmp:
# image path
chat_messages = [
ChatMessage(
role="user", blocks=[ImageBlock(path=Path(tmp.name), detail="high")]
)
]
result = agent._llama_to_azure_content_blocks(chat_messages)
assert len(result) == 1
assert isinstance(result[0], MessageInputImageFileBlock)
assert result[0].image_file.file_id == tmp.name
assert result[0].image_file.detail == "high"
# mixed content
chat_messages = [
ChatMessage(
role="user",
blocks=[
TextBlock(text="Describe this image:"),
ImageBlock(path=Path(tmp.name)),
],
)
]
result = agent._llama_to_azure_content_blocks(chat_messages)
assert len(result) == 2
assert isinstance(result[0], MessageInputTextBlock)
assert result[0].text == "Describe this image:"
assert isinstance(result[1], MessageInputImageFileBlock)
assert result[1].image_file.file_id == tmp.name
# image block path preferred over image attr
with tempfile.NamedTemporaryFile(suffix=".png") as tmp:
chat_messages = [
ChatMessage(
role="user",
blocks=[ImageBlock(path=Path(tmp.name), image=b"image_bytes")],
)
]
result = agent._llama_to_azure_content_blocks(chat_messages)
assert len(result) == 1
assert isinstance(result[0], MessageInputImageFileBlock)
assert result[0].image_file.file_id == tmp.name
# image bytes only, should be skipped
chat_messages_bytes_only = [
ChatMessage(
role="user", blocks=[ImageBlock(image=b"image_bytes_data", detail="auto")]
)
]
result_bytes_only = agent._llama_to_azure_content_blocks(chat_messages_bytes_only)
assert len(result_bytes_only) == 0
def test_llama_to_azure_content_blocks_multiple_messages():
agent = AzureFoundryAgent(endpoint="dummy_endpoint")
with tempfile.NamedTemporaryFile(suffix=".gif") as tmp:
chat_messages = [
ChatMessage(role="user", blocks=[TextBlock(text="First message.")]),
ChatMessage(
role="user", blocks=[ImageBlock(url="http://images.com/pic.png")]
),
ChatMessage(
role="user",
blocks=[
TextBlock(text="Third message text."),
ImageBlock(path=Path(tmp.name)),
],
),
]
result = agent._llama_to_azure_content_blocks(chat_messages)
assert len(result) == 4
assert isinstance(result[0], MessageInputTextBlock)
assert result[0].text == "First message."
assert isinstance(result[1], MessageInputImageUrlBlock)
assert result[1].image_url.url == "http://images.com/pic.png"
assert isinstance(result[2], MessageInputTextBlock)
assert result[2].text == "Third message text."
assert isinstance(result[3], MessageInputImageFileBlock)
assert result[3].image_file.file_id == tmp.name
# --- Workflow and tool call tests from the other file ---
@pytest.mark.asyncio
async def test_azure_foundry_agent_workflow():
with (
patch(
"llama_index.agent.azure_foundry_agent.base.DefaultAzureCredential",
MagicMock(),
),
patch(
"llama_index.agent.azure_foundry_agent.base.AIProjectClient"
) as mock_client_class,
):
mock_client = MagicMock()
mock_client_class.return_value = mock_client
mock_client.agents.create_agent = AsyncMock()
mock_client.agents.threads.create = AsyncMock()
mock_client.agents.get_agent = AsyncMock()
mock_client.agents.messages.create = AsyncMock()
mock_client.agents.runs.create = AsyncMock()
mock_client.agents.runs.get = AsyncMock()
mock_client.agents.messages.list.return_value = DummyAsyncIterator([])
mock_client.agents.runs.submit_tool_outputs = AsyncMock()
mock_client.close = AsyncMock()
agent = AzureFoundryAgent(
endpoint="https://fake-endpoint",
model="gpt-4o",
name="azure-agent",
instructions="Test agent",
verbose=True,
)
workflow = AgentWorkflow(
agents=[agent],
)
memory = ChatMemoryBuffer.from_defaults()
handler = workflow.run(user_msg="Hello, agent!", memory=memory)
events = []
async for event in handler.stream_events():
events.append(event)
response = await handler
assert response is not None
@pytest.mark.asyncio
async def test_azure_foundry_agent_tool_call():
    """Exercise the tool-call round trip: the mocked service first reports a run
    that requires tool outputs, then a completed run, and the agent's final
    answer is read back from the mocked message list."""
    with (
        patch(
            "llama_index.agent.azure_foundry_agent.base.DefaultAzureCredential",
            MagicMock(),
        ),
        patch(
            "llama_index.agent.azure_foundry_agent.base.AIProjectClient"
        ) as mock_client_class,
    ):
        mock_client = MagicMock()
        mock_client_class.return_value = mock_client
        mock_client.agents.create_agent = AsyncMock()
        mock_client.agents.threads.create = AsyncMock()
        mock_client.agents.get_agent = AsyncMock()
        mock_client.agents.messages.create = AsyncMock()
        mock_client.close = AsyncMock()

        # Minimal stand-in for the service's run object.
        class DummyRun:
            def __init__(self, status, required_action=None):
                self.status = status
                self.required_action = required_action
                self.id = "runid"

        # Stand-in for a run's required action: one pending call to "my_tool".
        class DummyRequiredAction:
            type = "submit_tool_outputs"
            submit_tool_outputs = SimpleNamespace(
                tool_calls=[
                    SimpleNamespace(
                        id="toolid",
                        function=SimpleNamespace(
                            name="my_tool", arguments=json.dumps({"x": 1})
                        ),
                    )
                ]
            )

        mock_client.agents.runs.create = AsyncMock(
            return_value=DummyRun("requires_action", DummyRequiredAction())
        )
        # Order matters: first poll still requires action, second poll completes.
        mock_client.agents.runs.get = AsyncMock(
            side_effect=[
                DummyRun("requires_action", DummyRequiredAction()),
                DummyRun("completed"),
            ]
        )
        assistant_message = SimpleNamespace(
            role="assistant",
            content=[
                SimpleNamespace(
                    type="text", text=SimpleNamespace(value="Tool call complete!")
                )
            ],
        )

        # messages.list may be called more than once; a side_effect function
        # hands out a fresh async iterator per call (iterators are single-use).
        def messages_list_side_effect(*args, **kwargs):
            return DummyAsyncIterator([assistant_message, assistant_message])

        mock_client.agents.messages.list.side_effect = messages_list_side_effect
        mock_client.agents.runs.submit_tool_outputs = AsyncMock()
        agent = AzureFoundryAgent(
            endpoint="https://fake-endpoint",
            model="gpt-4o",
            name="azure-agent",
            instructions="Test agent",
            verbose=True,
            tools=[lambda x: x],  # Dummy tool
        )
        workflow = AgentWorkflow(agents=[agent])
        memory = ChatMemoryBuffer.from_defaults()
        handler = workflow.run(user_msg="Trigger tool", memory=memory)
        events = []
        async for event in handler.stream_events():
            events.append(event)
        response = await handler
        assert "Tool call complete!" in response.response.content
|
DummyAsyncIterator
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_utf8_05.py
|
{
"start": 314,
"end": 987
}
|
class ____(ExcelComparisonTest):
    """
    Compare a workbook produced by XlsxWriter against a reference file
    generated by Excel.
    """

    def setUp(self):
        self.set_filename("utf8_05.xlsx")
        self.ignore_files = [
            "xl/calcChain.xml",
            "[Content_Types].xml",
            "xl/_rels/workbook.xml.rels",
        ]

    def test_create_file(self):
        """Write a workbook containing a UTF-8 formula plus its cached result."""
        workbook = Workbook(self.got_filename)
        sheet = workbook.add_worksheet()

        # Supply the pre-computed result so Excel need not recalculate.
        sheet.write_formula("A1", '="Café"', None, "Café")

        workbook.close()
        self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/group_stream.py
|
{
"start": 3088,
"end": 3250
}
|
class ____(NamedTuple):
    """Time-window arguments for a per-group stats query."""

    # Relative window spec (presumably a period string such as "24h";
    # None disables stats — TODO(review): confirm against callers).
    stats_period: str | None
    # Optional absolute window bounds.
    stats_period_start: datetime | None = None
    stats_period_end: datetime | None = None
|
GroupStatsQueryArgs
|
python
|
apache__airflow
|
airflow-core/src/airflow/models/dagrun.py
|
{
"start": 5297,
"end": 90158
}
|
class ____(Base, LoggingMixin):
"""
Invocation instance of a DAG.
A DAG run can be created by the scheduler (i.e. scheduled runs), or by an
external trigger (i.e. manual runs).
"""
active_spans = ThreadSafeDict()
__tablename__ = "dag_run"
id: Mapped[int] = mapped_column(Integer, primary_key=True)
dag_id: Mapped[str] = mapped_column(StringID(), nullable=False)
queued_at: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
logical_date: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
start_date: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
end_date: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
_state: Mapped[str] = mapped_column("state", String(50), default=DagRunState.QUEUED)
run_id: Mapped[str] = mapped_column(StringID(), nullable=False)
creating_job_id: Mapped[int | None] = mapped_column(Integer, nullable=True)
run_type: Mapped[str] = mapped_column(String(50), nullable=False)
triggered_by: Mapped[DagRunTriggeredByType | None] = mapped_column(
Enum(DagRunTriggeredByType, native_enum=False, length=50), nullable=True
) # Airflow component that triggered the run.
triggering_user_name: Mapped[str | None] = mapped_column(
String(512),
nullable=True,
) # The user that triggered the DagRun, if applicable
conf: Mapped[dict[str, Any] | None] = mapped_column(
JSON().with_variant(postgresql.JSONB, "postgresql"), nullable=True
)
# These two must be either both NULL or both datetime.
data_interval_start: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
data_interval_end: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
# Earliest time when this DagRun can start running.
run_after: Mapped[datetime] = mapped_column(UtcDateTime, default=_default_run_after, nullable=False)
# When a scheduler last attempted to schedule TIs for this DagRun
last_scheduling_decision: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
# Foreign key to LogTemplate. DagRun rows created prior to this column's
# existence have this set to NULL. Later rows automatically populate this on
# insert to point to the latest LogTemplate entry.
log_template_id: Mapped[int] = mapped_column(
Integer,
ForeignKey("log_template.id", name="task_instance_log_template_id_fkey", ondelete="NO ACTION"),
default=select(func.max(LogTemplate.__table__.c.id)),
)
updated_at: Mapped[datetime] = mapped_column(
UtcDateTime, default=timezone.utcnow, onupdate=timezone.utcnow
)
# Keeps track of the number of times the dagrun had been cleared.
# This number is incremented only when the DagRun is re-Queued,
# when the DagRun is cleared.
clear_number: Mapped[int] = mapped_column(Integer, default=0, nullable=False, server_default="0")
backfill_id: Mapped[int | None] = mapped_column(Integer, ForeignKey("backfill.id"), nullable=True)
"""
The backfill this DagRun is currently associated with.
It's possible this could change if e.g. the dag run is cleared to be rerun, or perhaps re-backfilled.
"""
bundle_version: Mapped[str | None] = mapped_column(StringID(), nullable=True)
scheduled_by_job_id: Mapped[int | None] = mapped_column(Integer, nullable=True)
# Span context carrier, used for context propagation.
context_carrier: Mapped[dict[str, Any] | None] = mapped_column(
MutableDict.as_mutable(ExtendedJSON), nullable=True
)
span_status: Mapped[str] = mapped_column(
String(250), server_default=SpanStatus.NOT_STARTED, nullable=False
)
created_dag_version_id: Mapped[str | None] = mapped_column(
UUIDType(binary=False),
ForeignKey("dag_version.id", name="created_dag_version_id_fkey", ondelete="set null"),
nullable=True,
)
"""The id of the dag version column that was in effect at dag run creation time.
:meta private:
"""
partition_key: Mapped[str | None] = mapped_column(StringID(), nullable=True)
# Remove this `if` after upgrading Sphinx-AutoAPI
if not TYPE_CHECKING and "BUILDING_AIRFLOW_DOCS" in os.environ:
dag: SerializedDAG | None
else:
dag: SerializedDAG | None = None
__table_args__ = (
Index("dag_id_state", dag_id, _state),
UniqueConstraint("dag_id", "run_id", name="dag_run_dag_id_run_id_key"),
UniqueConstraint("dag_id", "logical_date", name="dag_run_dag_id_logical_date_key"),
Index("idx_dag_run_dag_id", dag_id),
Index("idx_dag_run_run_after", run_after),
Index(
"idx_dag_run_running_dags",
"state",
"dag_id",
postgresql_where=text("state='running'"),
sqlite_where=text("state='running'"),
),
# since mysql lacks filtered/partial indices, this creates a
# duplicate index on mysql. Not the end of the world
Index(
"idx_dag_run_queued_dags",
"state",
"dag_id",
postgresql_where=text("state='queued'"),
sqlite_where=text("state='queued'"),
),
)
task_instances = relationship(
TI,
back_populates="dag_run",
cascade="save-update, merge, delete, delete-orphan",
)
task_instances_histories = relationship(
TIH,
primaryjoin="and_(DagRun.dag_id == TaskInstanceHistory.dag_id, DagRun.run_id == TaskInstanceHistory.run_id)",
foreign_keys="TaskInstanceHistory.dag_id, TaskInstanceHistory.run_id",
order_by=TIH.dag_version_id,
viewonly=True,
)
dag_model = relationship(
"DagModel",
primaryjoin="foreign(DagRun.dag_id) == DagModel.dag_id",
uselist=False,
viewonly=True,
)
dag_run_note = relationship(
"DagRunNote",
back_populates="dag_run",
uselist=False,
cascade="all, delete, delete-orphan",
)
deadlines = relationship(
"Deadline",
back_populates="dagrun",
uselist=True,
cascade="all, delete, delete-orphan",
)
created_dag_version = relationship("DagVersion", uselist=False, passive_deletes=True)
"""
The dag version that was active when the dag run was created, if available.
:meta private:
"""
backfill = relationship(Backfill, uselist=False)
backfill_max_active_runs = association_proxy("backfill", "max_active_runs")
max_active_runs = association_proxy("dag_model", "max_active_runs")
note = association_proxy("dag_run_note", "content", creator=_creator_note)
DEFAULT_DAGRUNS_TO_EXAMINE = airflow_conf.getint(
"scheduler",
"max_dagruns_per_loop_to_schedule",
fallback=20,
)
_ti_dag_versions = association_proxy("task_instances", "dag_version")
_tih_dag_versions = association_proxy("task_instances_histories", "dag_version")
def __init__(
self,
dag_id: str | None = None,
run_id: str | None = None,
*,
queued_at: datetime | None | ArgNotSet = NOTSET,
logical_date: datetime | None = None,
run_after: datetime | None = None,
start_date: datetime | None = None,
conf: Any | None = None,
state: DagRunState | None = None,
run_type: str | None = None,
creating_job_id: int | None = None,
data_interval: tuple[datetime, datetime] | None = None,
triggered_by: DagRunTriggeredByType | None = None,
triggering_user_name: str | None = None,
backfill_id: NonNegativeInt | None = None,
bundle_version: str | None = None,
partition_key: str | None = None,
):
# For manual runs where logical_date is None, ensure no data_interval is set.
if logical_date is None and data_interval is not None:
raise ValueError("data_interval must be None if logical_date is None")
if data_interval is None:
# Legacy: Only happen for runs created prior to Airflow 2.2.
self.data_interval_start = self.data_interval_end = None
else:
self.data_interval_start, self.data_interval_end = data_interval
self.bundle_version = bundle_version
if dag_id is not None:
self.dag_id = dag_id
if run_id is not None:
self.run_id = run_id
self.logical_date = logical_date
if run_after is not None:
self.run_after = run_after
self.start_date = start_date
self.conf = conf or {}
if state is not None:
self.state = state
if not is_arg_set(queued_at):
self.queued_at = timezone.utcnow() if state == DagRunState.QUEUED else None
elif queued_at is not None:
self.queued_at = queued_at
if run_type is not None:
self.run_type = run_type
self.creating_job_id = creating_job_id
self.backfill_id = backfill_id
self.clear_number = 0
self.triggered_by = triggered_by
self.triggering_user_name = triggering_user_name
self.scheduled_by_job_id = None
self.context_carrier = {}
if not isinstance(partition_key, str | None):
raise ValueError(
f"Expected partition_key to be a `str` or `None` but got `{partition_key.__class__.__name__}`"
)
self.partition_key = partition_key
super().__init__()
def __repr__(self):
return (
f"<DagRun {self.dag_id} @ {self.logical_date}: {self.run_id}, state:{self.state}, "
f"queued_at: {self.queued_at}. run_type: {self.run_type}>"
)
@validates("run_id")
def validate_run_id(self, key: str, run_id: str) -> str | None:
if not run_id:
return None
if re.match(RUN_ID_REGEX, run_id):
return run_id
regex = airflow_conf.get("scheduler", "allowed_run_id_pattern").strip()
if regex and re.match(regex, run_id):
return run_id
raise ValueError(
f"The run_id provided '{run_id}' does not match regex pattern '{regex}' or '{RUN_ID_REGEX}'"
)
@property
def dag_versions(self) -> list[DagVersion]:
"""Return the DAG versions associated with the TIs of this DagRun."""
# when the dag is in a versioned bundle, we keep the dag version fixed
if self.bundle_version:
return [self.created_dag_version] if self.created_dag_version is not None else []
dag_versions = [
dv
for dv in dict.fromkeys(list(self._tih_dag_versions) + list(self._ti_dag_versions))
if dv is not None
]
sorted_ = sorted(dag_versions, key=lambda dv: dv.id)
return sorted_
@property
def version_number(self) -> int | None:
"""Return the DAG version number associated with the latest TI of this DagRun."""
dag_versions = self.dag_versions
if dag_versions:
return dag_versions[-1].version_number
return None
@hybrid_property
def duration(self) -> float | None:
if self.end_date and self.start_date:
return (self.end_date - self.start_date).total_seconds()
return None
@duration.expression # type: ignore[no-redef]
@provide_session
def duration(cls, session: Session = NEW_SESSION) -> Case:
dialect_name = get_dialect_name(session)
if dialect_name == "mysql":
return func.timestampdiff(text("SECOND"), cls.start_date, cls.end_date)
if dialect_name == "sqlite":
duration_expr = (func.julianday(cls.end_date) - func.julianday(cls.start_date)) * 86400
else:
duration_expr = func.extract("epoch", cls.end_date - cls.start_date)
when_condition = (
(cls.end_date != None) & (cls.start_date != None), # noqa: E711
duration_expr,
)
return case(when_condition, else_=None)
@provide_session
def check_version_id_exists_in_dr(self, dag_version_id: UUIDType, session: Session = NEW_SESSION):
select_stmt = (
select(TI.dag_version_id)
.where(TI.dag_id == self.dag_id, TI.dag_version_id == dag_version_id, TI.run_id == self.run_id)
.union(
select(TIH.dag_version_id).where(
TIH.dag_id == self.dag_id, TIH.dag_version_id == dag_version_id, TIH.run_id == self.run_id
)
)
)
return session.scalar(select_stmt)
@property
def stats_tags(self) -> dict[str, str]:
return prune_dict({"dag_id": self.dag_id, "run_type": self.run_type})
@classmethod
def set_active_spans(cls, active_spans: ThreadSafeDict):
cls.active_spans = active_spans
def get_state(self):
return self._state
def set_state(self, state: DagRunState) -> None:
"""
Change the state of the DagRan.
Changes to attributes are implemented in accordance with the following table
(rows represent old states, columns represent new states):
.. list-table:: State transition matrix
:header-rows: 1
:stub-columns: 1
* -
- QUEUED
- RUNNING
- SUCCESS
- FAILED
* - None
- queued_at = timezone.utcnow()
- if empty: start_date = timezone.utcnow()
end_date = None
- end_date = timezone.utcnow()
- end_date = timezone.utcnow()
* - QUEUED
- queued_at = timezone.utcnow()
- if empty: start_date = timezone.utcnow()
end_date = None
- end_date = timezone.utcnow()
- end_date = timezone.utcnow()
* - RUNNING
- queued_at = timezone.utcnow()
start_date = None
end_date = None
-
- end_date = timezone.utcnow()
- end_date = timezone.utcnow()
* - SUCCESS
- queued_at = timezone.utcnow()
start_date = None
end_date = None
- start_date = timezone.utcnow()
end_date = None
-
-
* - FAILED
- queued_at = timezone.utcnow()
start_date = None
end_date = None
- start_date = timezone.utcnow()
end_date = None
-
-
"""
if state not in State.dag_states:
raise ValueError(f"invalid DagRun state: {state}")
if self._state != state:
if state == DagRunState.QUEUED:
self.queued_at = timezone.utcnow()
self.start_date = None
self.end_date = None
if state == DagRunState.RUNNING:
if self._state in State.finished_dr_states:
self.start_date = timezone.utcnow()
else:
self.start_date = self.start_date or timezone.utcnow()
self.end_date = None
if self._state in State.unfinished_dr_states or self._state is None:
if state in State.finished_dr_states:
self.end_date = timezone.utcnow()
self._state = state
else:
if state == DagRunState.QUEUED:
self.queued_at = timezone.utcnow()
@declared_attr
def state(self):
return synonym("_state", descriptor=property(self.get_state, self.set_state))
@provide_session
def refresh_from_db(self, session: Session = NEW_SESSION) -> None:
"""
Reload the current dagrun from the database.
:param session: database session
"""
dr = session.scalars(
select(DagRun).where(DagRun.dag_id == self.dag_id, DagRun.run_id == self.run_id)
).one()
self.id = dr.id
self.state = dr.state
    @classmethod
    @provide_session
    def active_runs_of_dags(
        cls,
        *,
        dag_ids: Iterable[str],
        exclude_backfill: bool,
        session: Session = NEW_SESSION,
    ) -> dict[str, int]:
        """
        Get the number of active dag runs for each dag.

        :meta private:
        """
        # "Active" here means queued or running.
        query = (
            select(cls.dag_id, func.count("*"))
            .where(cls.dag_id.in_(set(dag_ids)))
            .where(cls.state.in_((DagRunState.RUNNING, DagRunState.QUEUED)))
            .group_by(cls.dag_id)
        )
        if exclude_backfill:
            query = query.where(cls.run_type != DagRunType.BACKFILL_JOB)
        # Dags with zero active runs are simply absent from the mapping.
        return {dag_id: count for dag_id, count in session.execute(query)}
@classmethod
@retry_db_transaction
def get_running_dag_runs_to_examine(cls, session: Session) -> ScalarResult[DagRun]:
"""
Return the next DagRuns that the scheduler should attempt to schedule.
This will return zero or more DagRun rows that are row-level-locked with a "SELECT ... FOR UPDATE"
query, you should ensure that any scheduling decisions are made in a single transaction -- as soon as
the transaction is committed it will be unlocked.
:meta private:
"""
from airflow.models.backfill import BackfillDagRun
from airflow.models.dag import DagModel
query = (
select(cls)
.with_hint(cls, "USE INDEX (idx_dag_run_running_dags)", dialect_name="mysql")
.where(cls.state == DagRunState.RUNNING)
.join(DagModel, DagModel.dag_id == cls.dag_id)
.join(BackfillDagRun, BackfillDagRun.dag_run_id == DagRun.id, isouter=True)
.where(
DagModel.is_paused == false(),
DagModel.is_stale == false(),
)
.options(joinedload(cls.task_instances))
.order_by(
nulls_first(cast("ColumnElement[Any]", BackfillDagRun.sort_ordinal), session=session),
nulls_first(cast("ColumnElement[Any]", cls.last_scheduling_decision), session=session),
cls.run_after,
)
.limit(cls.DEFAULT_DAGRUNS_TO_EXAMINE)
)
query = query.where(DagRun.run_after <= func.now())
result = session.scalars(with_row_locks(query, of=cls, session=session, skip_locked=True)).unique()
return result
@classmethod
@retry_db_transaction
def get_queued_dag_runs_to_set_running(cls, session: Session) -> ScalarResult[DagRun]:
"""
Return the next queued DagRuns that the scheduler should attempt to schedule.
This will return zero or more DagRun rows that are row-level-locked with a "SELECT ... FOR UPDATE"
query, you should ensure that any scheduling decisions are made in a single transaction -- as soon as
the transaction is committed it will be unlocked.
:meta private:
"""
from airflow.models.backfill import Backfill, BackfillDagRun
from airflow.models.dag import DagModel
# For dag runs in the queued state, we check if they have reached the max_active_runs limit
# and if so we drop them
running_drs = (
select(
DagRun.dag_id,
DagRun.backfill_id,
func.count(DagRun.id).label("num_running"),
)
.where(DagRun.state == DagRunState.RUNNING)
.group_by(DagRun.dag_id, DagRun.backfill_id)
.subquery()
)
query = (
select(cls)
.where(cls.state == DagRunState.QUEUED)
.join(
DagModel,
and_(
DagModel.dag_id == cls.dag_id,
DagModel.is_paused == false(),
DagModel.is_stale == false(),
),
)
.join(
BackfillDagRun,
and_(
BackfillDagRun.dag_run_id == DagRun.id,
BackfillDagRun.backfill_id == DagRun.backfill_id,
),
isouter=True,
)
.join(Backfill, isouter=True)
.join(
running_drs,
and_(
running_drs.c.dag_id == DagRun.dag_id,
coalesce(running_drs.c.backfill_id, text("-1"))
== coalesce(DagRun.backfill_id, text("-1")),
),
isouter=True,
)
.where(
# there are two levels of checks for num_running
# the one done in this query verifies that the dag is not maxed out
# it could return many more dag runs than runnable if there is even
# capacity for 1. this could be improved.
coalesce(running_drs.c.num_running, text("0"))
< coalesce(Backfill.max_active_runs, DagModel.max_active_runs),
# don't set paused dag runs as running
not_(coalesce(cast("ColumnElement[bool]", Backfill.is_paused), False)),
)
.order_by(
# ordering by backfill sort ordinal first ensures that backfill dag runs
# have lower priority than all other dag run types (since sort_ordinal >= 1).
# additionally, sorting by sort_ordinal ensures that the backfill
# dag runs are created in the right order when that matters.
# todo: AIP-78 use row_number to avoid starvation; limit the number of returned runs per-dag
nulls_first(cast("ColumnElement[Any]", BackfillDagRun.sort_ordinal), session=session),
nulls_first(cast("ColumnElement[Any]", cls.last_scheduling_decision), session=session),
nulls_first(running_drs.c.num_running, session=session), # many running -> lower priority
cls.run_after,
)
.limit(cls.DEFAULT_DAGRUNS_TO_EXAMINE)
)
query = query.where(DagRun.run_after <= func.now())
return session.scalars(with_row_locks(query, of=cls, session=session, skip_locked=True))
@classmethod
@provide_session
def find(
cls,
dag_id: str | list[str] | None = None,
run_id: Iterable[str] | None = None,
logical_date: datetime | Iterable[datetime] | None = None,
state: DagRunState | None = None,
no_backfills: bool = False,
run_type: DagRunType | None = None,
session: Session = NEW_SESSION,
logical_start_date: datetime | None = None,
logical_end_date: datetime | None = None,
) -> list[DagRun]:
"""
Return a set of dag runs for the given search criteria.
:param dag_id: the dag_id or list of dag_id to find dag runs for
:param run_id: defines the run id for this dag run
:param run_type: type of DagRun
:param logical_date: the logical date
:param state: the state of the dag run
:param no_backfills: return no backfills (True), return all (False).
Defaults to False
:param session: database session
:param logical_start_date: dag run that was executed from this date
:param logical_end_date: dag run that was executed until this date
"""
qry = select(cls)
dag_ids = [dag_id] if isinstance(dag_id, str) else dag_id
if dag_ids:
qry = qry.where(cls.dag_id.in_(dag_ids))
if is_container(run_id):
qry = qry.where(cls.run_id.in_(run_id))
elif run_id is not None:
qry = qry.where(cls.run_id == run_id)
if is_container(logical_date):
qry = qry.where(cls.logical_date.in_(logical_date))
elif logical_date is not None:
qry = qry.where(cls.logical_date == logical_date)
if logical_start_date and logical_end_date:
qry = qry.where(cls.logical_date.between(logical_start_date, logical_end_date))
elif logical_start_date:
qry = qry.where(cls.logical_date >= logical_start_date)
elif logical_end_date:
qry = qry.where(cls.logical_date <= logical_end_date)
if state:
qry = qry.where(cls.state == state)
if run_type:
qry = qry.where(cls.run_type == run_type)
if no_backfills:
qry = qry.where(cls.run_type != DagRunType.BACKFILL_JOB)
return list(session.scalars(qry.order_by(cls.logical_date)).all())
@classmethod
@provide_session
def find_duplicate(cls, dag_id: str, run_id: str, *, session: Session = NEW_SESSION) -> DagRun | None:
"""
Return an existing run for the DAG with a specific run_id.
*None* is returned if no such DAG run is found.
:param dag_id: the dag_id to find duplicates for
:param run_id: defines the run id for this dag run
:param session: database session
"""
return session.scalars(select(cls).where(cls.dag_id == dag_id, cls.run_id == run_id)).one_or_none()
@staticmethod
def generate_run_id(
*, run_type: DagRunType, logical_date: datetime | None = None, run_after: datetime
) -> str:
"""
Generate Run ID based on Run Type, run_after and logical Date.
:param run_type: type of DagRun
:param logical_date: the logical date
:param run_after: the date before which dag run won't start.
"""
# _Ensure_ run_type is a DagRunType, not just a string from user code
if logical_date:
return DagRunType(run_type).generate_run_id(suffix=run_after.isoformat())
return DagRunType(run_type).generate_run_id(suffix=f"{run_after.isoformat()}_{get_random_string()}")
@staticmethod
@provide_session
def fetch_task_instances(
dag_id: str | None = None,
run_id: str | None = None,
task_ids: list[str] | None = None,
state: Iterable[TaskInstanceState | None] | None = None,
session: Session = NEW_SESSION,
) -> list[TI]:
"""Return the task instances for this dag run."""
tis = (
select(TI)
.options(joinedload(TI.dag_run))
.where(
TI.dag_id == dag_id,
TI.run_id == run_id,
)
.order_by(TI.task_id, TI.map_index)
)
if state:
if isinstance(state, str):
tis = tis.where(TI.state == state)
else:
# this is required to deal with NULL values
if None in state:
if all(x is None for x in state):
tis = tis.where(TI.state.is_(None))
else:
not_none_state = (s for s in state if s)
tis = tis.where(or_(TI.state.in_(not_none_state), TI.state.is_(None)))
else:
tis = tis.where(TI.state.in_(state))
if task_ids is not None:
tis = tis.where(TI.task_id.in_(task_ids))
return list(session.scalars(tis).all())
    def _check_last_n_dagruns_failed(self, dag_id: str, max_consecutive_failed_dag_runs: int, session: Session):
        """Check if last N dags failed."""
        # Most recent N runs for this dag, newest first.
        dag_runs = session.scalars(
            select(DagRun)
            .where(DagRun.dag_id == dag_id)
            .order_by(DagRun.logical_date.desc())
            .limit(max_consecutive_failed_dag_runs)
        ).all()
        """ Marking dag as paused, if needed"""
        # Pause only when we actually have N runs and every single one failed.
        to_be_paused = len(dag_runs) >= max_consecutive_failed_dag_runs and all(
            dag_run.state == DagRunState.FAILED for dag_run in dag_runs
        )
        if to_be_paused:
            from airflow.models.dag import DagModel
            self.log.info(
                "Pausing DAG %s because last %s DAG runs failed.",
                self.dag_id,
                max_consecutive_failed_dag_runs,
            )
            filter_query = [
                DagModel.dag_id == self.dag_id,
            ]
            session.execute(
                update(DagModel)
                .where(or_(*filter_query))
                .values(is_paused=True)
                .execution_options(synchronize_session="fetch")
            )
            # Record the auto-pause in the audit log, attributed to the scheduler.
            session.add(
                Log(
                    event="paused",
                    dag_id=self.dag_id,
                    owner="scheduler",
                    owner_display_name="Scheduler",
                    extra=f"[('dag_id', '{self.dag_id}'), ('is_paused', True)]",
                )
            )
        else:
            self.log.debug(
                "Limit of consecutive DAG failed dag runs is not reached, DAG %s will not be paused.",
                self.dag_id,
            )
@provide_session
def get_task_instances(
self,
state: Iterable[TaskInstanceState | None] | None = None,
session: Session = NEW_SESSION,
) -> list[TI]:
"""
Return the task instances for this dag run.
Redirect to DagRun.fetch_task_instances method.
Keep this method because it is widely used across the code.
"""
task_ids = DagRun._get_partial_task_ids(self.dag)
return DagRun.fetch_task_instances(
dag_id=self.dag_id, run_id=self.run_id, task_ids=task_ids, state=state, session=session
)
@provide_session
def get_task_instance(
self,
task_id: str,
session: Session = NEW_SESSION,
*,
map_index: int = -1,
) -> TI | None:
"""
Return the task instance specified by task_id for this dag run.
:param task_id: the task id
:param session: Sqlalchemy ORM Session
"""
return DagRun.fetch_task_instance(
dag_id=self.dag_id,
dag_run_id=self.run_id,
task_id=task_id,
session=session,
map_index=map_index,
)
@staticmethod
@provide_session
def fetch_task_instance(
dag_id: str,
dag_run_id: str,
task_id: str,
session: Session = NEW_SESSION,
map_index: int = -1,
) -> TI | None:
"""
Return the task instance specified by task_id for this dag run.
:param dag_id: the DAG id
:param dag_run_id: the DAG run id
:param task_id: the task id
:param session: Sqlalchemy ORM Session
"""
return session.scalars(
select(TI).filter_by(dag_id=dag_id, run_id=dag_run_id, task_id=task_id, map_index=map_index)
).one_or_none()
def get_dag(self) -> SerializedDAG:
"""
Return the Dag associated with this DagRun.
:return: DAG
"""
if not self.dag:
raise AirflowException(f"The DAG (.dag) for {self} needs to be set")
return self.dag
    @staticmethod
    @provide_session
    def get_previous_dagrun(
        dag_run: DagRun, state: DagRunState | None = None, session: Session = NEW_SESSION
    ) -> DagRun | None:
        """
        Return the previous DagRun, if there is one.

        :param dag_run: the dag run
        :param session: SQLAlchemy ORM Session
        :param state: the dag run state
        """
        # Runs without a logical date have no meaningful "previous" run.
        if not dag_run or dag_run.logical_date is None:
            return None
        filters = [
            DagRun.dag_id == dag_run.dag_id,
            DagRun.logical_date < dag_run.logical_date,
        ]
        if state is not None:
            # Optionally restrict the search to runs in a specific state.
            filters.append(DagRun.state == state)
        # Latest run strictly before this run's logical date.
        return session.scalar(select(DagRun).where(*filters).order_by(DagRun.logical_date.desc()).limit(1))
@staticmethod
@provide_session
def get_previous_scheduled_dagrun(
dag_run_id: int,
session: Session = NEW_SESSION,
) -> DagRun | None:
"""
Return the previous SCHEDULED DagRun, if there is one.
:param dag_run_id: the DAG run ID
:param session: SQLAlchemy ORM Session
"""
dag_run = session.get(DagRun, dag_run_id)
if not dag_run or not dag_run.logical_date:
return None
return session.scalar(
select(DagRun)
.where(
DagRun.dag_id == dag_run.dag_id,
DagRun.logical_date < dag_run.logical_date,
DagRun.run_type != DagRunType.MANUAL,
)
.order_by(DagRun.logical_date.desc())
.limit(1)
)
def _tis_for_dagrun_state(self, *, dag, tis):
"""
Return the collection of tasks that should be considered for evaluation of terminal dag run state.
Teardown tasks by default are not considered for the purpose of dag run state. But
users may enable such consideration with on_failure_fail_dagrun.
"""
def is_effective_leaf(task):
for down_task_id in task.downstream_task_ids:
down_task = dag.get_task(down_task_id)
if not down_task.is_teardown or down_task.on_failure_fail_dagrun:
# we found a down task that is not ignorable; not a leaf
return False
# we found no ignorable downstreams
# evaluate whether task is itself ignorable
return not task.is_teardown or task.on_failure_fail_dagrun
leaf_task_ids = {x.task_id for x in dag.tasks if is_effective_leaf(x)}
if not leaf_task_ids:
# can happen if dag is exclusively teardown tasks
leaf_task_ids = {x.task_id for x in dag.tasks if not x.downstream_list}
leaf_tis = {ti for ti in tis if ti.task_id in leaf_task_ids if ti.state != TaskInstanceState.REMOVED}
return leaf_tis
def set_dagrun_span_attrs(self, span: Span | EmptySpan):
if self._state == DagRunState.FAILED:
span.set_attribute("airflow.dag_run.error", True)
# Explicitly set the value type to Union[...] to avoid a mypy error.
attributes: dict[str, AttributeValueType] = {
"airflow.category": "DAG runs",
"airflow.dag_run.dag_id": str(self.dag_id),
"airflow.dag_run.logical_date": str(self.logical_date),
"airflow.dag_run.run_id": str(self.run_id),
"airflow.dag_run.queued_at": str(self.queued_at),
"airflow.dag_run.run_start_date": str(self.start_date),
"airflow.dag_run.run_end_date": str(self.end_date),
"airflow.dag_run.run_duration": str(
(self.end_date - self.start_date).total_seconds() if self.start_date and self.end_date else 0
),
"airflow.dag_run.state": str(self._state),
"airflow.dag_run.run_type": str(self.run_type),
"airflow.dag_run.data_interval_start": str(self.data_interval_start),
"airflow.dag_run.data_interval_end": str(self.data_interval_end),
"airflow.dag_run.conf": str(self.conf),
}
if span.is_recording():
span.add_event(name="airflow.dag_run.queued", timestamp=datetime_to_nano(self.queued_at))
span.add_event(name="airflow.dag_run.started", timestamp=datetime_to_nano(self.start_date))
span.add_event(name="airflow.dag_run.ended", timestamp=datetime_to_nano(self.end_date))
span.set_attributes(attributes)
def start_dr_spans_if_needed(self, tis: list[TI]):
# If there is no value in active_spans, then the span hasn't already been started.
if self.active_spans is not None and self.active_spans.get("dr:" + str(self.id)) is None:
if self.span_status == SpanStatus.NOT_STARTED or self.span_status == SpanStatus.NEEDS_CONTINUANCE:
dr_span = None
continue_ti_spans = False
if self.span_status == SpanStatus.NOT_STARTED:
dr_span = Trace.start_root_span(
span_name=f"{self.dag_id}",
component="dag",
start_time=self.queued_at, # This is later converted to nano.
start_as_current=False,
)
elif self.span_status == SpanStatus.NEEDS_CONTINUANCE:
# Use the existing context_carrier to set the initial dag_run span as the parent.
parent_context = Trace.extract(self.context_carrier)
with Trace.start_child_span(
span_name="new_scheduler", parent_context=parent_context
) as s:
s.set_attribute("trace_status", "continued")
dr_span = Trace.start_child_span(
span_name=f"{self.dag_id}_continued",
parent_context=parent_context,
component="dag",
# No start time
start_as_current=False,
)
# After this span is started, the context_carrier will be replaced by the new one.
# New task span will use this span as the parent.
continue_ti_spans = True
carrier = Trace.inject()
self.context_carrier = carrier
self.span_status = SpanStatus.ACTIVE
# Set the span in a synchronized dictionary, so that the variable can be used to end the span.
self.active_spans.set("dr:" + str(self.id), dr_span)
self.log.debug(
"DagRun span has been started and the injected context_carrier is: %s",
self.context_carrier,
)
# Start TI spans that also need continuance.
if continue_ti_spans:
new_dagrun_context = Trace.extract(self.context_carrier)
for ti in tis:
if ti.span_status == SpanStatus.NEEDS_CONTINUANCE:
ti_span = Trace.start_child_span(
span_name=f"{ti.task_id}_continued",
parent_context=new_dagrun_context,
start_as_current=False,
)
ti_carrier = Trace.inject()
ti.context_carrier = ti_carrier
ti.span_status = SpanStatus.ACTIVE
self.active_spans.set("ti:" + ti.id, ti_span)
else:
self.log.debug(
"Found span_status '%s', while updating state for dag_run '%s'",
self.span_status,
self.run_id,
)
    def end_dr_span_if_needed(self):
        """End this DagRun's OTel span if this scheduler owns it; otherwise update ``span_status`` so the owner ends it."""
        if self.active_spans is not None:
            # The span dictionary is keyed by "dr:<id>"; a hit means this scheduler started the span.
            active_span = self.active_spans.get("dr:" + str(self.id))
            if active_span is not None:
                self.log.debug(
                    "Found active span with span_id: %s, for dag_id: %s, run_id: %s, state: %s",
                    active_span.get_span_context().span_id,
                    self.dag_id,
                    self.run_id,
                    self.state,
                )
                self.set_dagrun_span_attrs(span=active_span)
                active_span.end(end_time=datetime_to_nano(self.end_date))
                # Remove the span from the dict.
                self.active_spans.delete("dr:" + str(self.id))
                self.span_status = SpanStatus.ENDED
            else:
                if self.span_status == SpanStatus.ACTIVE:
                    # Another scheduler has started the span.
                    # Update the DB SpanStatus to notify the owner to end it.
                    self.span_status = SpanStatus.SHOULD_END
                elif self.span_status == SpanStatus.NEEDS_CONTINUANCE:
                    # This is a corner case where the scheduler exited gracefully
                    # while the dag_run was almost done.
                    # Since it reached this point, the dag has finished but there has been no time
                    # to create a new span for the current scheduler.
                    # There is no need for more spans, update the status on the db.
                    self.span_status = SpanStatus.ENDED
                else:
                    self.log.debug(
                        "No active span has been found for dag_id: %s, run_id: %s, state: %s",
                        self.dag_id,
                        self.run_id,
                        self.state,
                    )
    @provide_session
    def update_state(
        self, session: Session = NEW_SESSION, execute_callbacks: bool = True
    ) -> tuple[list[TI], DagCallbackRequest | None]:
        """
        Determine the overall state of the DagRun based on the state of its TaskInstances.

        :param session: Sqlalchemy ORM Session
        :param execute_callbacks: Should dag callbacks (success/failure, SLA etc.) be invoked
            directly (default: true) or recorded as a pending request in the ``returned_callback`` property
        :return: Tuple containing tis that can be scheduled in the current loop & `returned_callback` that
            needs to be executed
        """
        # Callback to execute in case of Task Failures
        callback: DagCallbackRequest | None = None

        class _UnfinishedStates(NamedTuple):
            # Lightweight view over the still-unfinished TIs of this run.
            tis: Sequence[TI]

            @classmethod
            def calculate(cls, unfinished_tis: Sequence[TI]) -> _UnfinishedStates:
                return cls(tis=unfinished_tis)

            @property
            def should_schedule(self) -> bool:
                # Only re-evaluate scheduling here when no TI carries a cross-run or
                # concurrency constraint (depends_on_past, max_active_tis_per_dag(run))
                # and none is deferred; constrained TIs are handled by the scheduler loop.
                return (
                    bool(self.tis)
                    and all(not getattr(t.task, "depends_on_past", False) for t in self.tis if t.task)
                    and all(
                        getattr(t.task, "max_active_tis_per_dag", None) is None for t in self.tis if t.task
                    )
                    and all(
                        getattr(t.task, "max_active_tis_per_dagrun", None) is None for t in self.tis if t.task
                    )
                    and all(t.state != TaskInstanceState.DEFERRED for t in self.tis)
                )

            def recalculate(self) -> _UnfinishedStates:
                return self._replace(tis=[t for t in self.tis if t.state in State.unfinished])

        start_dttm = timezone.utcnow()
        self.last_scheduling_decision = start_dttm
        with (
            Stats.timer(f"dagrun.dependency-check.{self.dag_id}"),
            Stats.timer("dagrun.dependency-check", tags=self.stats_tags),
        ):
            dag = self.get_dag()
            info = self.task_instance_scheduling_decisions(session)
            tis = info.tis
            schedulable_tis = info.schedulable_tis
            changed_tis = info.changed_tis
            finished_tis = info.finished_tis
            unfinished = _UnfinishedStates.calculate(info.unfinished_tis)
            if unfinished.should_schedule:
                are_runnable_tasks = schedulable_tis or changed_tis
                # small speed up
                if not are_runnable_tasks:
                    # Retry-delay / reschedule-period TIs may become runnable later;
                    # checking them can also mutate TI states via upstream-failed flagging.
                    are_runnable_tasks, changed_by_upstream = self._are_premature_tis(
                        unfinished.tis, finished_tis, session
                    )
                    if changed_by_upstream:  # Something changed, we need to recalculate!
                        unfinished = unfinished.recalculate()

        tis_for_dagrun_state = self._tis_for_dagrun_state(dag=dag, tis=tis)

        # if all tasks finished and at least one failed, the run failed
        if not unfinished.tis and any(x.state in State.failed_states for x in tis_for_dagrun_state):
            self.log.info("Marking run %s failed", self)
            self.set_state(DagRunState.FAILED)
            self.notify_dagrun_state_changed(msg="task_failure")
            # Either run the failure callback inline (dag.test) or record a request for the DAG processor.
            if execute_callbacks and dag.has_on_failure_callback:
                self.handle_dag_callback(dag=cast("SDKDAG", dag), success=False, reason="task_failure")
            elif dag.has_on_failure_callback:
                callback = DagCallbackRequest(
                    filepath=self.dag_model.relative_fileloc,
                    dag_id=self.dag_id,
                    run_id=self.run_id,
                    bundle_name=self.dag_model.bundle_name,
                    bundle_version=self.bundle_version,
                    context_from_server=DagRunContext(
                        dag_run=self,
                        last_ti=self.get_last_ti(dag=dag, session=session),
                    ),
                    is_failure_callback=True,
                    msg="task_failure",
                )
            # Check if the max_consecutive_failed_dag_runs has been provided and not 0
            # and last consecutive failures are more
            if dag.max_consecutive_failed_dag_runs > 0:
                self.log.debug(
                    "Checking consecutive failed DAG runs for DAG %s, limit is %s",
                    self.dag_id,
                    dag.max_consecutive_failed_dag_runs,
                )
                self._check_last_n_dagruns_failed(dag.dag_id, dag.max_consecutive_failed_dag_runs, session)

        # if all leaves succeeded and no unfinished tasks, the run succeeded
        elif not unfinished.tis and all(x.state in State.success_states for x in tis_for_dagrun_state):
            self.log.info("Marking run %s successful", self)
            self.set_state(DagRunState.SUCCESS)
            self.notify_dagrun_state_changed(msg="success")
            if execute_callbacks and dag.has_on_success_callback:
                self.handle_dag_callback(dag=cast("SDKDAG", dag), success=True, reason="success")
            elif dag.has_on_success_callback:
                callback = DagCallbackRequest(
                    filepath=self.dag_model.relative_fileloc,
                    dag_id=self.dag_id,
                    run_id=self.run_id,
                    bundle_name=self.dag_model.bundle_name,
                    bundle_version=self.bundle_version,
                    context_from_server=DagRunContext(
                        dag_run=self,
                        last_ti=self.get_last_ti(dag=dag, session=session),
                    ),
                    is_failure_callback=False,
                    msg="success",
                )
            if dag.deadline:
                # The dagrun has succeeded. If there were any Deadlines for it which were not breached, they are no longer needed.
                if any(
                    isinstance(d.reference, DeadlineReference.TYPES.DAGRUN)
                    for d in cast("list", dag.deadline)
                ):
                    Deadline.prune_deadlines(session=session, conditions={DagRun.run_id: self.run_id})

        # if *all tasks* are deadlocked, the run failed
        elif unfinished.should_schedule and not are_runnable_tasks:
            self.log.error("Task deadlock (no runnable tasks); marking run %s failed", self)
            self.set_state(DagRunState.FAILED)
            self.notify_dagrun_state_changed(msg="all_tasks_deadlocked")
            if execute_callbacks and dag.has_on_failure_callback:
                self.handle_dag_callback(
                    dag=cast("SDKDAG", dag),
                    success=False,
                    reason="all_tasks_deadlocked",
                )
            elif dag.has_on_failure_callback:
                callback = DagCallbackRequest(
                    filepath=self.dag_model.relative_fileloc,
                    dag_id=self.dag_id,
                    run_id=self.run_id,
                    bundle_name=self.dag_model.bundle_name,
                    bundle_version=self.bundle_version,
                    context_from_server=DagRunContext(
                        dag_run=self,
                        last_ti=self.get_last_ti(dag=dag, session=session),
                    ),
                    is_failure_callback=True,
                    msg="all_tasks_deadlocked",
                )

        # finally, if the leaves aren't done, the dag is still running
        else:
            # It might need to start TI spans as well.
            self.start_dr_spans_if_needed(tis=tis)
            self.set_state(DagRunState.RUNNING)

        if self._state == DagRunState.FAILED or self._state == DagRunState.SUCCESS:
            msg = (
                "DagRun Finished: dag_id=%s, logical_date=%s, run_id=%s, "
                "run_start_date=%s, run_end_date=%s, run_duration=%s, "
                "state=%s, run_type=%s, "
                "data_interval_start=%s, data_interval_end=%s,"
            )
            self.log.info(
                msg,
                self.dag_id,
                self.logical_date,
                self.run_id,
                self.start_date,
                self.end_date,
                (
                    (self.end_date - self.start_date).total_seconds()
                    if self.start_date and self.end_date
                    else None
                ),
                self._state,
                self.run_type,
                self.data_interval_start,
                self.data_interval_end,
            )
            self.end_dr_span_if_needed()
            session.flush()

        self._emit_true_scheduling_delay_stats_for_finished_state(finished_tis)
        self._emit_duration_stats_for_finished_state()
        session.merge(self)
        # We do not flush here for performance reasons(It increases queries count by +20)
        return schedulable_tis, callback
    @provide_session
    def task_instance_scheduling_decisions(self, session: Session = NEW_SESSION) -> TISchedulingDecision:
        """
        Bucket this run's task instances into scheduling groups.

        Returns a :class:`TISchedulingDecision` with all TIs plus the subsets that
        are schedulable now, have changed state during the check, are unfinished,
        or are finished.

        :param session: Sqlalchemy ORM Session
        """
        tis = self.get_task_instances(session=session, state=State.task_states)
        self.log.debug("number of tis tasks for %s: %s task(s)", self, len(tis))

        def _filter_tis_and_exclude_removed(dag: SerializedDAG, tis: list[TI]) -> Iterable[TI]:
            """Populate ``ti.task`` while excluding those missing one, marking them as REMOVED."""
            for ti in tis:
                try:
                    ti.task = dag.get_task(ti.task_id)
                except TaskNotFound:
                    if ti.state != TaskInstanceState.REMOVED:
                        self.log.error("Failed to get task for ti %s. Marking it as removed.", ti)
                        ti.state = TaskInstanceState.REMOVED
                        session.flush()
                else:
                    yield ti

        tis = list(_filter_tis_and_exclude_removed(self.get_dag(), tis))
        unfinished_tis = [t for t in tis if t.state in State.unfinished]
        finished_tis = [t for t in tis if t.state in State.finished]
        if unfinished_tis:
            schedulable_tis = [ut for ut in unfinished_tis if ut.state in SCHEDULEABLE_STATES]
            self.log.debug("number of scheduleable tasks for %s: %s task(s)", self, len(schedulable_tis))
            schedulable_tis, changed_tis, expansion_happened = self._get_ready_tis(
                schedulable_tis,
                finished_tis,
                session=session,
            )
            # During expansion, we may change some tis into non-schedulable
            # states, so we need to re-compute.
            if expansion_happened:
                changed_tis = True
                new_unfinished_tis = [t for t in unfinished_tis if t.state in State.unfinished]
                finished_tis.extend(t for t in unfinished_tis if t.state in State.finished)
                unfinished_tis = new_unfinished_tis
        else:
            # No unfinished work: nothing to schedule and nothing changed.
            schedulable_tis = []
            changed_tis = False
        return TISchedulingDecision(
            tis=tis,
            schedulable_tis=schedulable_tis,
            changed_tis=changed_tis,
            unfinished_tis=unfinished_tis,
            finished_tis=finished_tis,
        )
def notify_dagrun_state_changed(self, msg: str):
try:
if self.state == DagRunState.RUNNING:
get_listener_manager().hook.on_dag_run_running(dag_run=self, msg=msg)
elif self.state == DagRunState.SUCCESS:
get_listener_manager().hook.on_dag_run_success(dag_run=self, msg=msg)
elif self.state == DagRunState.FAILED:
get_listener_manager().hook.on_dag_run_failed(dag_run=self, msg=msg)
except Exception:
self.log.exception("Error while calling listener")
# deliberately not notifying on QUEUED
# we can't get all the state changes on SchedulerJob,
# or LocalTaskJob, so we don't want to "falsely advertise" we notify about that
    @provide_session
    def get_last_ti(self, dag: SerializedDAG, session: Session = NEW_SESSION) -> TI | None:
        """Get Last TI from the dagrun to build and pass Execution context object from server to then run callbacks."""
        tis = self.get_task_instances(session=session)
        # tis from a dagrun may not be a part of dag.partial_subset,
        # since dag.partial_subset is a subset of the dag.
        # This ensures that we will only use the accessible TI
        # context for the callback.
        if dag.partial:
            # NOTE(review): `not ti.state == State.NONE` drops TIs whose state equals
            # State.NONE — presumably TIs outside the partial subset; confirm intent.
            tis = [ti for ti in tis if not ti.state == State.NONE]
        # filter out removed tasks
        tis = natsorted(
            (ti for ti in tis if ti.state != TaskInstanceState.REMOVED),
            key=lambda ti: ti.task_id,
        )
        if not tis:
            return None
        ti = tis[-1]  # get last TaskInstance of DagRun
        return ti
    def handle_dag_callback(self, dag: SDKDAG, success: bool = True, reason: str = "success"):
        """Only needed for `dag.test` where `execute_callbacks=True` is passed to `update_state`."""
        from airflow.api_fastapi.execution_api.datamodels.taskinstance import (
            DagRun as DRDataModel,
            TaskInstance as TIDataModel,
            TIRunContext,
        )
        from airflow.sdk.execution_time.task_runner import RuntimeTaskInstance

        last_ti = self.get_last_ti(cast("SerializedDAG", dag))
        if last_ti:
            # Build a RuntimeTaskInstance so the callback context looks like the
            # one a real worker would render for the run's last TI.
            last_ti_model = TIDataModel.model_validate(last_ti, from_attributes=True)
            task = dag.get_task(last_ti.task_id)
            dag_run_data = DRDataModel(
                dag_id=self.dag_id,
                run_id=self.run_id,
                logical_date=self.logical_date,
                data_interval_start=self.data_interval_start,
                data_interval_end=self.data_interval_end,
                run_after=self.run_after,
                start_date=self.start_date or timezone.utcnow(),
                end_date=self.end_date,
                run_type=DagRunType(self.run_type),
                state=self.state,
                conf=self.conf,
                consumed_asset_events=[],
                partition_key=self.partition_key,
            )
            runtime_ti = RuntimeTaskInstance.model_construct(
                **last_ti_model.model_dump(exclude_unset=True),
                task=task,
                _ti_context_from_server=TIRunContext(
                    dag_run=dag_run_data,
                    max_tries=last_ti.max_tries,
                    variables=[],
                    connections=[],
                    xcom_keys_to_clear=[],
                ),
                max_tries=last_ti.max_tries,
            )
            context = runtime_ti.get_template_context()
        else:
            # No TI available (e.g. an empty run): provide a minimal context.
            context = {
                "dag": dag,
                "run_id": self.run_id,
            }
        context["reason"] = reason
        callbacks = dag.on_success_callback if success else dag.on_failure_callback
        if not callbacks:
            self.log.warning("Callback requested, but dag didn't have any for DAG: %s.", dag.dag_id)
            return
        # Normalise to a list; a single callable is allowed on the DAG.
        callbacks = callbacks if isinstance(callbacks, list) else [callbacks]
        for callback in callbacks:
            self.log.info(
                "Executing on_%s dag callback: %s",
                "success" if success else "failure",
                callback.__name__ if hasattr(callback, "__name__") else repr(callback),
            )
            try:
                callback(context)
            except Exception:
                # A failing user callback must not take the scheduler down.
                self.log.exception("Callback failed for %s", dag.dag_id)
                Stats.incr("dag.callback_exceptions", tags={"dag_id": dag.dag_id})
    def _get_ready_tis(
        self,
        schedulable_tis: list[TI],
        finished_tis: list[TI],
        session: Session,
    ) -> tuple[list[TI], bool, bool]:
        """
        Filter *schedulable_tis* down to the TIs whose dependencies are met,
        expanding mapped tasks on the way.

        :return: ``(ready_tis, changed_tis, expansion_happened)``
        """
        old_states: dict[TaskInstanceKey, Any] = {}
        ready_tis: list[TI] = []
        changed_tis = False
        if not schedulable_tis:
            return ready_tis, changed_tis, False
        # If we expand TIs, we need a new list so that we iterate over them too. (We can't alter
        # `schedulable_tis` in place and have the `for` loop pick them up
        additional_tis: list[TI] = []
        dep_context = DepContext(
            flag_upstream_failed=True,
            ignore_unmapped_tasks=True,  # Ignore this Dep, as we will expand it if we can.
            finished_tis=finished_tis,
        )

        def _expand_mapped_task_if_needed(ti: TI) -> Iterable[TI] | None:
            """
            Try to expand the ti, if needed.

            If the ti needs expansion, newly created task instances are
            returned as well as the original ti.
            The original ti is also modified in-place and assigned the
            ``map_index`` of 0.
            If the ti does not need expansion, either because the task is not
            mapped, or has already been expanded, *None* is returned.
            """
            from airflow.models.mappedoperator import is_mapped

            if TYPE_CHECKING:
                assert ti.task
            if ti.map_index >= 0:  # Already expanded, we're good.
                return None
            if is_mapped(ti.task):
                # If we get here, it could be that we are moving from non-mapped to mapped
                # after task instance clearing or this ti is not yet expanded. Safe to clear
                # the db references.
                ti.clear_db_references(session=session)
            try:
                expanded_tis, _ = TaskMap.expand_mapped_task(ti.task, self.run_id, session=session)
            except NotMapped:  # Not a mapped task, nothing needed.
                return None
            if expanded_tis:
                return expanded_tis
            return ()

        # Check dependencies.
        expansion_happened = False
        # Set of task ids for which was already done _revise_map_indexes_if_mapped
        revised_map_index_task_ids: set[str] = set()
        # additional_tis is appended to while iterating; chain picks the new items up.
        for schedulable in itertools.chain(schedulable_tis, additional_tis):
            if TYPE_CHECKING:
                assert isinstance(schedulable.task, Operator)
            old_state = schedulable.state
            if not schedulable.are_dependencies_met(session=session, dep_context=dep_context):
                # Remember the prior state so we can detect dep-check side effects below.
                old_states[schedulable.key] = old_state
                continue
            # If schedulable is not yet expanded, try doing it now. This is
            # called in two places: First and ideally in the mini scheduler at
            # the end of LocalTaskJob, and then as an "expansion of last resort"
            # in the scheduler to ensure that the mapped task is correctly
            # expanded before executed. Also see _revise_map_indexes_if_mapped
            # docstring for additional information.
            new_tis = None
            if schedulable.map_index < 0:
                new_tis = _expand_mapped_task_if_needed(schedulable)
                if new_tis is not None:
                    additional_tis.extend(new_tis)
                    expansion_happened = True
            if new_tis is None and schedulable.state in SCHEDULEABLE_STATES:
                # It's enough to revise map index once per task id,
                # checking the map index for each mapped task significantly slows down scheduling
                if schedulable.task.task_id not in revised_map_index_task_ids:
                    ready_tis.extend(
                        self._revise_map_indexes_if_mapped(
                            schedulable.task, dag_version_id=schedulable.dag_version_id, session=session
                        )
                    )
                    revised_map_index_task_ids.add(schedulable.task.task_id)
                # _revise_map_indexes_if_mapped might mark the current task as REMOVED
                # after calculating mapped task length, so we need to re-check
                # the task state to ensure it's still schedulable
                if schedulable.state in SCHEDULEABLE_STATES:
                    ready_tis.append(schedulable)

        # Check if any ti changed state
        tis_filter = TI.filter_for_tis(old_states)
        if tis_filter is not None:
            fresh_tis = session.scalars(select(TI).where(tis_filter)).all()
            changed_tis = any(ti.state != old_states[ti.key] for ti in fresh_tis)
        return ready_tis, changed_tis, expansion_happened
def _are_premature_tis(
self,
unfinished_tis: Sequence[TI],
finished_tis: list[TI],
session: Session,
) -> tuple[bool, bool]:
dep_context = DepContext(
flag_upstream_failed=True,
ignore_in_retry_period=True,
ignore_in_reschedule_period=True,
finished_tis=finished_tis,
)
# there might be runnable tasks that are up for retry and for some reason(retry delay, etc.) are
# not ready yet, so we set the flags to count them in
return (
any(ut.are_dependencies_met(dep_context=dep_context, session=session) for ut in unfinished_tis),
dep_context.have_changed_ti_states,
)
    def _emit_true_scheduling_delay_stats_for_finished_state(self, finished_tis: list[TI]) -> None:
        """
        Emit the true scheduling delay stats.

        The true scheduling delay stats is defined as the time when the first
        task in DAG starts minus the expected DAG run datetime.

        This helper method is used in ``update_state`` when the state of the
        DAG run is updated to a completed status (either success or failure).
        It finds the first started task within the DAG, calculates the run's
        expected start time based on the logical date and timetable, and gets
        the delay from the difference of these two values.

        The emitted data may contain outliers (e.g. when the first task was
        cleared, so the second task's start date will be used), but we can get
        rid of the outliers on the stats side through dashboards tooling.

        Note that the stat will only be emitted for scheduler-triggered DAG runs
        (i.e. when ``run_type`` is *SCHEDULED* and ``clear_number`` is equal to 0).
        """
        from airflow.models.dag import get_run_data_interval

        # Guard clauses: only finished, scheduler-triggered, never-cleared runs
        # with at least one finished TI are measured.
        if self.state == TaskInstanceState.RUNNING:
            return
        if self.run_type != DagRunType.SCHEDULED:
            return
        if self.clear_number > 0:
            return
        if not finished_tis:
            return
        try:
            dag = self.get_dag()
            if not dag.timetable.periodic:
                # We can't emit this metric if there is no following schedule to calculate from!
                return
            try:
                first_start_date = min(ti.start_date for ti in finished_tis if ti.start_date)
            except ValueError:  # No start dates at all.
                pass
            else:
                # TODO: Logically, this should be DagRunInfo.run_after, but the
                # information is not stored on a DagRun, only before the actual
                # execution on DagModel.next_dagrun_create_after. We should add
                # a field on DagRun for this instead of relying on the run
                # always happening immediately after the data interval.
                data_interval_end = get_run_data_interval(dag.timetable, self).end
                true_delay = first_start_date - data_interval_end
                if true_delay.total_seconds() > 0:
                    Stats.timing(
                        f"dagrun.{dag.dag_id}.first_task_scheduling_delay", true_delay, tags=self.stats_tags
                    )
                    Stats.timing("dagrun.first_task_scheduling_delay", true_delay, tags=self.stats_tags)
        except Exception:
            # Metrics are best-effort; never let them break state transitions.
            self.log.warning("Failed to record first_task_scheduling_delay metric:", exc_info=True)
def _emit_duration_stats_for_finished_state(self):
if self.state == DagRunState.RUNNING:
return
if self.start_date is None:
self.log.warning("Failed to record duration of %s: start_date is not set.", self)
return
if self.end_date is None:
self.log.warning("Failed to record duration of %s: end_date is not set.", self)
return
duration = self.end_date - self.start_date
timer_params = {"dt": duration, "tags": self.stats_tags}
Stats.timing(f"dagrun.duration.{self.state}.{self.dag_id}", **timer_params)
Stats.timing(f"dagrun.duration.{self.state}", **timer_params)
    @provide_session
    def verify_integrity(self, *, session: Session = NEW_SESSION, dag_version_id: UUIDType) -> None:
        """
        Verify the DagRun by checking for removed tasks or tasks that are not in the database yet.

        It will set state to removed or add the task if required.

        :param dag_version_id: The DAG version ID
        :param session: Sqlalchemy ORM Session
        """
        from airflow.settings import task_instance_mutation_hook

        # Set for the empty default in airflow.settings -- if it's not set this means it has been changed
        # Note: Literal[True, False] instead of bool because otherwise it doesn't correctly find the overload.
        hook_is_noop: Literal[True, False] = getattr(task_instance_mutation_hook, "is_noop", False)
        dag = self.get_dag()
        task_ids = self._check_for_removed_or_restored_tasks(
            dag, task_instance_mutation_hook, session=session
        )

        def task_filter(task: Operator) -> bool:
            # A task needs creation when it has no TI yet AND either this is a
            # backfill run or its start/end window covers this run's logical date.
            return task.task_id not in task_ids and (
                self.run_type == DagRunType.BACKFILL_JOB
                or (
                    task.start_date is None
                    or self.logical_date is None
                    or task.start_date <= self.logical_date
                )
                and (task.end_date is None or self.logical_date is None or self.logical_date <= task.end_date)
            )

        created_counts: dict[str, int] = defaultdict(int)
        task_creator = self._get_task_creator(
            created_counts, task_instance_mutation_hook, hook_is_noop, dag_version_id
        )
        # Create the missing tasks, including mapped tasks
        tis_to_create = self._create_tasks(
            (task for task in dag.task_dict.values() if task_filter(task)),
            task_creator,
            session=session,
        )
        self._create_task_instances(self.dag_id, tis_to_create, created_counts, hook_is_noop, session=session)
    def _check_for_removed_or_restored_tasks(
        self, dag: SerializedDAG, ti_mutation_hook, *, session: Session
    ) -> set[str]:
        """
        Check for removed tasks/restored/missing tasks.

        :param dag: DAG object corresponding to the dagrun
        :param ti_mutation_hook: task_instance_mutation_hook function
        :param session: Sqlalchemy ORM Session
        :return: Task IDs in the DAG run
        """
        from airflow.models.expandinput import NotFullyPopulated
        from airflow.models.mappedoperator import get_mapped_ti_count

        tis = self.get_task_instances(session=session)
        # check for removed or restored tasks
        task_ids = set()
        for ti in tis:
            ti_mutation_hook(ti)
            task_ids.add(ti.task_id)
            try:
                task = dag.get_task(ti.task_id)
                # A REMOVED TI whose task reappeared in the DAG gets un-removed.
                should_restore_task = (task is not None) and ti.state == TaskInstanceState.REMOVED
                if should_restore_task:
                    self.log.info("Restoring task '%s' which was previously removed from DAG '%s'", ti, dag)
                    Stats.incr(f"task_restored_to_dag.{dag.dag_id}", tags=self.stats_tags)
                    # Same metric with tagging
                    Stats.incr("task_restored_to_dag", tags={**self.stats_tags, "dag_id": dag.dag_id})
                    ti.state = None
            except AirflowException:
                # Task no longer exists in the DAG definition.
                if ti.state == TaskInstanceState.REMOVED:
                    pass  # ti has already been removed, just ignore it
                elif self.state != DagRunState.RUNNING and not dag.partial:
                    self.log.warning("Failed to get task '%s' for dag '%s'. Marking it as removed.", ti, dag)
                    Stats.incr(f"task_removed_from_dag.{dag.dag_id}", tags=self.stats_tags)
                    # Same metric with tagging
                    Stats.incr("task_removed_from_dag", tags={**self.stats_tags, "dag_id": dag.dag_id})
                    ti.state = TaskInstanceState.REMOVED
                continue
            try:
                num_mapped_tis = task.get_parse_time_mapped_ti_count()
            except NotMapped:
                continue
            except NotFullyPopulated:
                # What if it is _now_ dynamically mapped, but wasn't before?
                try:
                    total_length = get_mapped_ti_count(task, self.run_id, session=session)
                except NotFullyPopulated:
                    # Not all upstreams finished, so we can't tell what should be here. Remove everything.
                    if ti.map_index >= 0:
                        self.log.debug(
                            "Removing the unmapped TI '%s' as the mapping can't be resolved yet", ti
                        )
                        ti.state = TaskInstanceState.REMOVED
                    continue
                # Upstreams finished, check there aren't any extras
                if ti.map_index >= total_length:
                    self.log.debug(
                        "Removing task '%s' as the map_index is longer than the resolved mapping list (%d)",
                        ti,
                        total_length,
                    )
                    ti.state = TaskInstanceState.REMOVED
            else:
                # Check if the number of mapped literals has changed, and we need to mark this TI as removed.
                if ti.map_index >= num_mapped_tis:
                    self.log.debug(
                        "Removing task '%s' as the map_index is longer than the literal mapping list (%s)",
                        ti,
                        num_mapped_tis,
                    )
                    ti.state = TaskInstanceState.REMOVED
                elif ti.map_index < 0:
                    self.log.debug("Removing the unmapped TI '%s' as the mapping can now be performed", ti)
                    ti.state = TaskInstanceState.REMOVED
        return task_ids
    @overload
    def _get_task_creator(
        self,
        created_counts: dict[str, int],
        ti_mutation_hook: Callable,
        hook_is_noop: Literal[True],
        dag_version_id: UUIDType,
    ) -> Callable[[Operator, Iterable[int]], Iterator[dict[str, Any]]]: ...

    @overload
    def _get_task_creator(
        self,
        created_counts: dict[str, int],
        ti_mutation_hook: Callable,
        hook_is_noop: Literal[False],
        dag_version_id: UUIDType,
    ) -> Callable[[Operator, Iterable[int]], Iterator[TI]]: ...

    def _get_task_creator(
        self,
        created_counts: dict[str, int],
        ti_mutation_hook: Callable,
        hook_is_noop: Literal[True, False],
        dag_version_id: UUIDType,
    ) -> Callable[[Operator, Iterable[int]], Iterator[dict[str, Any]] | Iterator[TI]]:
        """
        Get the task creator function.

        When the mutation hook is a noop the creator yields plain mappings
        suitable for ``bulk_insert_mappings``; otherwise it yields full ``TI``
        objects so the hook can mutate each one.

        This function also updates the created_counts dictionary with the number of tasks created.

        :param created_counts: Dictionary of task_type -> count of created TIs
        :param ti_mutation_hook: task_instance_mutation_hook function
        :param hook_is_noop: Whether the task_instance_mutation_hook is a noop
        """
        if hook_is_noop:

            def create_ti_mapping(task: Operator, indexes: Iterable[int]) -> Iterator[dict[str, Any]]:
                created_counts[task.task_type] += 1
                for map_index in indexes:
                    yield TI.insert_mapping(
                        self.run_id, task, map_index=map_index, dag_version_id=dag_version_id
                    )

            creator = create_ti_mapping
        else:

            def create_ti(task: Operator, indexes: Iterable[int]) -> Iterator[TI]:
                for map_index in indexes:
                    ti = TI(task, run_id=self.run_id, map_index=map_index, dag_version_id=dag_version_id)
                    ti_mutation_hook(ti)
                    if ti.operator:
                        created_counts[ti.operator] += 1
                    yield ti

            creator = create_ti
        return creator
    def _create_tasks(
        self,
        tasks: Iterable[Operator],
        task_creator: Callable[[Operator, Iterable[int]], CreatedTasks],
        *,
        session: Session,
    ) -> CreatedTasks:
        """
        Create missing tasks -- and expand any MappedOperator that _only_ have literals as input.

        :param tasks: Tasks to create jobs for in the DAG run
        :param task_creator: Function to create task instances
        """
        from airflow.models.expandinput import NotFullyPopulated
        from airflow.models.mappedoperator import get_mapped_ti_count

        map_indexes: Iterable[int]
        for task in tasks:
            try:
                count = get_mapped_ti_count(task, self.run_id, session=session)
            except (NotMapped, NotFullyPopulated):
                # Unmapped task (or mapping not resolvable yet): a single TI with map_index -1.
                map_indexes = (-1,)
            else:
                if count:
                    map_indexes = range(count)
                else:
                    # Make sure to always create at least one ti; this will be
                    # marked as REMOVED later at runtime.
                    map_indexes = (-1,)
            yield from task_creator(task, map_indexes)
    def _create_task_instances(
        self,
        dag_id: str,
        tasks: Iterator[dict[str, Any]] | Iterator[TI],
        created_counts: dict[str, int],
        hook_is_noop: bool,
        *,
        session: Session,
    ) -> None:
        """
        Create the necessary task instances from the given tasks.

        :param dag_id: DAG ID associated with the dagrun
        :param tasks: the tasks to create the task instances from
        :param created_counts: a dictionary of number of tasks -> total ti created by the task creator
        :param hook_is_noop: whether the task_instance_mutation_hook is noop
        :param session: the session to use
        """
        # Fetch the information we need before handling the exception to avoid
        # PendingRollbackError due to the session being invalidated on exception
        # see https://github.com/apache/superset/pull/530
        run_id = self.run_id
        try:
            # Mappings take the faster bulk-insert path; TI objects must be saved
            # individually so mutation-hook changes are persisted.
            if hook_is_noop:
                session.bulk_insert_mappings(TI.__mapper__, tasks)
            else:
                session.bulk_save_objects(tasks)
            for task_type, count in created_counts.items():
                Stats.incr(f"task_instance_created_{task_type}", count, tags=self.stats_tags)
                # Same metric with tagging
                Stats.incr("task_instance_created", count, tags={**self.stats_tags, "task_type": task_type})
            session.flush()
        except IntegrityError:
            # A concurrent scheduler may have created the TIs already; back off.
            self.log.info(
                "Hit IntegrityError while creating the TIs for %s- %s",
                dag_id,
                run_id,
                exc_info=True,
            )
            self.log.info("Doing session rollback.")
            # TODO[HA]: We probably need to savepoint this so we can keep the transaction alive.
            session.rollback()
    def _revise_map_indexes_if_mapped(
        self, task: Operator, *, dag_version_id: UUIDType, session: Session
    ) -> Iterator[TI]:
        """
        Check if task increased or reduced in length and handle appropriately.

        Task instances that do not already exist are created and returned if
        possible. Expansion only happens if all upstreams are ready; otherwise
        we delay expansion to the "last resort". See comments at the call site
        for more details.
        """
        from airflow.models.expandinput import NotFullyPopulated
        from airflow.models.mappedoperator import get_mapped_ti_count
        from airflow.settings import task_instance_mutation_hook

        try:
            total_length = get_mapped_ti_count(task, self.run_id, session=session)
        except NotMapped:
            return  # Not a mapped task, don't need to do anything.
        except NotFullyPopulated:
            return  # Upstreams not ready, don't need to revise this yet.

        query = session.scalars(
            select(TI.map_index).where(
                TI.dag_id == self.dag_id,
                TI.task_id == task.task_id,
                TI.run_id == self.run_id,
            )
        )
        existing_indexes = set(query)

        # Any index beyond the new total length no longer maps to anything: mark REMOVED.
        removed_indexes = existing_indexes.difference(range(total_length))
        if removed_indexes:
            session.execute(
                update(TI)
                .where(
                    TI.dag_id == self.dag_id,
                    TI.task_id == task.task_id,
                    TI.run_id == self.run_id,
                    TI.map_index.in_(removed_indexes),
                )
                .values(state=TaskInstanceState.REMOVED)
            )
            session.flush()

        # Create TIs for any newly needed map indexes.
        for index in range(total_length):
            if index in existing_indexes:
                continue
            ti = TI(task, run_id=self.run_id, map_index=index, state=None, dag_version_id=dag_version_id)
            self.log.debug("Expanding TIs upserted %s", ti)
            task_instance_mutation_hook(ti)
            ti = session.merge(ti)
            ti.refresh_from_task(task)
            session.flush()
            yield ti
    @classmethod
    @provide_session
    def get_latest_runs(cls, session: Session = NEW_SESSION) -> list[DagRun]:
        """Return the latest DagRun for each DAG."""
        # "Latest" is determined by the maximum logical_date per dag_id.
        subquery = (
            select(cls.dag_id, func.max(cls.logical_date).label("logical_date"))
            .group_by(cls.dag_id)
            .subquery()
        )
        return list(
            session.scalars(
                select(cls).join(
                    subquery,
                    and_(cls.dag_id == subquery.c.dag_id, cls.logical_date == subquery.c.logical_date),
                )
            ).all()
        )
    @provide_session
    def schedule_tis(
        self,
        schedulable_tis: Iterable[TI],
        session: Session = NEW_SESSION,
        max_tis_per_query: int | None = None,
    ) -> int:
        """
        Set the given task instances in to the scheduled state.

        Each element of ``schedulable_tis`` should have its ``task`` attribute already set.

        Any EmptyOperator without ``on_execute_callback`` or ``on_success_callback`` or ``inlets`` or
        ``outlets`` is instead set straight to the success state, without execution.

        All the TIs should belong to this DagRun, but this code is in the hot-path, this is not checked -- it
        is the caller's responsibility to call this function only with TIs from a single dag run.

        :return: number of task-instance rows updated.
        """
        # Get list of TI IDs that do not need to executed, these are
        # tasks using EmptyOperator and without on_execute_callback / on_success_callback
        empty_ti_ids: list[str] = []
        schedulable_ti_ids: list[str] = []
        for ti in schedulable_tis:
            if ti.is_schedulable:
                schedulable_ti_ids.append(ti.id)
            # Check "start_trigger_args" to see whether the operator supports
            # start execution from triggerer. If so, we'll check "start_from_trigger"
            # to see whether this feature is turned on and defer this task.
            # If not, we'll add this "ti" into "schedulable_ti_ids" and later
            # execute it to run in the worker.
            # TODO TaskSDK: This is disabled since we haven't figured out how
            # to render start_from_trigger in the scheduler. If we need to
            # render the value in a worker, it kind of defeats the purpose of
            # this feature (which is to save a worker process if possible).
            # elif task.start_trigger_args is not None:
            #     if task.expand_start_from_trigger(context=ti.get_template_context()):
            #         ti.start_date = timezone.utcnow()
            #         if ti.state != TaskInstanceState.UP_FOR_RESCHEDULE:
            #             ti.try_number += 1
            #         ti.defer_task(exception=None, session=session)
            #     else:
            #         schedulable_ti_ids.append(ti.id)
            else:
                empty_ti_ids.append(ti.id)

        count = 0
        if schedulable_ti_ids:
            # Chunked bulk UPDATE keeps statement size bounded when many TIs are scheduled.
            schedulable_ti_ids_chunks = chunks(
                schedulable_ti_ids, max_tis_per_query or len(schedulable_ti_ids)
            )
            for id_chunk in schedulable_ti_ids_chunks:
                result = session.execute(
                    update(TI)
                    .where(TI.id.in_(id_chunk))
                    .values(
                        state=TaskInstanceState.SCHEDULED,
                        scheduled_dttm=timezone.utcnow(),
                        # try_number only increments for a fresh attempt, not when
                        # resuming from UP_FOR_RESCHEDULE.
                        try_number=case(
                            (
                                or_(TI.state.is_(None), TI.state != TaskInstanceState.UP_FOR_RESCHEDULE),
                                TI.try_number + 1,
                            ),
                            else_=TI.try_number,
                        ),
                    )
                    .execution_options(synchronize_session=False)
                )
                count += getattr(result, "rowcount", 0)

        # Tasks using EmptyOperator should not be executed, mark them as success
        if empty_ti_ids:
            dummy_ti_ids_chunks = chunks(empty_ti_ids, max_tis_per_query or len(empty_ti_ids))
            for id_chunk in dummy_ti_ids_chunks:
                result = session.execute(
                    update(TI)
                    .where(TI.id.in_(id_chunk))
                    .values(
                        state=TaskInstanceState.SUCCESS,
                        start_date=timezone.utcnow(),
                        end_date=timezone.utcnow(),
                        duration=0,
                        try_number=TI.try_number + 1,
                    )
                    .execution_options(
                        synchronize_session=False,
                    )
                )
                count += getattr(result, "rowcount", 0)
        return count
@provide_session
def get_log_template(self, *, session: Session = NEW_SESSION) -> LogTemplate:
    """Return the ``LogTemplate`` row associated with this DAG run.

    Delegates to :meth:`_get_log_template` with this run's
    ``log_template_id`` (which may be ``None`` for runs created before
    log templates existed).
    """
    return DagRun._get_log_template(log_template_id=self.log_template_id, session=session)
@staticmethod
@provide_session
def _get_log_template(log_template_id: int | None, session: Session = NEW_SESSION) -> LogTemplate:
    """Fetch the ``LogTemplate`` row for *log_template_id*.

    :param log_template_id: Primary key of the template, or ``None`` for
        runs that predate the LogTemplate table.
    :raises AirflowException: if no matching row exists.
    """
    template: LogTemplate | None
    if log_template_id is None:  # DagRun created before LogTemplate introduction.
        # Fall back to the oldest template row — presumably the one in
        # effect when the run was created; confirm against migration history.
        template = session.scalar(select(LogTemplate).order_by(LogTemplate.id).limit(1))
    else:
        template = session.get(LogTemplate, log_template_id)
    if template is None:
        raise AirflowException(
            f"No log_template entry found for ID {log_template_id!r}. "
            f"Please make sure you set up the metadatabase correctly."
        )
    return template
@staticmethod
def _get_partial_task_ids(dag: SerializedDAG | None) -> list[str] | None:
    """Return the task ids of *dag* when it is a partial DAG, else ``None``."""
    if not dag or not dag.partial:
        return None
    return dag.task_ids
|
DagRun
|
python
|
readthedocs__readthedocs.org
|
readthedocs/organizations/views/public.py
|
{
"start": 4273,
"end": 4538
}
|
class ____(CheckOrganizationsEnabled, GenericView):
    """Redirect invitation links to the new view."""
    def get(self, request, *args, **kwargs):
        # Forward the invitation hash from the legacy URL into the generic
        # "invitations_redeem" view so old links keep working.
        return HttpResponseRedirect(reverse("invitations_redeem", args=[kwargs["hash"]]))
|
RedirectRedeemTeamInvitation
|
python
|
pennersr__django-allauth
|
allauth/account/forms.py
|
{
"start": 24887,
"end": 25417
}
|
class ____(PasswordVerificationMixin, forms.Form):
    """Set a new password for a user following a password-reset key link."""
    password1 = SetPasswordField(label=_("New Password"))
    password2 = PasswordField(label=_("New Password (again)"))
    def __init__(self, *args, **kwargs):
        # "user" and "temp_key" are injected by the reset view, not posted
        # by the browser; pop them before the base Form sees the kwargs.
        self.user = kwargs.pop("user", None)
        self.temp_key = kwargs.pop("temp_key", None)
        super().__init__(*args, **kwargs)
        # Expose the user on the password field — presumably consumed by
        # password validation; confirm against SetPasswordField.
        self.fields["password1"].user = self.user
    def save(self):
        """Persist the validated new password via the password-reset flow."""
        flows.password_reset.reset_password(self.user, self.cleaned_data["password1"])
|
ResetPasswordKeyForm
|
python
|
pytorch__pytorch
|
benchmarks/gpt_fast/mixtral_moe_quantize.py
|
{
"start": 4446,
"end": 5307
}
|
class ____(torch.nn.Module):
    """Linear layer storing its weight in a compact integer dtype.

    The weight buffer is de-quantized to the activation dtype on every
    forward pass, and a per-output-channel ``scales`` buffer is applied
    to the result.
    """

    __constants__ = ["in_features", "out_features"]
    in_features: int
    out_features: int
    weight: torch.Tensor

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        device=None,
        dtype=None,
        target_dtype=None,
    ) -> None:
        # NOTE(review): `bias`, `device` and `dtype` are accepted only for
        # nn.Linear signature parity and are never used — confirm intended.
        assert target_dtype is not None
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Buffers rather than Parameters: the quantized weights are not trained.
        self.register_buffer(
            "weight", torch.empty((out_features, in_features), dtype=target_dtype)
        )
        self.register_buffer("scales", torch.ones(out_features, dtype=torch.bfloat16))

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        dequantized_weight = self.weight.to(dtype=input.dtype)
        return F.linear(input, dequantized_weight) * self.scales
|
WeightOnlyInt8Linear
|
python
|
doocs__leetcode
|
solution/2200-2299/2299.Strong Password Checker II/Solution.py
|
{
"start": 0,
"end": 499
}
|
class ____:
    """LeetCode 2299: decide whether *password* is "strong"."""

    def strongPasswordCheckerII(self, password: str) -> bool:
        # A strong password needs at least 8 characters ...
        if len(password) < 8:
            return False
        # ... no two identical adjacent characters ...
        if any(a == b for a, b in zip(password, password[1:])):
            return False

        def category(ch):
            # First matching category wins, mirroring a
            # lower / upper / digit / special split.
            if ch.islower():
                return "lower"
            if ch.isupper():
                return "upper"
            if ch.isdigit():
                return "digit"
            return "special"

        # ... and at least one character from every category.
        return {category(ch) for ch in password} == {"lower", "upper", "digit", "special"}
|
Solution
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/isinstance3.py
|
{
"start": 1805,
"end": 2366
}
|
class ____(TypedDict):  # NOTE(review): type-checker fixture — the isinstance errors below are intentional
    a: int
# This should generate an error because TypedDict classes can't
# be used in an isinstance call.
if isinstance(a, TD1):
    pass
TA1 = Annotated[int, ""]  # alias used to exercise the Annotated restriction below
# This should generate two errors because Annotated can't be used
# in an isinstance call.
if isinstance(1, TA1):
    pass
# This should generate an error because Any can't be used
# in an isinstance call.
if isinstance(1, Any):
    pass
# This should generate an error because Literal can't be used
# in an isinstance call.
if isinstance(1, Literal[1, 2]):
    pass
|
TD1
|
python
|
davidhalter__jedi
|
jedi/api/completion.py
|
{
"start": 4385,
"end": 28417
}
|
class ____:
def __init__(self, inference_state, module_context, code_lines, position,
signatures_callback, fuzzy=False):
self._inference_state = inference_state
self._module_context = module_context
self._module_node = module_context.tree_node
self._code_lines = code_lines
# The first step of completions is to get the name
self._like_name = helpers.get_on_completion_name(self._module_node, code_lines, position)
# The actual cursor position is not what we need to calculate
# everything. We want the start of the name we're on.
self._original_position = position
self._signatures_callback = signatures_callback
self._fuzzy = fuzzy
# Return list of completions in this order:
# - Beginning with what user is typing
# - Public (alphabet)
# - Private ("_xxx")
# - Dunder ("__xxx")
def complete(self):
leaf = self._module_node.get_leaf_for_position(
self._original_position,
include_prefixes=True
)
string, start_leaf, quote = _extract_string_while_in_string(leaf, self._original_position)
prefixed_completions = complete_dict(
self._module_context,
self._code_lines,
start_leaf or leaf,
self._original_position,
None if string is None else quote + string,
fuzzy=self._fuzzy,
)
if string is not None and not prefixed_completions:
prefixed_completions = list(complete_file_name(
self._inference_state, self._module_context, start_leaf, quote, string,
self._like_name, self._signatures_callback,
self._code_lines, self._original_position,
self._fuzzy
))
if string is not None:
if not prefixed_completions and '\n' in string:
# Complete only multi line strings
prefixed_completions = self._complete_in_string(start_leaf, string)
return prefixed_completions
cached_name, completion_names = self._complete_python(leaf)
imported_names = []
if leaf.parent is not None and leaf.parent.type in ['import_as_names', 'dotted_as_names']:
imported_names.extend(extract_imported_names(leaf.parent))
completions = list(filter_names(self._inference_state, completion_names,
self.stack, self._like_name,
self._fuzzy, imported_names, cached_name=cached_name))
return (
# Removing duplicates mostly to remove False/True/None duplicates.
_remove_duplicates(prefixed_completions, completions)
+ sorted(completions, key=lambda x: (not x.name.startswith(self._like_name),
x.name.startswith('__'),
x.name.startswith('_'),
x.name.lower()))
)
def _complete_python(self, leaf):
"""
Analyzes the current context of a completion and decides what to
return.
Technically this works by generating a parser stack and analysing the
current stack for possible grammar nodes.
Possible enhancements:
- global/nonlocal search global
- yield from / raise from <- could be only exceptions/generators
- In args: */**: no completion
- In params (also lambda): no completion before =
"""
grammar = self._inference_state.grammar
self.stack = stack = None
self._position = (
self._original_position[0],
self._original_position[1] - len(self._like_name)
)
cached_name = None
try:
self.stack = stack = helpers.get_stack_at_position(
grammar, self._code_lines, leaf, self._position
)
except helpers.OnErrorLeaf as e:
value = e.error_leaf.value
if value == '.':
# After ErrorLeaf's that are dots, we will not do any
# completions since this probably just confuses the user.
return cached_name, []
# If we don't have a value, just use global completion.
return cached_name, self._complete_global_scope()
allowed_transitions = \
list(stack._allowed_transition_names_and_token_types())
if 'if' in allowed_transitions:
leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
previous_leaf = leaf.get_previous_leaf()
indent = self._position[1]
if not (leaf.start_pos <= self._position <= leaf.end_pos):
indent = leaf.start_pos[1]
if previous_leaf is not None:
stmt = previous_leaf
while True:
stmt = search_ancestor(
stmt, 'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt',
'error_node',
)
if stmt is None:
break
type_ = stmt.type
if type_ == 'error_node':
first = stmt.children[0]
if isinstance(first, Leaf):
type_ = first.value + '_stmt'
# Compare indents
if stmt.start_pos[1] == indent:
if type_ == 'if_stmt':
allowed_transitions += ['elif', 'else']
elif type_ == 'try_stmt':
allowed_transitions += ['except', 'finally', 'else']
elif type_ == 'for_stmt':
allowed_transitions.append('else')
completion_names = []
kwargs_only = False
if any(t in allowed_transitions for t in (PythonTokenTypes.NAME,
PythonTokenTypes.INDENT)):
# This means that we actually have to do type inference.
nonterminals = [stack_node.nonterminal for stack_node in stack]
nodes = _gather_nodes(stack)
if nodes and nodes[-1] in ('as', 'def', 'class'):
# No completions for ``with x as foo`` and ``import x as foo``.
# Also true for defining names as a class or function.
return cached_name, list(self._complete_inherited(is_function=True))
elif "import_stmt" in nonterminals:
level, names = parse_dotted_names(nodes, "import_from" in nonterminals)
only_modules = not ("import_from" in nonterminals and 'import' in nodes)
completion_names += self._get_importer_names(
names,
level,
only_modules=only_modules,
)
elif nonterminals[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
dot = self._module_node.get_leaf_for_position(self._position)
if dot.type == "endmarker":
# This is a bit of a weird edge case, maybe we can somehow
# generalize this.
dot = leaf.get_previous_leaf()
cached_name, n = self._complete_trailer(dot.get_previous_leaf())
completion_names += n
elif self._is_parameter_completion():
completion_names += self._complete_params(leaf)
else:
# Apparently this looks like it's good enough to filter most cases
# so that signature completions don't randomly appear.
# To understand why this works, three things are important:
# 1. trailer with a `,` in it is either a subscript or an arglist.
# 2. If there's no `,`, it's at the start and only signatures start
# with `(`. Other trailers could start with `.` or `[`.
# 3. Decorators are very primitive and have an optional `(` with
# optional arglist in them.
if nodes[-1] in ['(', ','] \
and nonterminals[-1] in ('trailer', 'arglist', 'decorator'):
signatures = self._signatures_callback(*self._position)
if signatures:
call_details = signatures[0]._call_details
used_kwargs = list(call_details.iter_used_keyword_arguments())
positional_count = call_details.count_positional_arguments()
completion_names += _get_signature_param_names(
signatures,
positional_count,
used_kwargs,
)
kwargs_only = _must_be_kwarg(signatures, positional_count, used_kwargs)
if not kwargs_only:
completion_names += self._complete_global_scope()
completion_names += self._complete_inherited(is_function=False)
if not kwargs_only:
current_line = self._code_lines[self._position[0] - 1][:self._position[1]]
completion_names += self._complete_keywords(
allowed_transitions,
only_values=not (not current_line or current_line[-1] in ' \t.;'
and current_line[-3:] != '...')
)
return cached_name, completion_names
def _is_parameter_completion(self):
tos = self.stack[-1]
if tos.nonterminal == 'lambdef' and len(tos.nodes) == 1:
# We are at the position `lambda `, where basically the next node
# is a param.
return True
if tos.nonterminal in 'parameters':
# Basically we are at the position `foo(`, there's nothing there
# yet, so we have no `typedargslist`.
return True
# var args is for lambdas and typed args for normal functions
return tos.nonterminal in ('typedargslist', 'varargslist') and tos.nodes[-1] == ','
def _complete_params(self, leaf):
stack_node = self.stack[-2]
if stack_node.nonterminal == 'parameters':
stack_node = self.stack[-3]
if stack_node.nonterminal == 'funcdef':
context = get_user_context(self._module_context, self._position)
node = search_ancestor(leaf, 'error_node', 'funcdef')
if node is not None:
if node.type == 'error_node':
n = node.children[0]
if n.type == 'decorators':
decorators = n.children
elif n.type == 'decorator':
decorators = [n]
else:
decorators = []
else:
decorators = node.get_decorators()
function_name = stack_node.nodes[1]
return complete_param_names(context, function_name.value, decorators)
return []
def _complete_keywords(self, allowed_transitions, only_values):
for k in allowed_transitions:
if isinstance(k, str) and k.isalpha():
if not only_values or k in ('True', 'False', 'None'):
yield keywords.KeywordName(self._inference_state, k)
def _complete_global_scope(self):
context = get_user_context(self._module_context, self._position)
debug.dbg('global completion scope: %s', context)
flow_scope_node = get_flow_scope_node(self._module_node, self._position)
filters = get_global_filters(
context,
self._position,
flow_scope_node
)
completion_names = []
for filter in filters:
completion_names += filter.values()
return completion_names
def _complete_trailer(self, previous_leaf):
inferred_context = self._module_context.create_context(previous_leaf)
values = infer_call_of_leaf(inferred_context, previous_leaf)
debug.dbg('trailer completion values: %s', values, color='MAGENTA')
# The cached name simply exists to make speed optimizations for certain
# modules.
cached_name = None
if len(values) == 1:
v, = values
if v.is_module():
if len(v.string_names) == 1:
module_name = v.string_names[0]
if module_name in ('numpy', 'tensorflow', 'matplotlib', 'pandas'):
cached_name = module_name
return cached_name, self._complete_trailer_for_values(values)
def _complete_trailer_for_values(self, values):
user_context = get_user_context(self._module_context, self._position)
return complete_trailer(user_context, values)
def _get_importer_names(self, names, level=0, only_modules=True):
names = [n.value for n in names]
i = imports.Importer(self._inference_state, names, self._module_context, level)
return i.completion_names(self._inference_state, only_modules=only_modules)
def _complete_inherited(self, is_function=True):
"""
Autocomplete inherited methods when overriding in child class.
"""
leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
cls = tree.search_ancestor(leaf, 'classdef')
if cls is None:
return
# Complete the methods that are defined in the super classes.
class_value = self._module_context.create_value(cls)
if cls.start_pos[1] >= leaf.start_pos[1]:
return
filters = class_value.get_filters(is_instance=True)
# The first dict is the dictionary of class itself.
next(filters)
for filter in filters:
for name in filter.values():
# TODO we should probably check here for properties
if (name.api_type == 'function') == is_function:
yield name
def _complete_in_string(self, start_leaf, string):
"""
To make it possible for people to have completions in doctests or
generally in "Python" code in docstrings, we use the following
heuristic:
- Having an indented block of code
- Having some doctest code that starts with `>>>`
- Having backticks that doesn't have whitespace inside it
"""
def iter_relevant_lines(lines):
include_next_line = False
for l in code_lines:
if include_next_line or l.startswith('>>>') or l.startswith(' '):
yield re.sub(r'^( *>>> ?| +)', '', l)
else:
yield None
include_next_line = bool(re.match(' *>>>', l))
string = dedent(string)
code_lines = split_lines(string, keepends=True)
relevant_code_lines = list(iter_relevant_lines(code_lines))
if relevant_code_lines[-1] is not None:
# Some code lines might be None, therefore get rid of that.
relevant_code_lines = ['\n' if c is None else c for c in relevant_code_lines]
return self._complete_code_lines(relevant_code_lines)
match = re.search(r'`([^`\s]+)', code_lines[-1])
if match:
return self._complete_code_lines([match.group(1)])
return []
def _complete_code_lines(self, code_lines):
module_node = self._inference_state.grammar.parse(''.join(code_lines))
module_value = DocstringModule(
in_module_context=self._module_context,
inference_state=self._inference_state,
module_node=module_node,
code_lines=code_lines,
)
return Completion(
self._inference_state,
module_value.as_context(),
code_lines=code_lines,
position=module_node.end_pos,
signatures_callback=lambda *args, **kwargs: [],
fuzzy=self._fuzzy
).complete()
def _gather_nodes(stack):
nodes = []
for stack_node in stack:
if stack_node.dfa.from_rule == 'small_stmt':
nodes = []
else:
nodes += stack_node.nodes
return nodes
_string_start = re.compile(r'^\w*(\'{3}|"{3}|\'|")')
def _extract_string_while_in_string(leaf, position):
    """Return ``(string_body_so_far, start_leaf, quote_with_prefix)`` when
    *position* is inside a (possibly unterminated) string literal, otherwise
    ``(None, None, None)``.
    """
    def return_part_of_leaf(leaf):
        kwargs = {}
        if leaf.line == position[0]:
            # Only match up to the cursor column on the cursor's own line.
            kwargs['endpos'] = position[1] - leaf.column
        match = _string_start.match(leaf.value, **kwargs)
        if not match:
            return None, None, None
        start = match.group(0)
        if leaf.line == position[0] and position[1] < leaf.column + match.end():
            # The cursor sits inside the prefix/quote itself, not the body.
            return None, None, None
        return cut_value_at_position(leaf, position)[match.end():], leaf, start
    if position < leaf.start_pos:
        return None, None, None
    if leaf.type == 'string':
        return return_part_of_leaf(leaf)
    leaves = []
    while leaf is not None:
        if leaf.type == 'error_leaf' and ('"' in leaf.value or "'" in leaf.value):
            if len(leaf.value) > 1:
                # The error leaf holds the quote plus some following content.
                return return_part_of_leaf(leaf)
            # Lone quote: look for a string-literal prefix (r/u/b/f) directly
            # before it (no whitespace in between, hence the prefix check).
            prefix_leaf = None
            if not leaf.prefix:
                prefix_leaf = leaf.get_previous_leaf()
                if prefix_leaf is None or prefix_leaf.type != 'name' \
                        or not all(c in 'rubf' for c in prefix_leaf.value.lower()):
                    prefix_leaf = None
            return (
                ''.join(cut_value_at_position(l, position) for l in leaves),
                prefix_leaf or leaf,
                ('' if prefix_leaf is None else prefix_leaf.value)
                + cut_value_at_position(leaf, position),
            )
        if leaf.line != position[0]:
            # Multi line strings are always simple error leaves and contain the
            # whole string, single line error leaves are therefore important
            # now and since the line is different, it's not really a single
            # line string anymore.
            break
        leaves.insert(0, leaf)
        leaf = leaf.get_previous_leaf()
    return None, None, None
def complete_trailer(user_context, values):
    """Gather completion names for attribute access (``value.<cursor>``) on *values*."""
    completion_names = []
    for value in values:
        for filter in value.get_filters(origin_scope=user_context.tree_node):
            completion_names += filter.values()
        if not value.is_stub() and isinstance(value, TreeInstance):
            # Heuristic: also look through __getattr__-style proxy objects.
            completion_names += _complete_getattr(user_context, value)
    # Include names from converted counterparts of the values that aren't
    # already present — presumably stub <-> runtime conversions; confirm
    # against convert_values.
    python_values = convert_values(values)
    for c in python_values:
        if c not in values:
            for filter in c.get_filters(origin_scope=user_context.tree_node):
                completion_names += filter.values()
    return completion_names
def _complete_getattr(user_context, instance):
"""
A heuristic to make completion for proxy objects work. This is not
intended to work in all cases. It works exactly in this case:
def __getattr__(self, name):
...
return getattr(any_object, name)
It is important that the return contains getattr directly, otherwise it
won't work anymore. It's really just a stupid heuristic. It will not
work if you write e.g. `return (getatr(o, name))`, because of the
additional parentheses. It will also not work if you move the getattr
to some other place that is not the return statement itself.
It is intentional that it doesn't work in all cases. Generally it's
really hard to do even this case (as you can see below). Most people
will write it like this anyway and the other ones, well they are just
out of luck I guess :) ~dave.
"""
names = (instance.get_function_slot_names('__getattr__')
or instance.get_function_slot_names('__getattribute__'))
functions = ValueSet.from_sets(
name.infer()
for name in names
)
for func in functions:
tree_node = func.tree_node
if tree_node is None or tree_node.type != 'funcdef':
continue
for return_stmt in tree_node.iter_return_stmts():
# Basically until the next comment we just try to find out if a
# return statement looks exactly like `return getattr(x, name)`.
if return_stmt.type != 'return_stmt':
continue
atom_expr = return_stmt.children[1]
if atom_expr.type != 'atom_expr':
continue
atom = atom_expr.children[0]
trailer = atom_expr.children[1]
if len(atom_expr.children) != 2 or atom.type != 'name' \
or atom.value != 'getattr':
continue
arglist = trailer.children[1]
if arglist.type != 'arglist' or len(arglist.children) < 3:
continue
context = func.as_context()
object_node = arglist.children[0]
# Make sure it's a param: foo in __getattr__(self, foo)
name_node = arglist.children[2]
name_list = context.goto(name_node, name_node.start_pos)
if not any(n.api_type == 'param' for n in name_list):
continue
# Now that we know that these are most probably completion
# objects, we just infer the object and return them as
# completions.
objects = context.infer_node(object_node)
return complete_trailer(user_context, objects)
return []
def search_in_module(inference_state, module_context, names, wanted_names,
wanted_type, complete=False, fuzzy=False,
ignore_imports=False, convert=False):
for s in wanted_names[:-1]:
new_names = []
for n in names:
if s == n.string_name:
if n.tree_name is not None and n.api_type in ('module', 'namespace') \
and ignore_imports:
continue
new_names += complete_trailer(
module_context,
n.infer()
)
debug.dbg('dot lookup on search %s from %s', new_names, names[:10])
names = new_names
last_name = wanted_names[-1].lower()
for n in names:
string = n.string_name.lower()
if complete and helpers.match(string, last_name, fuzzy=fuzzy) \
or not complete and string == last_name:
if isinstance(n, SubModuleName):
names = [v.name for v in n.infer()]
else:
names = [n]
if convert:
names = convert_names(names)
for n2 in names:
if complete:
def_ = classes.Completion(
inference_state, n2,
stack=None,
like_name_length=len(last_name),
is_fuzzy=fuzzy,
)
else:
def_ = classes.Name(inference_state, n2)
if not wanted_type or wanted_type == def_.type:
yield def_
def extract_imported_names(node):
    """Recursively collect imported names from an import node, skipping aliases."""
    collected = []
    if node.type not in ('import_as_names', 'dotted_as_names', 'dotted_as_name', 'import_as_name'):
        return collected
    for position, child in enumerate(node.children):
        if child.type == 'name':
            # A name right after the "as" keyword is the alias, not an import.
            preceded_by_as = (
                position > 1
                and node.children[position - 1].type == 'keyword'
                and node.children[position - 1].value == 'as'
            )
            if not preceded_by_as:
                collected.append(child.value)
        elif child.type in ('import_as_name', 'dotted_as_name'):
            collected.extend(extract_imported_names(child))
    return collected
|
Completion
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-kth-character-in-expanded-string.py
|
{
"start": 38,
"end": 430
}
|
class ____(object):
    """Find the k-th character of the word-prefix-expanded form of *s*.

    Fix: the original used the Python-2-only builtin ``xrange``; ``range``
    iterates identically under Python 2 and also works under Python 3.
    """

    def kthCharacter(self, s, k):
        """
        :type s: str
        :type k: int
        :rtype: str
        """
        # Running length of the current word; a space resets it.
        word_len = 0
        for i in range(len(s)):
            if s[i] == ' ':
                word_len = 0
                k -= 1  # the space itself consumes one expanded position
            else:
                word_len += 1
            # Each non-space position contributes the whole current prefix.
            k -= word_len
            if k < 0:
                break
        return s[i]
|
Solution
|
python
|
getsentry__sentry
|
tests/sentry/notifications/notification_action/test_issue_alert_registry_handlers.py
|
{
"start": 20883,
"end": 21228
}
|
class ____(TestTicketingIssueAlertHandlerBase):
    """Exercise the Jira issue-alert handler against every recorded action blob."""
    def setUp(self) -> None:
        super().setUp()
        self.handler = JiraIssueAlertHandler()
    def test_build_rule_action_blob(self) -> None:
        # Every fixture blob must build correctly for Action.Type.JIRA.
        for expected in JIRA_ACTION_DATA_BLOBS:
            self._test_build_rule_action_blob(expected, Action.Type.JIRA)
|
TestJiraIssueAlertHandler
|
python
|
scikit-learn__scikit-learn
|
sklearn/ensemble/_bagging.py
|
{
"start": 42617,
"end": 53263
}
|
class ____(RegressorMixin, BaseBagging):
"""A Bagging regressor.
A Bagging regressor is an ensemble meta-estimator that fits base
regressors each on random subsets of the original dataset and then
aggregate their individual predictions (either by voting or by averaging)
to form a final prediction. Such a meta-estimator can typically be used as
a way to reduce the variance of a black-box estimator (e.g., a decision
tree), by introducing randomization into its construction procedure and
then making an ensemble out of it.
This algorithm encompasses several works from the literature. When random
subsets of the dataset are drawn as random subsets of the samples, then
this algorithm is known as Pasting [1]_. If samples are drawn with
replacement, then the method is known as Bagging [2]_. When random subsets
of the dataset are drawn as random subsets of the features, then the method
is known as Random Subspaces [3]_. Finally, when base estimators are built
on subsets of both samples and features, then the method is known as
Random Patches [4]_.
Read more in the :ref:`User Guide <bagging>`.
.. versionadded:: 0.15
Parameters
----------
estimator : object, default=None
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a
:class:`~sklearn.tree.DecisionTreeRegressor`.
.. versionadded:: 1.2
`base_estimator` was renamed to `estimator`.
n_estimators : int, default=10
The number of base estimators in the ensemble.
max_samples : int or float, default=1.0
The number of samples to draw from X to train each base estimator (with
replacement by default, see `bootstrap` for more details).
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
max_features : int or float, default=1.0
The number of features to draw from X to train each base estimator (
without replacement by default, see `bootstrap_features` for more
details).
- If int, then draw `max_features` features.
- If float, then draw `max(1, int(max_features * n_features_in_))` features.
bootstrap : bool, default=True
Whether samples are drawn with replacement. If False, sampling without
replacement is performed. If fitting with `sample_weight`, it is
strongly recommended to choose True, as only drawing with replacement
will ensure the expected frequency semantics of `sample_weight`.
bootstrap_features : bool, default=False
Whether features are drawn with replacement.
oob_score : bool, default=False
Whether to use out-of-bag samples to estimate
the generalization error. Only available if bootstrap=True.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit
a whole new ensemble. See :term:`the Glossary <warm_start>`.
n_jobs : int, default=None
The number of jobs to run in parallel for both :meth:`fit` and
:meth:`predict`. ``None`` means 1 unless in a
:obj:`joblib.parallel_backend` context. ``-1`` means using all
processors. See :term:`Glossary <n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls the random resampling of the original dataset
(sample wise and feature wise).
If the base estimator accepts a `random_state` attribute, a different
seed is generated for each instance in the ensemble.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
verbose : int, default=0
Controls the verbosity when fitting and predicting.
Attributes
----------
estimator_ : estimator
The base estimator from which the ensemble is grown.
.. versionadded:: 1.2
`base_estimator_` was renamed to `estimator_`.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
estimators_ : list of estimators
The collection of fitted sub-estimators.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator. Each subset is defined by an array of the indices selected.
estimators_features_ : list of arrays
The subset of drawn features for each base estimator.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
This attribute exists only when ``oob_score`` is True.
oob_prediction_ : ndarray of shape (n_samples,)
Prediction computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_prediction_` might contain NaN. This attribute exists only
when ``oob_score`` is True.
See Also
--------
BaggingClassifier : A Bagging classifier.
References
----------
.. [1] L. Breiman, "Pasting small votes for classification in large
databases and on-line", Machine Learning, 36(1), 85-103, 1999.
.. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
1996.
.. [3] T. Ho, "The random subspace method for constructing decision
forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
1998.
.. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
Learning and Knowledge Discovery in Databases, 346-361, 2012.
Examples
--------
>>> from sklearn.svm import SVR
>>> from sklearn.ensemble import BaggingRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_samples=100, n_features=4,
... n_informative=2, n_targets=1,
... random_state=0, shuffle=False)
>>> regr = BaggingRegressor(estimator=SVR(),
... n_estimators=10, random_state=0).fit(X, y)
>>> regr.predict([[0, 0, 0, 0]])
array([-2.8720])
"""
def __init__(
self,
estimator=None,
n_estimators=10,
*,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
oob_score=False,
warm_start=False,
n_jobs=None,
random_state=None,
verbose=0,
):
super().__init__(
estimator=estimator,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
bootstrap=bootstrap,
bootstrap_features=bootstrap_features,
oob_score=oob_score,
warm_start=warm_start,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
)
def predict(self, X, **params):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the estimators in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `predict` method of the sub-estimators via the
metadata routing API.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted values.
"""
_raise_for_params(params, self, "predict")
check_is_fitted(self)
# Check data
X = validate_data(
self,
X,
accept_sparse=["csr", "csc"],
dtype=None,
ensure_all_finite=False,
reset=False,
)
if _routing_enabled():
routed_params = process_routing(self, "predict", **params)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(predict=Bunch())
# Parallel loop
n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_regression)(
self.estimators_[starts[i] : starts[i + 1]],
self.estimators_features_[starts[i] : starts[i + 1]],
X,
params=routed_params.estimator.predict,
)
for i in range(n_jobs)
)
# Reduce
y_hat = sum(all_y_hat) / self.n_estimators
return y_hat
def _set_oob_score(self, X, y):
n_samples = y.shape[0]
predictions = np.zeros((n_samples,))
n_predictions = np.zeros((n_samples,))
for estimator, samples, features in zip(
self.estimators_, self.estimators_samples_, self.estimators_features_
):
# Create mask for OOB samples
mask = ~indices_to_mask(samples, n_samples)
predictions[mask] += estimator.predict((X[mask, :])[:, features])
n_predictions[mask] += 1
if (n_predictions == 0).any():
warn(
"Some inputs do not have OOB scores. "
"This probably means too few estimators were used "
"to compute any reliable oob estimates."
)
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
self.oob_score_ = r2_score(y, predictions)
def _get_estimator(self):
"""Resolve which estimator to return (default is DecisionTreeClassifier)"""
if self.estimator is None:
return DecisionTreeRegressor()
return self.estimator
|
BaggingRegressor
|
python
|
plotly__plotly.py
|
plotly/graph_objs/choroplethmapbox/unselected/_marker.py
|
{
"start": 233,
"end": 2399
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "choroplethmapbox.unselected"
_path_str = "choroplethmapbox.unselected.marker"
_valid_props = {"opacity"}
@property
def opacity(self):
"""
Sets the marker opacity of unselected points, applied only when
a selection exists.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def _prop_descriptions(self):
return """\
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
"""
def __init__(self, arg=None, opacity=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.choroplethmapb
ox.unselected.Marker`
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.choroplethmapbox.unselected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmapbox.unselected.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("opacity", arg, opacity)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Marker
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/asset_graph.py
|
{
"start": 57342,
"end": 57558
}
|
class ____(graphene.ObjectType):
class Meta:
name = "CodeLocation"
repositoryName = graphene.NonNull(graphene.String)
repositoryLocationName = graphene.NonNull(graphene.String)
|
GrapheneCodeLocation
|
python
|
apache__airflow
|
airflow-core/tests/unit/models/test_serialized_dag.py
|
{
"start": 2813,
"end": 28165
}
|
class ____:
"""Unit tests for SerializedDagModel."""
@pytest.fixture(
autouse=True,
params=[
pytest.param(False, id="raw-serialized_dags"),
pytest.param(True, id="compress-serialized_dags"),
],
)
def setup_test_cases(self, request, monkeypatch):
db.clear_db_dags()
db.clear_db_runs()
db.clear_db_serialized_dags()
with mock.patch("airflow.models.serialized_dag.COMPRESS_SERIALIZED_DAGS", request.param):
yield
db.clear_db_serialized_dags()
def _write_example_dags(self):
example_dags = make_example_dags(example_dags_module)
for dag in example_dags.values():
SDM.write_dag(LazyDeserializedDAG.from_dag(dag), bundle_name="testing")
return example_dags
def test_write_dag(self, testing_dag_bundle):
"""DAGs can be written into database"""
example_dags = self._write_example_dags()
with create_session() as session:
for dag in example_dags.values():
assert SDM.has_dag(dag.dag_id)
result = session.query(SDM).filter(SDM.dag_id == dag.dag_id).one()
assert result.dag_version.dag_code.fileloc == dag.fileloc
# Verifies JSON schema.
SerializedDAG.validate_schema(result.data)
def test_write_dag_when_python_callable_name_changes(self, dag_maker, session):
def my_callable():
pass
with dag_maker("dag1"):
PythonOperator(task_id="task1", python_callable=my_callable)
dag_maker.create_dagrun(run_id="test1")
with dag_maker("dag1"):
PythonOperator(task_id="task1", python_callable=lambda x: None)
dag_maker.create_dagrun(run_id="test2", logical_date=pendulum.datetime(2025, 1, 1))
assert len(session.query(DagVersion).all()) == 2
with dag_maker("dag2"):
@task_decorator
def my_callable():
pass
my_callable()
dag_maker.create_dagrun(run_id="test3", logical_date=pendulum.datetime(2025, 1, 2))
with dag_maker("dag2"):
@task_decorator
def my_callable2():
pass
my_callable2()
assert len(session.query(DagVersion).all()) == 4
def test_serialized_dag_is_updated_if_dag_is_changed(self, testing_dag_bundle):
"""Test Serialized DAG is updated if DAG is changed"""
example_dags = make_example_dags(example_dags_module)
example_bash_op_dag = example_dags.get("example_bash_operator")
dag_updated = SDM.write_dag(
dag=LazyDeserializedDAG.from_dag(example_bash_op_dag),
bundle_name="testing",
)
assert dag_updated is True
s_dag = SDM.get(example_bash_op_dag.dag_id)
s_dag.dag.create_dagrun(
run_id="test1",
run_after=pendulum.datetime(2025, 1, 1, tz="UTC"),
state=DagRunState.QUEUED,
triggered_by=DagRunTriggeredByType.TEST,
run_type=DagRunType.MANUAL,
)
# Test that if DAG is not changed, Serialized DAG is not re-written and last_updated
# column is not updated
dag_updated = SDM.write_dag(
dag=LazyDeserializedDAG.from_dag(example_bash_op_dag),
bundle_name="testing",
)
s_dag_1 = SDM.get(example_bash_op_dag.dag_id)
assert s_dag_1.dag_hash == s_dag.dag_hash
assert s_dag.created_at == s_dag_1.created_at
assert dag_updated is False
# Update DAG
example_bash_op_dag.tags.add("new_tag")
assert example_bash_op_dag.tags == {"example", "example2", "new_tag"}
dag_updated = SDM.write_dag(
dag=LazyDeserializedDAG.from_dag(example_bash_op_dag),
bundle_name="testing",
)
s_dag_2 = SDM.get(example_bash_op_dag.dag_id)
assert s_dag.created_at != s_dag_2.created_at
assert s_dag.dag_hash != s_dag_2.dag_hash
assert s_dag_2.data["dag"]["tags"] == ["example", "example2", "new_tag"]
assert dag_updated is True
def test_read_dags(self):
"""DAGs can be read from database."""
example_dags = self._write_example_dags()
serialized_dags = SDM.read_all_dags()
assert len(example_dags) == len(serialized_dags)
for dag_id, dag in example_dags.items():
serialized_dag = serialized_dags[dag_id]
assert serialized_dag.dag_id == dag.dag_id
assert set(serialized_dag.task_dict) == set(dag.task_dict)
def test_read_all_dags_only_picks_the_latest_serdags(self, session):
example_dags = self._write_example_dags()
serialized_dags = SDM.read_all_dags()
assert len(example_dags) == len(serialized_dags)
dag = example_dags.get("example_bash_operator")
SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag=dag)).create_dagrun(
run_id="test1",
run_after=pendulum.datetime(2025, 1, 1, tz="UTC"),
state=DagRunState.QUEUED,
triggered_by=DagRunTriggeredByType.TEST,
run_type=DagRunType.MANUAL,
)
dag.doc_md = "new doc string"
SDM.write_dag(LazyDeserializedDAG.from_dag(dag), bundle_name="testing")
serialized_dags2 = SDM.read_all_dags()
sdags = session.query(SDM).all()
# assert only the latest SDM is returned
assert len(sdags) != len(serialized_dags2)
def test_order_of_dag_params_is_stable(self):
"""
This asserts that we have logic in place which guarantees the order
of the params is maintained - even if the backend (e.g. MySQL) mutates
the serialized DAG JSON.
"""
example_dags = make_example_dags(example_dags_module)
example_params_trigger_ui = example_dags.get("example_params_trigger_ui")
before = list(example_params_trigger_ui.params.keys())
SDM.write_dag(LazyDeserializedDAG.from_dag(example_params_trigger_ui), bundle_name="testing")
retrieved_dag = SDM.get_dag("example_params_trigger_ui")
after = list(retrieved_dag.params.keys())
assert before == after
@pytest.mark.db_test
def test_order_of_deps_is_consistent(self, session):
"""
Previously the 'dag_dependencies' node in serialized dag was converted to list from set.
This caused the order, and thus the hash value, to be unreliable, which could produce
excessive dag parsing.
"""
db.clear_db_assets()
session.add_all([AssetModel(id=i, uri=f"test://asset{i}/", name=f"{i}") for i in range(1, 6)])
session.add_all([AssetModel(id=i, uri=f"test://asset{i}/", name=f"{i}*") for i in (0, 6)])
session.commit()
first_dag_hash = None
for _ in range(10):
with DAG(
dag_id="example",
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
schedule=[
Asset(uri="test://asset1", name="1"),
Asset(uri="test://asset2", name="2"),
Asset(uri="test://asset3", name="3"),
Asset(uri="test://asset4", name="4"),
Asset(uri="test://asset5", name="5"),
],
) as dag6:
BashOperator(
task_id="any",
outlets=[Asset(uri="test://asset0", name="0*"), Asset(uri="test://asset6", name="6*")],
bash_command="sleep 5",
)
deps_order = [x["label"] for x in SerializedDAG.serialize_dag(dag6)["dag_dependencies"]]
# in below assert, 0 and 6 both come at end because "source" is different for them and source
# is the first field in DagDependency class
assert deps_order == ["1", "2", "3", "4", "5", "0*", "6*"]
# for good measure, let's check that the dag hash is consistent
dag_json = json.dumps(SerializedDAG.to_dict(dag6), sort_keys=True).encode("utf-8")
this_dag_hash = md5(dag_json).hexdigest()
# set first dag hash on first pass
if first_dag_hash is None:
first_dag_hash = this_dag_hash
# dag hash should not change without change in structure (we're in a loop)
assert this_dag_hash == first_dag_hash
db.clear_db_assets()
def test_example_dag_hashes_are_always_consistent(self, session):
"""
This test asserts that the hashes of the example dags are always consistent.
"""
def get_hash_set():
example_dags = self._write_example_dags()
ordered_example_dags = dict(sorted(example_dags.items()))
hashes = set()
dag_hash_map = {}
for dag_id in ordered_example_dags.keys():
smd = session.execute(select(SDM.dag_hash).where(SDM.dag_id == dag_id)).one()
hashes.add(smd.dag_hash)
dag_hash_map[dag_id] = smd.dag_hash
# TODO: Remove this logging once the origin of flaky test is identified and fixed.
# Log (dag_id, hash) pairs for debugging flaky test failures.
for dag_id, dag_hash in sorted(dag_hash_map.items()):
logger.info("(%s, %s)", dag_id, dag_hash)
return hashes
first_hashes = get_hash_set()
# assert that the hashes are the same
assert first_hashes == get_hash_set()
def test_get_latest_serdag_versions(self, dag_maker, session):
# first dag
with dag_maker("dag1") as dag:
EmptyOperator(task_id="task1")
sync_dag_to_db(dag, session=session)
dag_maker.create_dagrun()
with dag_maker("dag1") as dag:
EmptyOperator(task_id="task1")
EmptyOperator(task_id="task2")
sync_dag_to_db(dag, session=session)
dag_maker.create_dagrun(run_id="test2", logical_date=pendulum.datetime(2025, 1, 1))
# second dag
with dag_maker("dag2") as dag:
EmptyOperator(task_id="task1")
sync_dag_to_db(dag, session=session)
dag_maker.create_dagrun(run_id="test3", logical_date=pendulum.datetime(2025, 1, 2))
with dag_maker("dag2") as dag:
EmptyOperator(task_id="task1")
EmptyOperator(task_id="task2")
sync_dag_to_db(dag, session=session)
# Total serdags should be 4
assert session.scalar(select(func.count()).select_from(SDM)) == 4
latest_versions = SDM.get_latest_serialized_dags(dag_ids=["dag1", "dag2"], session=session)
assert len(latest_versions) == 2
def test_new_dag_versions_are_not_created_if_no_dagruns(self, dag_maker, session):
with dag_maker("dag1") as dag:
PythonOperator(task_id="task1", python_callable=lambda: None)
assert session.query(SDM).count() == 1
sdm1 = SDM.get(dag.dag_id, session=session)
dag_hash = sdm1.dag_hash
created_at = sdm1.created_at
last_updated = sdm1.last_updated
# new task
PythonOperator(task_id="task2", python_callable=lambda: None, dag=dag)
SDM.write_dag(LazyDeserializedDAG.from_dag(dag), bundle_name="dag_maker")
sdm2 = SDM.get(dag.dag_id, session=session)
assert sdm2.dag_hash != dag_hash # first recorded serdag
assert sdm2.created_at == created_at
assert sdm2.last_updated != last_updated
assert session.query(DagVersion).count() == 1
assert session.query(SDM).count() == 1
def test_new_dag_versions_are_created_if_there_is_a_dagrun(self, dag_maker, session):
with dag_maker("dag1") as dag:
PythonOperator(task_id="task1", python_callable=lambda: None)
dag_maker.create_dagrun(run_id="test3", logical_date=pendulum.datetime(2025, 1, 2))
assert session.query(SDM).count() == 1
assert session.query(DagVersion).count() == 1
# new task
PythonOperator(task_id="task2", python_callable=lambda: None, dag=dag)
SDM.write_dag(LazyDeserializedDAG.from_dag(dag), bundle_name="dag_maker")
assert session.query(DagVersion).count() == 2
assert session.query(SDM).count() == 2
def test_example_dag_sorting_serialised_dag(self, session):
"""
This test asserts if different dag ids -- simple or complex, can be sorted
"""
example_dags = self._write_example_dags()
for _, dag in example_dags.items():
# flip the tags, the sorting function should sort it alphabetically
if dag.tags:
dag.tags = sorted(dag.tags, reverse=True)
sorted_dag = SDM._sort_serialized_dag_dict(dag)
assert sorted_dag == dag
def test_get_dependencies(self, session):
self._write_example_dags()
dag_id = "consumes_asset_decorator"
dependencies = SDM.get_dag_dependencies(session=session)
assert dag_id in dependencies
# Simulate deleting the DAG from file.
session.execute(update(DagModel).where(DagModel.dag_id == dag_id).values(is_stale=True))
dependencies = SDM.get_dag_dependencies(session=session)
assert dag_id not in dependencies
def test_get_dependencies_with_asset_ref(self, dag_maker, session):
asset_name = "name"
asset_uri = "test://asset1"
asset_id = 1
db.clear_db_assets()
session.add_all(
[
AssetModel(id=asset_id, uri=asset_uri, name=asset_name),
AssetActive(uri=asset_uri, name=asset_name),
]
)
session.commit()
with dag_maker(
dag_id="test_get_dependencies_with_asset_ref_example",
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
schedule=[Asset.ref(uri=asset_uri), Asset.ref(uri="test://no-such-asset/")],
) as dag:
BashOperator(task_id="any", bash_command="sleep 5")
sync_dag_to_db(dag, session=session)
dependencies = SDM.get_dag_dependencies(session=session)
assert dependencies == {
"test_get_dependencies_with_asset_ref_example": [
DagDependency(
source="asset",
target="test_get_dependencies_with_asset_ref_example",
label=asset_name,
dependency_type="asset",
dependency_id=f"{asset_id}",
),
DagDependency(
source="asset-uri-ref",
target="test_get_dependencies_with_asset_ref_example",
label="test://no-such-asset/",
dependency_type="asset-uri-ref",
dependency_id="test://no-such-asset/",
),
]
}
db.clear_db_assets()
def test_get_dependencies_with_asset_alias(self, dag_maker, session):
db.clear_db_assets()
asset_name = "name"
asset_uri = "test://asset1"
asset_id = 1
asset_model = AssetModel(id=asset_id, uri=asset_uri, name=asset_name)
aam1 = AssetAliasModel(name="alias_1") # resolve to asset
aam2 = AssetAliasModel(name="alias_2") # resolve to nothing
session.add_all([aam1, aam2, asset_model, AssetActive.for_asset(asset_model)])
aam1.assets.append(asset_model)
session.commit()
with dag_maker(
dag_id="test_get_dependencies_with_asset_alias",
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
schedule=[AssetAlias(name="alias_1"), AssetAlias(name="alias_2")],
) as dag:
BashOperator(task_id="any", bash_command="sleep 5")
sync_dag_to_db(dag, session=session)
dependencies = SDM.get_dag_dependencies(session=session)
assert dependencies == {
"test_get_dependencies_with_asset_alias": [
DagDependency(
source="asset",
target="asset-alias:alias_1",
label="name",
dependency_type="asset",
dependency_id="1",
),
DagDependency(
source="asset:1",
target="test_get_dependencies_with_asset_alias",
label="alias_1",
dependency_type="asset-alias",
dependency_id="alias_1",
),
DagDependency(
source="asset-alias",
target="test_get_dependencies_with_asset_alias",
label="alias_2",
dependency_type="asset-alias",
dependency_id="alias_2",
),
]
}
db.clear_db_assets()
@pytest.mark.parametrize(
("provide_interval", "new_task", "should_write"),
[
(True, True, False),
(True, False, False),
(False, True, True),
(False, False, False),
],
)
def test_min_update_interval_is_respected(self, provide_interval, new_task, should_write, dag_maker):
min_update_interval = 10 if provide_interval else 0
with dag_maker("dag1") as dag:
PythonOperator(task_id="task1", python_callable=lambda: None)
if new_task:
PythonOperator(task_id="task2", python_callable=lambda: None, dag=dag)
did_write = SDM.write_dag(
LazyDeserializedDAG.from_dag(dag),
bundle_name="dag_maker",
min_update_interval=min_update_interval,
)
assert did_write is should_write
def test_new_dag_version_created_when_bundle_name_changes_and_hash_unchanged(self, dag_maker, session):
"""Test that new dag_version is created if bundle_name changes but DAG is unchanged."""
# Create and write initial DAG
initial_bundle = "bundleA"
with dag_maker("test_dag_update_bundle", bundle_name=initial_bundle) as dag:
EmptyOperator(task_id="task1")
# Create TIs
dag_maker.create_dagrun(run_id="test_run")
assert session.query(DagVersion).count() == 1
# Write the same DAG (no changes, so hash is the same) with a new bundle_name
new_bundle = "bundleB"
SDM.write_dag(LazyDeserializedDAG.from_dag(dag), bundle_name=new_bundle)
# There should now be two versions of the DAG
assert session.query(DagVersion).count() == 2
def test_hash_method_removes_fileloc_and_remains_consistent(self):
"""Test that the hash method removes fileloc before hashing."""
test_data = {
"__version": 1,
"dag": {
"fileloc": "/path/to/dag.py",
"dag_id": "test_dag",
"tasks": {
"task1": {"task_id": "task1"},
},
},
}
hash_with_fileloc = SDM.hash(test_data)
# Modify only the top-level dag.fileloc path (simulating file location changes)
test_data["dag"]["fileloc"] = "/different/path/to/dag.py"
# Get hash with different top-level fileloc (should be the same)
hash_with_different_fileloc = SDM.hash(test_data)
# Hashes should be identical since top-level dag.fileloc is removed before hashing
assert hash_with_fileloc == hash_with_different_fileloc
# Verify that the original data still has fileloc (method shouldn't modify original)
assert "fileloc" in test_data["dag"]
assert test_data["dag"]["fileloc"] == "/different/path/to/dag.py"
def test_dynamic_dag_update_preserves_null_check(self, dag_maker, session):
"""
Test that dynamic DAG update gracefully handles case where SerializedDagModel doesn't exist.
This preserves the null-check fix from PR #56422 and tests the direct UPDATE path.
"""
with dag_maker(dag_id="test_missing_serdag", serialized=True, session=session) as dag:
EmptyOperator(task_id="task1")
# Write the DAG first
lazy_dag = LazyDeserializedDAG.from_dag(dag)
SDM.write_dag(
dag=lazy_dag,
bundle_name="test_bundle",
bundle_version=None,
session=session,
)
session.commit()
# Get the dag_version
dag_version = session.scalar(
select(DagVersion).where(DagVersion.dag_id == "test_missing_serdag").limit(1)
)
assert dag_version is not None
# Manually delete SerializedDagModel (simulates edge case)
session.query(SDM).filter(SDM.dag_id == "test_missing_serdag").delete()
session.commit()
# Verify no SerializedDagModel exists
assert SDM.get("test_missing_serdag", session=session) is None
# Try to update - should return False gracefully (not crash)
result = SDM.write_dag(
dag=lazy_dag,
bundle_name="test_bundle",
bundle_version=None,
min_update_interval=None,
session=session,
)
assert result is False # Should return False when SerializedDagModel is missing
def test_dynamic_dag_update_success(self, dag_maker, session):
"""
Test that dynamic DAG update successfully updates the serialized DAG hash
when no task instances exist.
"""
with dag_maker(dag_id="test_dynamic_success", session=session) as dag:
EmptyOperator(task_id="task1")
# Write the DAG first
lazy_dag = LazyDeserializedDAG.from_dag(dag)
result1 = SDM.write_dag(
dag=lazy_dag,
bundle_name="test_bundle",
bundle_version=None,
session=session,
)
session.commit()
assert result1 is True
initial_sdag = SDM.get("test_dynamic_success", session=session)
assert initial_sdag is not None
initial_hash = initial_sdag.dag_hash
# Modify the DAG (add a task)
EmptyOperator(task_id="task2", dag=dag)
lazy_dag_updated = LazyDeserializedDAG.from_dag(dag)
# Write again - should use UPDATE path (no task instances yet)
result2 = SDM.write_dag(
dag=lazy_dag_updated,
bundle_name="test_bundle",
bundle_version=None,
session=session,
)
session.commit()
# Verify update succeeded
assert result2 is True
updated_sdag = SDM.get("test_dynamic_success", session=session)
assert updated_sdag.dag_hash != initial_hash # Hash should change
assert len(updated_sdag.dag.task_dict) == 2 # Should have 2 tasks now
def test_write_dag_atomicity_on_dagcode_failure(self, dag_maker, session):
"""
Test that SerializedDagModel.write_dag maintains atomicity.
If DagCode.write_code fails, the entire transaction should rollback,
including the DagVersion. This test verifies that DagVersion is not
committed separately, which would leave orphaned records.
This test would fail if DagVersion.write_dag() was used (which commits
immediately), because the DagVersion would be persisted even though
the rest of the transaction failed.
"""
from airflow.models.dagcode import DagCode
with dag_maker("test_atomicity_dag"):
EmptyOperator(task_id="task1")
dag = dag_maker.dag
initial_version_count = session.query(DagVersion).filter(DagVersion.dag_id == dag.dag_id).count()
assert initial_version_count == 1, "Should have one DagVersion after initial write"
dag_maker.create_dagrun() # ensure the second dag version is created
EmptyOperator(task_id="task2", dag=dag)
modified_lazy_dag = LazyDeserializedDAG.from_dag(dag)
# Mock DagCode.write_code to raise an exception
with mock.patch.object(
DagCode, "write_code", side_effect=RuntimeError("Simulated DagCode.write_code failure")
):
with pytest.raises(RuntimeError, match="Simulated DagCode.write_code failure"):
SDM.write_dag(
dag=modified_lazy_dag,
bundle_name="testing",
bundle_version=None,
session=session,
)
session.rollback()
# Verify that no new DagVersion was committed
# Use a fresh session to ensure we're reading from committed data
with create_session() as fresh_session:
final_version_count = (
fresh_session.query(DagVersion).filter(DagVersion.dag_id == dag.dag_id).count()
)
assert final_version_count == initial_version_count, (
"DagVersion should not be committed when DagCode.write_code fails"
)
sdag = SDM.get(dag.dag_id, session=fresh_session)
assert sdag is not None, "Original SerializedDagModel should still exist"
assert len(sdag.dag.task_dict) == 1, (
"SerializedDagModel should not be updated when write fails"
)
|
TestSerializedDagModel
|
python
|
Unity-Technologies__ml-agents
|
ml-agents-envs/mlagents_envs/rpc_utils.py
|
{
"start": 2521,
"end": 16457
}
|
class ____:
"""
Simple file-like class that wraps a bytes, and allows moving its "start"
position in the bytes. This is only used for reading concatenated PNGs,
because Pillow always calls seek(0) at the start of reading.
"""
__slots__ = ["fp", "offset"]
def __init__(self, data: bytes):
self.fp = io.BytesIO(data)
self.offset = 0
def seek(self, offset: int, whence: int = io.SEEK_SET) -> int:
if whence == io.SEEK_SET:
res = self.fp.seek(offset + self.offset)
return res - self.offset
raise NotImplementedError()
def tell(self) -> int:
return self.fp.tell() - self.offset
def read(self, size: int = -1) -> bytes:
return self.fp.read(size)
def original_tell(self) -> int:
"""
Returns the offset into the original byte array
"""
return self.fp.tell()
@timed
def process_pixels(
image_bytes: bytes, expected_channels: int, mappings: Optional[List[int]] = None
) -> np.ndarray:
"""
Converts byte array observation image into numpy array, re-sizes it,
and optionally converts it to grey scale
:param image_bytes: input byte array corresponding to image
:param expected_channels: Expected output channels
:return: processed numpy array of observation from environment
"""
image_fp = OffsetBytesIO(image_bytes)
image_arrays = []
# Read the images back from the bytes (without knowing the sizes).
while True:
with hierarchical_timer("image_decompress"):
image = Image.open(image_fp)
# Normally Image loads lazily, load() forces it to do loading in the timer scope.
image.load()
image_arrays.append(
np.moveaxis(np.array(image, dtype=np.float32) / 255.0, -1, 0)
)
# Look for the next header, starting from the current stream location
try:
new_offset = image_bytes.index(PNG_HEADER, image_fp.original_tell())
image_fp.offset = new_offset
except ValueError:
# Didn't find the header, so must be at the end.
break
if mappings is not None and len(mappings) > 0:
return _process_images_mapping(image_arrays, mappings)
else:
return _process_images_num_channels(image_arrays, expected_channels)
def _process_images_mapping(image_arrays, mappings):
"""
Helper function for processing decompressed images with compressed channel mappings.
"""
image_arrays = np.concatenate(image_arrays, axis=0).transpose((0, 1, 2))
if len(mappings) != len(image_arrays):
raise UnityObservationException(
f"Compressed observation and its mapping had different number of channels - "
f"observation had {len(image_arrays)} channels but its mapping had {len(mappings)} channels"
)
if len({m for m in mappings if m > -1}) != max(mappings) + 1:
raise UnityObservationException(
f"Invalid Compressed Channel Mapping: the mapping {mappings} does not have the correct format."
)
if max(mappings) >= len(image_arrays):
raise UnityObservationException(
f"Invalid Compressed Channel Mapping: the mapping has index larger than the total "
f"number of channels in observation - mapping index {max(mappings)} is"
f"invalid for input observation with {len(image_arrays)} channels."
)
processed_image_arrays: List[np.array] = [[] for _ in range(max(mappings) + 1)]
for mapping_idx, img in zip(mappings, image_arrays):
if mapping_idx > -1:
processed_image_arrays[mapping_idx].append(img)
for i, img_array in enumerate(processed_image_arrays):
processed_image_arrays[i] = np.mean(img_array, axis=0)
img = np.stack(processed_image_arrays, axis=0)
return img
def _process_images_num_channels(image_arrays, expected_channels):
"""
Helper function for processing decompressed images with number of expected channels.
This is for old API without mapping provided. Use the first n channel, n=expected_channels.
"""
if expected_channels == 1:
# Convert to grayscale
img = np.mean(image_arrays[0], axis=0)
img = np.reshape(img, [1, img.shape[0], img.shape[1]])
else:
img = np.concatenate(image_arrays, axis=0)
# We can drop additional channels since they may need to be added to include
# numbers of observation channels not divisible by 3.
actual_channels = list(img.shape)[0]
if actual_channels > expected_channels:
img = img[0:expected_channels, ...]
return img
def _check_observations_match_spec(
obs_index: int,
observation_spec: ObservationSpec,
agent_info_list: Collection[AgentInfoProto],
) -> None:
"""
Check that all the observations match the expected size.
This gives a nicer error than a cryptic numpy error later.
"""
expected_obs_shape = tuple(observation_spec.shape)
for agent_info in agent_info_list:
agent_obs_shape = tuple(agent_info.observations[obs_index].shape)
if expected_obs_shape != agent_obs_shape:
raise UnityObservationException(
f"Observation at index={obs_index} for agent with "
f"id={agent_info.id} didn't match the ObservationSpec. "
f"Expected shape {expected_obs_shape} but got {agent_obs_shape}."
)
@timed
def _observation_to_np_array(
obs: ObservationProto, expected_shape: Optional[Iterable[int]] = None
) -> np.ndarray:
"""
Converts observation proto into numpy array of the appropriate size.
:param obs: observation proto to be converted
:param expected_shape: optional shape information, used for sanity checks.
:return: processed numpy array of observation from environment
"""
if expected_shape is not None:
if list(obs.shape) != list(expected_shape):
raise UnityObservationException(
f"Observation did not have the expected shape - got {obs.shape} but expected {expected_shape}"
)
expected_channels = obs.shape[0]
if obs.compression_type == COMPRESSION_TYPE_NONE:
img = np.array(obs.float_data.data, dtype=np.float32)
img = np.reshape(img, obs.shape)
return img
else:
img = process_pixels(
obs.compressed_data, expected_channels, list(obs.compressed_channel_mapping)
)
# Compare decompressed image size to observation shape and make sure they match
if list(obs.shape) != list(img.shape):
raise UnityObservationException(
f"Decompressed observation did not have the expected shape - "
f"decompressed had {img.shape} but expected {obs.shape}"
)
return img
@timed
def _process_maybe_compressed_observation(
obs_index: int,
observation_spec: ObservationSpec,
agent_info_list: Collection[AgentInfoProto],
) -> np.ndarray:
shape = cast(Tuple[int, int, int], observation_spec.shape)
if len(agent_info_list) == 0:
return np.zeros((0, shape[0], shape[1], shape[2]), dtype=np.float32)
try:
batched_visual = [
_observation_to_np_array(agent_obs.observations[obs_index], shape)
for agent_obs in agent_info_list
]
except ValueError:
# Try to get a more useful error message
_check_observations_match_spec(obs_index, observation_spec, agent_info_list)
# If that didn't raise anything, raise the original error
raise
return np.array(batched_visual, dtype=np.float32)
def _raise_on_nan_and_inf(data: np.array, source: str) -> np.array:
# Check for NaNs or Infinite values in the observation or reward data.
# If there's a NaN in the observations, the np.mean() result will be NaN
# If there's an Infinite value (either sign) then the result will be Inf
# See https://stackoverflow.com/questions/6736590/fast-check-for-nan-in-numpy for background
# Note that a very large values (larger than sqrt(float_max)) will result in an Inf value here
# Raise a Runtime error in the case that NaNs or Infinite values make it into the data.
if data.size == 0:
return data
d = np.mean(data)
has_nan = np.isnan(d)
has_inf = not np.isfinite(d)
if has_nan:
raise RuntimeError(f"The {source} provided had NaN values.")
if has_inf:
raise RuntimeError(f"The {source} provided had Infinite values.")
@timed
def _process_rank_one_or_two_observation(
obs_index: int,
observation_spec: ObservationSpec,
agent_info_list: Collection[AgentInfoProto],
) -> np.ndarray:
if len(agent_info_list) == 0:
return np.zeros((0,) + observation_spec.shape, dtype=np.float32)
try:
np_obs = np.array(
[
agent_obs.observations[obs_index].float_data.data
for agent_obs in agent_info_list
],
dtype=np.float32,
).reshape((len(agent_info_list),) + observation_spec.shape)
except ValueError:
# Try to get a more useful error message
_check_observations_match_spec(obs_index, observation_spec, agent_info_list)
# If that didn't raise anything, raise the original error
raise
_raise_on_nan_and_inf(np_obs, "observations")
return np_obs
@timed
def steps_from_proto(
agent_info_list: Collection[AgentInfoProto], behavior_spec: BehaviorSpec
) -> Tuple[DecisionSteps, TerminalSteps]:
decision_agent_info_list = [
agent_info for agent_info in agent_info_list if not agent_info.done
]
terminal_agent_info_list = [
agent_info for agent_info in agent_info_list if agent_info.done
]
decision_obs_list: List[np.ndarray] = []
terminal_obs_list: List[np.ndarray] = []
for obs_index, observation_spec in enumerate(behavior_spec.observation_specs):
is_visual = len(observation_spec.shape) == 3
if is_visual:
decision_obs_list.append(
_process_maybe_compressed_observation(
obs_index, observation_spec, decision_agent_info_list
)
)
terminal_obs_list.append(
_process_maybe_compressed_observation(
obs_index, observation_spec, terminal_agent_info_list
)
)
else:
decision_obs_list.append(
_process_rank_one_or_two_observation(
obs_index, observation_spec, decision_agent_info_list
)
)
terminal_obs_list.append(
_process_rank_one_or_two_observation(
obs_index, observation_spec, terminal_agent_info_list
)
)
decision_rewards = np.array(
[agent_info.reward for agent_info in decision_agent_info_list], dtype=np.float32
)
terminal_rewards = np.array(
[agent_info.reward for agent_info in terminal_agent_info_list], dtype=np.float32
)
decision_group_rewards = np.array(
[agent_info.group_reward for agent_info in decision_agent_info_list],
dtype=np.float32,
)
terminal_group_rewards = np.array(
[agent_info.group_reward for agent_info in terminal_agent_info_list],
dtype=np.float32,
)
_raise_on_nan_and_inf(decision_rewards, "rewards")
_raise_on_nan_and_inf(terminal_rewards, "rewards")
_raise_on_nan_and_inf(decision_group_rewards, "group_rewards")
_raise_on_nan_and_inf(terminal_group_rewards, "group_rewards")
decision_group_id = [agent_info.group_id for agent_info in decision_agent_info_list]
terminal_group_id = [agent_info.group_id for agent_info in terminal_agent_info_list]
max_step = np.array(
[agent_info.max_step_reached for agent_info in terminal_agent_info_list],
dtype=bool,
)
decision_agent_id = np.array(
[agent_info.id for agent_info in decision_agent_info_list], dtype=np.int32
)
terminal_agent_id = np.array(
[agent_info.id for agent_info in terminal_agent_info_list], dtype=np.int32
)
action_mask = None
if behavior_spec.action_spec.discrete_size > 0:
if any(
[agent_info.action_mask is not None]
for agent_info in decision_agent_info_list
):
n_agents = len(decision_agent_info_list)
a_size = np.sum(behavior_spec.action_spec.discrete_branches)
mask_matrix = np.ones((n_agents, a_size), dtype=bool)
for agent_index, agent_info in enumerate(decision_agent_info_list):
if agent_info.action_mask is not None:
if len(agent_info.action_mask) == a_size:
mask_matrix[agent_index, :] = [
False if agent_info.action_mask[k] else True
for k in range(a_size)
]
action_mask = (1 - mask_matrix).astype(bool)
indices = _generate_split_indices(
behavior_spec.action_spec.discrete_branches
)
action_mask = np.split(action_mask, indices, axis=1)
return (
DecisionSteps(
decision_obs_list,
decision_rewards,
decision_agent_id,
action_mask,
decision_group_id,
decision_group_rewards,
),
TerminalSteps(
terminal_obs_list,
terminal_rewards,
max_step,
terminal_agent_id,
terminal_group_id,
terminal_group_rewards,
),
)
def _generate_split_indices(dims):
if len(dims) <= 1:
return ()
result = (dims[0],)
for i in range(len(dims) - 2):
result += (dims[i + 1] + result[i],)
return result
|
OffsetBytesIO
|
python
|
cherrypy__cherrypy
|
cherrypy/test/test_conn.py
|
{
"start": 30557,
"end": 31261
}
|
class ____(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_No_CRLF(self):
self.persistent = True
conn = self.HTTP_CONN
conn.send(b'GET /hello HTTP/1.1\n\n')
response = conn.response_class(conn.sock, method='GET')
response.begin()
self.body = response.read()
self.assertBody('HTTP requires CRLF terminators')
conn.close()
conn.connect()
conn.send(b'GET /hello HTTP/1.1\r\n\n')
response = conn.response_class(conn.sock, method='GET')
response.begin()
self.body = response.read()
self.assertBody('HTTP requires CRLF terminators')
conn.close()
|
BadRequestTests
|
python
|
sqlalchemy__sqlalchemy
|
test/engine/test_execute.py
|
{
"start": 13609,
"end": 30371
}
|
class ____(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", INT, primary_key=True, autoincrement=False),
Column("user_name", VARCHAR(20)),
)
Table(
"users_autoinc",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
)
def test_no_params_option(self):
stmt = (
"SELECT '%'"
+ testing.db.dialect.statement_compiler(
testing.db.dialect, None
).default_from()
)
with testing.db.connect() as conn:
result = (
conn.execution_options(no_parameters=True)
.exec_driver_sql(stmt)
.scalar()
)
eq_(result, "%")
@testing.requires.qmark_paramstyle
def test_raw_qmark(self, connection):
conn = connection
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (?, ?)",
(1, "jack"),
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (?, ?)",
(2, "fred"),
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (?, ?)",
[(3, "ed"), (4, "horse")],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (?, ?)",
[(5, "barney"), (6, "donkey")],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (?, ?)",
(7, "sally"),
)
res = conn.exec_driver_sql("select * from users order by user_id")
assert res.fetchall() == [
(1, "jack"),
(2, "fred"),
(3, "ed"),
(4, "horse"),
(5, "barney"),
(6, "donkey"),
(7, "sally"),
]
res = conn.exec_driver_sql(
"select * from users where user_name=?", ("jack",)
)
assert res.fetchall() == [(1, "jack")]
@testing.requires.format_paramstyle
def test_raw_sprintf(self, connection):
conn = connection
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (%s, %s)",
(1, "jack"),
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (%s, %s)",
[(2, "ed"), (3, "horse")],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (%s, %s)",
(4, "sally"),
)
conn.exec_driver_sql("insert into users (user_id) values (%s)", (5,))
res = conn.exec_driver_sql("select * from users order by user_id")
assert res.fetchall() == [
(1, "jack"),
(2, "ed"),
(3, "horse"),
(4, "sally"),
(5, None),
]
res = conn.exec_driver_sql(
"select * from users where user_name=%s", ("jack",)
)
assert res.fetchall() == [(1, "jack")]
@testing.requires.pyformat_paramstyle
def test_raw_python(self, connection):
conn = connection
conn.exec_driver_sql(
"insert into users (user_id, user_name) "
"values (%(id)s, %(name)s)",
{"id": 1, "name": "jack"},
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) "
"values (%(id)s, %(name)s)",
[{"id": 2, "name": "ed"}, {"id": 3, "name": "horse"}],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) "
"values (%(id)s, %(name)s)",
dict(id=4, name="sally"),
)
res = conn.exec_driver_sql("select * from users order by user_id")
assert res.fetchall() == [
(1, "jack"),
(2, "ed"),
(3, "horse"),
(4, "sally"),
]
@testing.requires.named_paramstyle
def test_raw_named(self, connection):
conn = connection
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (:id, :name)",
{"id": 1, "name": "jack"},
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (:id, :name)",
[{"id": 2, "name": "ed"}, {"id": 3, "name": "horse"}],
)
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (:id, :name)",
{"id": 4, "name": "sally"},
)
res = conn.exec_driver_sql("select * from users order by user_id")
assert res.fetchall() == [
(1, "jack"),
(2, "ed"),
(3, "horse"),
(4, "sally"),
]
def test_raw_tuple_params(self, connection):
"""test #7820
There was an apparent improvement in the distill params
methodology used in exec_driver_sql which allows raw tuples to
pass through. In 1.4 there seems to be a _distill_cursor_params()
function that says it can handle this kind of parameter, but it isn't
used and when I tried to substitute it in for exec_driver_sql(),
things still fail.
In any case, add coverage here for the use case of passing
direct tuple params to exec_driver_sql including as the first
param, to note that it isn't mis-interpreted the way it is
in 1.x.
"""
with patch.object(connection.dialect, "do_execute") as do_exec:
connection.exec_driver_sql(
"UPDATE users SET user_name = 'query_one' WHERE "
"user_id = %s OR user_id IN %s",
(3, (1, 2)),
)
connection.exec_driver_sql(
"UPDATE users SET user_name = 'query_two' WHERE "
"user_id IN %s OR user_id = %s",
((1, 2), 3),
)
eq_(
do_exec.mock_calls,
[
call(
mock.ANY,
"UPDATE users SET user_name = 'query_one' "
"WHERE user_id = %s OR user_id IN %s",
connection.dialect.execute_sequence_format((3, (1, 2))),
mock.ANY,
),
call(
mock.ANY,
"UPDATE users SET user_name = 'query_two' "
"WHERE user_id IN %s OR user_id = %s",
connection.dialect.execute_sequence_format(((1, 2), 3)),
mock.ANY,
),
],
)
def test_non_dict_mapping(self, connection):
"""ensure arbitrary Mapping works for execute()"""
class NotADict(collections_abc.Mapping):
def __init__(self, _data):
self._data = _data
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self):
return self._data.keys()
nd = NotADict({"a": 10, "b": 15})
eq_(dict(nd), {"a": 10, "b": 15})
result = connection.execute(
select(
bindparam("a", type_=Integer), bindparam("b", type_=Integer)
),
nd,
)
eq_(result.first(), (10, 15))
def test_row_works_as_mapping(self, connection):
"""ensure the RowMapping object works as a parameter dictionary for
execute."""
result = connection.execute(
select(literal(10).label("a"), literal(15).label("b"))
)
row = result.first()
eq_(row, (10, 15))
eq_(row._mapping, {"a": 10, "b": 15})
result = connection.execute(
select(
bindparam("a", type_=Integer).label("a"),
bindparam("b", type_=Integer).label("b"),
),
row._mapping,
)
row = result.first()
eq_(row, (10, 15))
eq_(row._mapping, {"a": 10, "b": 15})
def test_exception_wrapping_dbapi(self):
with testing.db.connect() as conn:
assert_raises_message(
tsa.exc.DBAPIError,
r"not_a_valid_statement",
conn.exec_driver_sql,
"not_a_valid_statement",
)
def test_exception_wrapping_orig_accessors(self):
de = None
with testing.db.connect() as conn:
try:
conn.exec_driver_sql("not_a_valid_statement")
except tsa.exc.DBAPIError as de_caught:
de = de_caught
assert isinstance(de.orig, conn.dialect.dbapi.Error)
# get the driver module name, the one which we know will provide
# for exceptions
top_level_dbapi_module = conn.dialect.dbapi
if isinstance(top_level_dbapi_module, AsyncAdapt_dbapi_module):
driver_module = top_level_dbapi_module.exceptions_module
else:
driver_module = top_level_dbapi_module
top_level_dbapi_module = driver_module.__name__.split(".")[0]
# check that it's not us
ne_(top_level_dbapi_module, "sqlalchemy")
# then make sure driver_exception is from that module
assert type(de.driver_exception).__module__.startswith(
top_level_dbapi_module
)
@testing.requires.sqlite
def test_exception_wrapping_non_dbapi_error(self):
e = create_engine("sqlite://")
e.dialect.is_disconnect = is_disconnect = Mock()
with e.connect() as c:
c.connection.cursor = Mock(
return_value=Mock(
execute=Mock(
side_effect=TypeError("I'm not a DBAPI error")
)
)
)
assert_raises_message(
TypeError,
"I'm not a DBAPI error",
c.exec_driver_sql,
"select ",
)
eq_(is_disconnect.call_count, 0)
def test_exception_wrapping_non_standard_dbapi_error(self):
class DBAPIError(Exception):
pass
class OperationalError(DBAPIError):
pass
class NonStandardException(OperationalError):
pass
# TODO: this test is assuming too much of arbitrary dialects and would
# be better suited tested against a single mock dialect that does not
# have any special behaviors
with (
patch.object(testing.db.dialect, "dbapi", Mock(Error=DBAPIError)),
patch.object(
testing.db.dialect, "loaded_dbapi", Mock(Error=DBAPIError)
),
patch.object(
testing.db.dialect, "is_disconnect", lambda *arg: False
),
patch.object(
testing.db.dialect,
"do_execute",
Mock(side_effect=NonStandardException),
),
patch.object(
testing.db.dialect.execution_ctx_cls,
"handle_dbapi_exception",
Mock(),
),
):
with testing.db.connect() as conn:
assert_raises(
tsa.exc.OperationalError, conn.exec_driver_sql, "select 1"
)
def test_exception_wrapping_non_dbapi_statement(self):
class MyType(TypeDecorator):
impl = Integer
cache_ok = True
def process_bind_param(self, value, dialect):
raise SomeException("nope")
def _go(conn):
assert_raises_message(
tsa.exc.StatementError,
r"\(.*.SomeException\) " r"nope\n\[SQL\: u?SELECT 1 ",
conn.execute,
select(1).where(column("foo") == literal("bar", MyType())),
)
with testing.db.connect() as conn:
_go(conn)
def test_stmt_exception_pickleable_no_dbapi(self):
self._test_stmt_exception_pickleable(Exception("hello world"))
@testing.crashes(
"postgresql+psycopg2",
"Older versions don't support cursor pickling, newer ones do",
)
@testing.fails_on(
"+mysqlconnector",
"Exception doesn't come back exactly the same from pickle",
)
@testing.fails_on(
"oracle+cx_oracle",
"cx_oracle exception seems to be having some issue with pickling",
)
@testing.fails_on(
"oracle+oracledb",
"oracledb exception seems to be having some issue with pickling",
)
def test_stmt_exception_pickleable_plus_dbapi(self):
raw = testing.db.raw_connection()
the_orig = None
try:
try:
cursor = raw.cursor()
cursor.execute("SELECTINCORRECT")
except testing.db.dialect.dbapi.Error as orig:
# py3k has "orig" in local scope...
the_orig = orig
finally:
raw.close()
self._test_stmt_exception_pickleable(the_orig)
def _test_stmt_exception_pickleable(self, orig):
for sa_exc in (
tsa.exc.StatementError(
"some error",
"select * from table",
{"foo": "bar"},
orig,
False,
),
tsa.exc.InterfaceError(
"select * from table", {"foo": "bar"}, orig, True
),
tsa.exc.NoReferencedTableError("message", "tname"),
tsa.exc.NoReferencedColumnError("message", "tname", "cname"),
tsa.exc.CircularDependencyError(
"some message", [1, 2, 3], [(1, 2), (3, 4)]
),
):
for loads, dumps in picklers():
repickled = loads(dumps(sa_exc))
eq_(repickled.args[0], sa_exc.args[0])
if isinstance(sa_exc, tsa.exc.StatementError):
eq_(repickled.params, {"foo": "bar"})
eq_(repickled.statement, sa_exc.statement)
if hasattr(sa_exc, "connection_invalidated"):
eq_(
repickled.connection_invalidated,
sa_exc.connection_invalidated,
)
eq_(repickled.orig.args[0], orig.args[0])
def test_dont_wrap_mixin(self):
class MyException(Exception, tsa.exc.DontWrapMixin):
pass
class MyType(TypeDecorator):
impl = Integer
cache_ok = True
def process_bind_param(self, value, dialect):
raise MyException("nope")
def _go(conn):
assert_raises_message(
MyException,
"nope",
conn.execute,
select(1).where(column("foo") == literal("bar", MyType())),
)
conn = testing.db.connect()
try:
_go(conn)
finally:
conn.close()
def test_empty_insert(self, connection):
"""test that execute() interprets [] as a list with no params and
warns since it has nothing to do with such an executemany.
"""
users_autoinc = self.tables.users_autoinc
with expect_deprecated(
r"Empty parameter sequence passed to execute\(\). "
"This use is deprecated and will raise an exception in a "
"future SQLAlchemy release"
):
connection.execute(
users_autoinc.insert().values(
user_name=bindparam("name", None)
),
[],
)
eq_(len(connection.execute(users_autoinc.select()).all()), 1)
@testing.only_on("sqlite")
def test_raw_insert_with_empty_list(self, connection):
"""exec_driver_sql instead does not raise if an empty list is passed.
Let the driver do that if it wants to.
"""
conn = connection
with expect_raises_message(
tsa.exc.ProgrammingError, "Incorrect number of bindings supplied"
):
conn.exec_driver_sql(
"insert into users (user_id, user_name) values (?, ?)", []
)
def test_works_after_dispose_testing_engine(self):
eng = engines.testing_engine()
for i in range(3):
with eng.connect() as conn:
eq_(conn.scalar(select(1)), 1)
eng.dispose()
|
ExecuteDriverTest
|
python
|
fluentpython__example-code
|
attic/descriptors/doc_descriptor.py
|
{
"start": 1378,
"end": 1512
}
|
class ____:
"""The "Foo" class"""
bar = DocDescriptor('The "bar" attribute')
bazz = DocDescriptor('The "bazz" attribute')
|
Foo
|
python
|
walkccc__LeetCode
|
solutions/634. Find the Derangement of An Array/634-2.py
|
{
"start": 0,
"end": 210
}
|
class ____:
def findDerangement(self, n: int) -> int:
MOD = 1_000_000_007
dp = [1] + [0] * n
for i in range(2, n + 1):
dp[i] = (i - 1) * (dp[i - 1] + dp[i - 2]) % MOD
return dp[n]
|
Solution
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-all-the-lonely-nodes.py
|
{
"start": 221,
"end": 803
}
|
class ____(object):
def getLonelyNodes(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
result = []
stk = [root]
while stk:
node = stk.pop()
if not node:
continue
if node.left and not node.right:
result.append(node.left.val)
elif node.right and not node.left:
result.append(node.right.val)
stk.append(node.right)
stk.append(node.left)
return result
# Time: O(n)
# Space: O(h)
|
Solution
|
python
|
modin-project__modin
|
modin/tests/pandas/extensions/test_base_extensions.py
|
{
"start": 3190,
"end": 4836
}
|
class ____:
def test_add_simple_method(self, backend, data_class):
expected_string_val = "Some string value"
method_name = "new_method"
@register_base_accessor(name=method_name)
def my_method_implementation(self):
return expected_string_val
modin_object = data_class([1, 2, 3]).set_backend(backend)
assert getattr(modin_object, method_name)() == expected_string_val
assert modin_object.new_method() == expected_string_val
def test_add_non_method(self, data_class, backend):
expected_val = 4
attribute_name = "four"
register_base_accessor(name=attribute_name)(expected_val)
assert data_class().set_backend(backend).four == expected_val
def test_method_uses_existing_methods(self, data_class, backend):
modin_object = data_class([1, 2, 3]).set_backend(backend)
method_name = "self_accessor"
expected_result = modin_object.sum() / modin_object.count()
@register_base_accessor(name=method_name)
def my_average(self):
return self.sum() / self.count()
if data_class is pd.DataFrame:
df_equals(modin_object.self_accessor(), expected_result)
else:
assert modin_object.self_accessor() == expected_result
def test_override_existing_method(self, data_class, backend):
modin_object = data_class([3, 2, 1])
@register_base_accessor(name="copy")
def my_copy(self, *args, **kwargs):
return self + 1
df_equals(modin_object.set_backend(backend).copy(), modin_object + 1)
|
TestOverrideMethodForAllBackends
|
python
|
doocs__leetcode
|
solution/0400-0499/0401.Binary Watch/Solution2.py
|
{
"start": 0,
"end": 303
}
|
class ____:
def readBinaryWatch(self, turnedOn: int) -> List[str]:
ans = []
for i in range(1 << 10):
h, m = i >> 6, i & 0b111111
if h < 12 and m < 60 and i.bit_count() == turnedOn:
ans.append('{:d}:{:02d}'.format(h, m))
return ans
|
Solution
|
python
|
eventlet__eventlet
|
tests/pools_test.py
|
{
"start": 5446,
"end": 5712
}
|
class ____(TestCase):
mode = 'static'
def test_abstract(self):
# Going for 100% coverage here
# A Pool cannot be used without overriding create()
pool = pools.Pool()
self.assertRaises(NotImplementedError, pool.get)
|
TestAbstract
|
python
|
chroma-core__chroma
|
chromadb/test/test_config.py
|
{
"start": 1289,
"end": 3353
}
|
class ____(Component):
def __init__(self, system: System):
data.inits += "D"
super().__init__(system)
@overrides
def start(self) -> None:
data.starts += "D"
@overrides
def stop(self) -> None:
data.stops += "D"
# Dependency Graph for tests:
# ┌───┐
# │ A │
# └┬─┬┘
# │┌▽──┐
# ││ B │
# │└┬─┬┘
# ┌▽─▽┐│
# │ C ││
# └┬──┘│
# ┌▽───▽┐
# │ D │
# └─────┘
def test_leaf_only() -> None:
settings = Settings()
system = System(settings)
reset()
d = system.instance(ComponentD)
assert isinstance(d, ComponentD)
assert data.inits == ["D"]
system.start()
assert data.starts == ["D"]
system.stop()
assert data.stops == ["D"]
def test_partial() -> None:
settings = Settings()
system = System(settings)
reset()
c = system.instance(ComponentC)
assert isinstance(c, ComponentC)
assert data.inits == ["C", "D"]
system.start()
assert data.starts == ["D", "C"]
system.stop()
assert data.stops == ["C", "D"]
def test_system_startup() -> None:
settings = Settings()
system = System(settings)
reset()
a = system.instance(ComponentA)
assert isinstance(a, ComponentA)
assert data.inits == ["A", "B", "C", "D"]
system.start()
assert data.starts == ["D", "C", "B", "A"]
system.stop()
assert data.stops == ["A", "B", "C", "D"]
def test_system_override_order() -> None:
settings = Settings()
system = System(settings)
reset()
system.instance(ComponentA)
# Deterministically shuffle the instances map to prove that topsort is actually
# working and not just implicitly working because of insertion order.
# This causes the test to actually fail if the deps are not wired up correctly.
random.seed(0)
entries = list(system._instances.items())
random.shuffle(entries)
system._instances = {k: v for k, v in entries}
system.start()
assert data.starts == ["D", "C", "B", "A"]
system.stop()
assert data.stops == ["A", "B", "C", "D"]
|
ComponentD
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/runtime/state/widgets_test.py
|
{
"start": 24989,
"end": 26135
}
|
class ____(DeltaGeneratorTestCase):
def test_get_widget_user_key(self):
state = get_script_run_ctx().session_state._state
st.checkbox("checkbox", key="c")
k = next(iter(state._keys()))
assert user_key_from_element_id(k) == "c"
def test_get_widget_user_key_none(self):
state = get_script_run_ctx().session_state._state
st.selectbox("selectbox", options=["foo", "bar"])
k = next(iter(state._keys()))
# Absence of a user key is represented as None throughout our code
assert user_key_from_element_id(k) is None
def test_get_widget_user_key_hyphens(self):
state = get_script_run_ctx().session_state._state
st.slider("slider", key="my-slider")
k = next(iter(state._keys()))
assert user_key_from_element_id(k) == "my-slider"
def test_get_widget_user_key_incorrect_none(self):
state = get_script_run_ctx().session_state._state
st.checkbox("checkbox", key="None")
k = next(iter(state._keys()))
# Incorrectly indicates no user key
assert user_key_from_element_id(k) is None
|
WidgetUserKeyTests
|
python
|
django__django
|
tests/template_tests/filter_tests/test_linenumbers.py
|
{
"start": 1243,
"end": 2043
}
|
class ____(SimpleTestCase):
def test_linenumbers(self):
self.assertEqual(linenumbers("line 1\nline 2"), "1. line 1\n2. line 2")
def test_linenumbers2(self):
self.assertEqual(
linenumbers("\n".join(["x"] * 10)),
"01. x\n02. x\n03. x\n04. x\n05. x\n06. x\n07. x\n08. x\n09. x\n10. x",
)
def test_non_string_input(self):
self.assertEqual(linenumbers(123), "1. 123")
def test_autoescape(self):
self.assertEqual(
linenumbers("foo\n<a>bar</a>\nbuz"),
"1. foo\n2. <a>bar</a>\n3. buz",
)
def test_autoescape_off(self):
self.assertEqual(
linenumbers("foo\n<a>bar</a>\nbuz", autoescape=False),
"1. foo\n2. <a>bar</a>\n3. buz",
)
|
FunctionTests
|
python
|
kamyu104__LeetCode-Solutions
|
Python/number-of-common-factors.py
|
{
"start": 61,
"end": 530
}
|
class ____(object):
def commonFactors(self, a, b):
"""
:type a: int
:type b: int
:rtype: int
"""
def gcd(a, b): # Time: O(log(min(a, b)))
while b:
a, b = b, a%b
return a
g = gcd(a, b)
result = 0
x = 1
while x*x <= g:
if g%x == 0:
result += 1 if g//x == x else 2
x += 1
return result
|
Solution
|
python
|
huggingface__transformers
|
src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py
|
{
"start": 15086,
"end": 17511
}
|
class ____(nn.Module):
"""
The neural network from the paper "Encoder-Decoder with Atrous Separable Convolution for Semantic Image
Segmentation" https://huggingface.co/papers/1802.02611
"""
def __init__(self, config: MobileNetV2Config) -> None:
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(output_size=1)
self.conv_pool = MobileNetV2ConvLayer(
config,
in_channels=apply_depth_multiplier(config, 320),
out_channels=256,
kernel_size=1,
stride=1,
use_normalization=True,
use_activation="relu",
layer_norm_eps=1e-5,
)
self.conv_aspp = MobileNetV2ConvLayer(
config,
in_channels=apply_depth_multiplier(config, 320),
out_channels=256,
kernel_size=1,
stride=1,
use_normalization=True,
use_activation="relu",
layer_norm_eps=1e-5,
)
self.conv_projection = MobileNetV2ConvLayer(
config,
in_channels=512,
out_channels=256,
kernel_size=1,
stride=1,
use_normalization=True,
use_activation="relu",
layer_norm_eps=1e-5,
)
self.dropout = nn.Dropout2d(config.classifier_dropout_prob)
self.classifier = MobileNetV2ConvLayer(
config,
in_channels=256,
out_channels=config.num_labels,
kernel_size=1,
use_normalization=False,
use_activation=False,
bias=True,
)
def forward(self, features: torch.Tensor) -> torch.Tensor:
spatial_size = features.shape[-2:]
features_pool = self.avg_pool(features)
features_pool = self.conv_pool(features_pool)
features_pool = nn.functional.interpolate(
features_pool, size=spatial_size, mode="bilinear", align_corners=True
)
features_aspp = self.conv_aspp(features)
features = torch.cat([features_pool, features_aspp], dim=1)
features = self.conv_projection(features)
features = self.dropout(features)
features = self.classifier(features)
return features
@auto_docstring(
custom_intro="""
MobileNetV2 model with a semantic segmentation head on top, e.g. for Pascal VOC.
"""
)
|
MobileNetV2DeepLabV3Plus
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_raise.py
|
{
"start": 10183,
"end": 15679
}
|
class ____(__TestCase):
def test_instance_context_instance_raise(self):
context = IndexError()
try:
try:
raise context
except:
raise OSError()
except OSError as e:
self.assertIs(e.__context__, context)
else:
self.fail("No exception raised")
def test_class_context_instance_raise(self):
context = IndexError
try:
try:
raise context
except:
raise OSError()
except OSError as e:
self.assertIsNot(e.__context__, context)
self.assertIsInstance(e.__context__, context)
else:
self.fail("No exception raised")
def test_class_context_class_raise(self):
context = IndexError
try:
try:
raise context
except:
raise OSError
except OSError as e:
self.assertIsNot(e.__context__, context)
self.assertIsInstance(e.__context__, context)
else:
self.fail("No exception raised")
def test_c_exception_context(self):
try:
try:
1/0
except:
raise OSError
except OSError as e:
self.assertIsInstance(e.__context__, ZeroDivisionError)
else:
self.fail("No exception raised")
def test_c_exception_raise(self):
try:
try:
1/0
except:
xyzzy
except NameError as e:
self.assertIsInstance(e.__context__, ZeroDivisionError)
else:
self.fail("No exception raised")
def test_noraise_finally(self):
try:
try:
pass
finally:
raise OSError
except OSError as e:
self.assertIsNone(e.__context__)
else:
self.fail("No exception raised")
def test_raise_finally(self):
try:
try:
1/0
finally:
raise OSError
except OSError as e:
self.assertIsInstance(e.__context__, ZeroDivisionError)
else:
self.fail("No exception raised")
def test_context_manager(self):
with torch._dynamo.error_on_graph_break(False):
class ContextManager:
def __enter__(self):
pass
def __exit__(self, t, v, tb):
xyzzy
try:
with ContextManager():
1/0
except NameError as e:
self.assertIsInstance(e.__context__, ZeroDivisionError)
else:
self.fail("No exception raised")
def test_cycle_broken(self):
# Self-cycles (when re-raising a caught exception) are broken
try:
try:
1/0
except ZeroDivisionError as e:
raise e
except ZeroDivisionError as e:
self.assertIsNone(e.__context__)
def test_reraise_cycle_broken(self):
# Non-trivial context cycles (through re-raising a previous exception)
# are broken too.
try:
try:
xyzzy
except NameError as a:
try:
1/0
except ZeroDivisionError:
raise a
except NameError as e:
self.assertIsNone(e.__context__.__context__)
def test_not_last(self):
# Context is not necessarily the last exception
context = Exception("context")
try:
raise context
except Exception:
try:
raise Exception("caught")
except Exception:
pass
try:
raise Exception("new")
except Exception as exc:
raised = exc
self.assertIs(raised.__context__, context)
def test_3118(self):
# deleting the generator caused the __context__ to be cleared
def gen():
try:
yield 1
finally:
pass
def f():
g = gen()
next(g)
try:
try:
raise ValueError
except:
del g
raise KeyError
except Exception as e:
self.assertIsInstance(e.__context__, ValueError)
f()
def test_3611(self):
import gc
# A re-raised exception in a __del__ caused the __context__
# to be cleared
with torch._dynamo.error_on_graph_break(False):
class C:
def __del__(self):
try:
1/0
except:
raise
def f():
x = C()
try:
try:
f.x
except AttributeError:
# make x.__del__ trigger
del x
gc.collect() # For PyPy or other GCs.
raise TypeError
except Exception as e:
self.assertNotEqual(e.__context__, None)
self.assertIsInstance(e.__context__, AttributeError)
with support.catch_unraisable_exception() as cm:
f()
self.assertEqual(ZeroDivisionError, cm.unraisable.exc_type)
|
TestContext
|
python
|
apache__avro
|
lang/py/avro/test/test_protocol.py
|
{
"start": 13749,
"end": 14974
}
|
class ____(unittest.TestCase):
"""Enable generating parse test cases over all the valid and invalid example protocols."""
def __init__(self, test_proto):
"""Ignore the normal signature for unittest.TestCase because we are generating
many test cases from this one class. This is safe as long as the autoloader
ignores this class. The autoloader will ignore this class as long as it has
no methods starting with `test_`.
"""
super().__init__("parse_valid" if test_proto.valid else "parse_invalid")
self.test_proto = test_proto
def parse_valid(self):
"""Parsing a valid protocol should not error."""
try:
self.test_proto.parse()
except avro.errors.ProtocolParseException: # pragma: no coverage
self.fail(f"Valid protocol failed to parse: {self.test_proto!s}")
def parse_invalid(self):
"""Parsing an invalid protocol should error."""
with self.assertRaises(
(avro.errors.ProtocolParseException, avro.errors.SchemaParseException),
msg=f"Invalid protocol should not have parsed: {self.test_proto!s}",
):
self.test_proto.parse()
|
ProtocolParseTestCase
|
python
|
scipy__scipy
|
scipy/spatial/tests/test_distance.py
|
{
"start": 61090,
"end": 63231
}
|
class ____:
checked_dtypes = [np.float64, np.float32, np.int32, np.int8, bool]
def test_squareform_matrix(self):
for dtype in self.checked_dtypes:
self.check_squareform_matrix(dtype)
def test_squareform_vector(self):
for dtype in self.checked_dtypes:
self.check_squareform_vector(dtype)
def check_squareform_matrix(self, dtype):
A = np.zeros((0, 0), dtype=dtype)
rA = squareform(A)
assert_equal(rA.shape, (0,))
assert_equal(rA.dtype, dtype)
A = np.zeros((1, 1), dtype=dtype)
rA = squareform(A)
assert_equal(rA.shape, (0,))
assert_equal(rA.dtype, dtype)
A = np.array([[0, 4.2], [4.2, 0]], dtype=dtype)
rA = squareform(A)
assert_equal(rA.shape, (1,))
assert_equal(rA.dtype, dtype)
assert_array_equal(rA, np.array([4.2], dtype=dtype))
def check_squareform_vector(self, dtype):
v = np.zeros((0,), dtype=dtype)
rv = squareform(v)
assert_equal(rv.shape, (1, 1))
assert_equal(rv.dtype, dtype)
assert_array_equal(rv, [[0]])
v = np.array([8.3], dtype=dtype)
rv = squareform(v)
assert_equal(rv.shape, (2, 2))
assert_equal(rv.dtype, dtype)
assert_array_equal(rv, np.array([[0, 8.3], [8.3, 0]], dtype=dtype))
def test_squareform_multi_matrix(self):
for n in range(2, 5):
self.check_squareform_multi_matrix(n)
def check_squareform_multi_matrix(self, n):
X = np.random.rand(n, 4)
Y = wpdist_no_const(X)
assert_equal(len(Y.shape), 1)
A = squareform(Y)
Yr = squareform(A)
s = A.shape
k = 0
if verbose >= 3:
print(A.shape, Y.shape, Yr.shape)
assert_equal(len(s), 2)
assert_equal(len(Yr.shape), 1)
assert_equal(s[0], s[1])
for i in range(0, s[0]):
for j in range(i + 1, s[1]):
if i != j:
assert_equal(A[i, j], Y[k])
k += 1
else:
assert_equal(A[i, j], 0)
|
TestSquareForm
|
python
|
Delgan__loguru
|
tests/exceptions/source/diagnose/attributes.py
|
{
"start": 149,
"end": 430
}
|
class ____:
@property
def forbidden(self):
raise RuntimeError
a = Obj()
a.b = "123"
def foo():
x = None
... + 1 + bar(a).b + a.forbidden + a.nope.a + x.__bool__ or a. b . isdigit() and .3 + ...
try:
foo()
except TypeError:
logger.exception("")
|
Obj
|
python
|
conda__conda
|
conda/exceptions.py
|
{
"start": 15585,
"end": 15802
}
|
class ____(CondaIOError):
def __init__(self, filepath: PathType, message: str, *args):
self.filepath = filepath
msg = f"'{filepath}'. {message}"
super().__init__(msg, *args)
|
CondaFileIOError
|
python
|
google__pytype
|
pytype/tools/traces/traces_test.py
|
{
"start": 5763,
"end": 6543
}
|
class ____(MatchAstTestCase):
"""Tests for traces.MatchAstVisitor.match_Name."""
def test_basic(self):
matches = self._get_traces("x = 42", ast.Name)
self.assertTracesEqual(matches, [((1, 0), "STORE_NAME", "x", ("int",))])
def test_multiline(self):
matches = self._get_traces("""
x = (1 +
2)
""", ast.Name)
self.assertTracesEqual(matches, [((1, 0), "STORE_NAME", "x", ("int",))])
def test_multiline_subscr(self):
matches = self._get_traces("""
x = [0]
x[0] = (1,
2)
""", ast.Name)
x_annot = "list[Union[int, tuple[int, int]]]"
self.assertTracesEqual(matches, [((1, 0), "STORE_NAME", "x", (x_annot,)),
((2, 0), "LOAD_NAME", "x", (x_annot,))])
|
MatchNameTest
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/fastapi/simple_hero_api/tutorial001_py310.py
|
{
"start": 99,
"end": 1002
}
|
class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/")
def create_hero(hero: Hero):
with Session(engine) as session:
session.add(hero)
session.commit()
session.refresh(hero)
return hero
@app.get("/heroes/")
def read_heroes():
with Session(engine) as session:
heroes = session.exec(select(Hero)).all()
return heroes
|
Hero
|
python
|
getsentry__sentry
|
src/sentry/sentry_apps/api/bases/sentryapps.py
|
{
"start": 10045,
"end": 10763
}
|
class ____(IntegrationPlatformEndpoint):
permission_classes: tuple[type[BasePermission], ...] = (SentryAppPermission,)
def convert_args(
self, request: Request, sentry_app_id_or_slug: int | str, *args: Any, **kwargs: Any
):
try:
sentry_app = SentryApp.objects.get(slug__id_or_slug=sentry_app_id_or_slug)
except SentryApp.DoesNotExist:
raise SentryAppError(message="Could not find the requested sentry app", status_code=404)
self.check_object_permissions(request, sentry_app)
sentry_sdk.get_isolation_scope().set_tag("sentry_app", sentry_app.slug)
kwargs["sentry_app"] = sentry_app
return (args, kwargs)
|
SentryAppBaseEndpoint
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_config/config_type.py
|
{
"start": 1768,
"end": 3921
}
|
class ____:
"""The class backing DagsterTypes as they are used processing configuration data."""
def __init__(
self,
key: str,
kind: ConfigTypeKind,
given_name: Optional[str] = None,
description: Optional[str] = None,
type_params: Optional[Sequence["ConfigType"]] = None,
):
self.key: str = check.str_param(key, "key")
self.kind: ConfigTypeKind = check.inst_param(kind, "kind", ConfigTypeKind)
self.given_name: Optional[str] = check.opt_str_param(given_name, "given_name")
self._description: Optional[str] = check.opt_str_param(description, "description")
self.type_params: Optional[Sequence[ConfigType]] = (
check.sequence_param(type_params, "type_params", of_type=ConfigType)
if type_params
else None
)
@property
def description(self) -> Optional[str]:
return self._description
@staticmethod
def from_builtin_enum(builtin_enum: typing.Any) -> "ConfigType":
check.invariant(BuiltinEnum.contains(builtin_enum), "param must be member of BuiltinEnum")
return _CONFIG_MAP[builtin_enum]
def post_process(self, value):
"""Implement this in order to take a value provided by the user
and perform computation on it. This can be done to coerce data types,
fetch things from the environment (e.g. environment variables), or
to do custom validation. If the value is not valid, throw a
PostProcessingError. Otherwise return the coerced value.
"""
return value
@cached_property
def snapshot(self) -> "ConfigTypeSnap":
from dagster._config.snap import snap_from_config_type
return snap_from_config_type(self)
def type_iterator(self) -> Iterator["ConfigType"]:
yield self
@cached_property
def schema_snapshot(self) -> "ConfigSchemaSnapshot":
from dagster._config.snap import ConfigSchemaSnapshot
return ConfigSchemaSnapshot(
all_config_snaps_by_key={ct.key: ct.snapshot for ct in self.type_iterator()}
)
@whitelist_for_serdes
|
ConfigType
|
python
|
doocs__leetcode
|
solution/2500-2599/2554.Maximum Number of Integers to Choose From a Range I/Solution2.py
|
{
"start": 0,
"end": 641
}
|
class ____:
def maxCount(self, banned: List[int], n: int, maxSum: int) -> int:
banned.extend([0, n + 1])
ban = sorted(x for x in set(banned) if x < n + 2)
ans = 0
for i, j in pairwise(ban):
left, right = 0, j - i - 1
while left < right:
mid = (left + right + 1) >> 1
if (i + 1 + i + mid) * mid // 2 <= maxSum:
left = mid
else:
right = mid - 1
ans += left
maxSum -= (i + 1 + i + left) * left // 2
if maxSum <= 0:
break
return ans
|
Solution
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/functions.py
|
{
"start": 60085,
"end": 60234
}
|
class ____(AnsiFunction[datetime.datetime]):
"""The localtime() SQL function."""
type = sqltypes.DateTime()
inherit_cache = True
|
localtime
|
python
|
ipython__ipython
|
IPython/core/shellapp.py
|
{
"start": 4019,
"end": 5150
}
|
class ____(CaselessStrEnum):
"""An enum of Matplotlib backend strings where the case should be ignored.
Prior to Matplotlib 3.9.0 the list of valid backends is hardcoded in
pylabtools.backends. After that, Matplotlib manages backends.
The list of valid backends is determined when it is first needed to avoid
wasting unnecessary initialisation time.
"""
def __init__(
self: CaselessStrEnum[t.Any],
default_value: t.Any = Undefined,
**kwargs: t.Any,
) -> None:
super().__init__(None, default_value=default_value, **kwargs)
def __getattribute__(self, name):
if name == "values" and object.__getattribute__(self, name) is None:
from IPython.core.pylabtools import _list_matplotlib_backends_and_gui_loops
self.values = _list_matplotlib_backends_and_gui_loops()
return object.__getattribute__(self, name)
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
|
MatplotlibBackendCaselessStrEnum
|
python
|
pytorch__pytorch
|
torch/utils/module_tracker.py
|
{
"start": 460,
"end": 5434
}
|
class ____:
"""
``ModuleTracker`` is a context manager that tracks the nn.Module hierarchy during execution
so that other system can query which Module is currently being executed (or its backward is being
executed).
You can access the ``parents`` attribute on this context manager to get the set of all the
Modules currently being executed via their fqn (fully qualified name, also used as the key within
the state_dict).
You can access the ``is_bw`` attribute to know if you are currently running in backward or not.
Note that ``parents`` is never empty and always contains the "Global" key. The ``is_bw`` flag
will remain ``True`` after the forward until another Module is executed. If you need it to be
more accurate, please submit an issue requesting this. Adding a map from fqn to the module instance
is possible but not done yet, please submit an issue requesting this if you need it.
Example usage
.. code-block:: python
mod = torch.nn.Linear(2, 2)
with ModuleTracker() as tracker:
# Access anything during the forward pass
def my_linear(m1, m2, bias):
print(f"Current modules: {tracker.parents}")
return torch.mm(m1, m2.t()) + bias
torch.nn.functional.linear = my_linear
mod(torch.rand(2, 2))
"""
parents: set[str]
"""
A Set containing the fqn for each module currently running their forward
"""
def __init__(self) -> None:
self.parents = {"Global"}
self._known_modules: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
self._seen_modules: weakref.WeakSet = weakref.WeakSet()
self._has_callback = False
self._hooks: list[RemovableHandle] = []
def _maybe_set_engine_callback(self) -> None:
# This assumes no concurrent calls to backward
if self._has_callback:
return
def callback() -> None:
self.parents = {"Global"}
self._has_callback = False
torch.autograd.Variable._execution_engine.queue_callback(callback)
self._has_callback = True
@property
def is_bw(self):
"""
A boolean marking if this is currently running during the backward pass or not
"""
return torch._C._current_graph_task_id() != -1
def _get_mod_name(self, mod):
if mod not in self._known_modules:
self._known_modules[mod] = type(mod).__name__
mod_name = self._known_modules[mod]
if mod not in self._seen_modules:
for name, submod in mod.named_children():
self._known_modules[submod] = f"{mod_name}.{name}"
self._get_mod_name(submod)
self._seen_modules.add(mod)
return mod_name
def _get_append_fn(self, name, is_bw):
def fn(*args) -> None:
if is_bw:
self._maybe_set_engine_callback()
if name in self.parents:
logger.info(
"The module hierarchy tracking seems to be broken as this Module was already entered. %s during %s",
name,
"backward" if is_bw else "forward",
)
self.parents.add(name)
return fn
def _get_pop_fn(self, name, is_bw):
def fn(*args) -> None:
if name in self.parents:
self.parents.remove(name)
else:
logger.info(
"The Module hierarchy tracking is confused as we're exiting a Module that was never entered. %s during %s",
name,
"backward" if is_bw else "forward",
)
return fn
def _fw_pre_hook(self, mod, input) -> None:
name = self._get_mod_name(mod)
self._get_append_fn(name, False)()
args, _ = tree_flatten(input)
tensors = [a for a in args if isinstance(a, torch.Tensor) and a.requires_grad]
if tensors:
self._hooks.append(
register_multi_grad_hook(tensors, self._get_pop_fn(name, True))
)
def _fw_post_hook(self, mod, input, output) -> None:
name = self._get_mod_name(mod)
self._get_pop_fn(name, False)()
args, _ = tree_flatten(output)
tensors = [a for a in args if isinstance(a, torch.Tensor) and a.requires_grad]
if tensors:
self._hooks.append(
register_multi_grad_hook(tensors, self._get_append_fn(name, True))
)
def __enter__(self):
self._fw_pre_handle = register_module_forward_pre_hook(self._fw_pre_hook)
self._fw_post_handle = register_module_forward_hook(self._fw_post_hook)
return self
def __exit__(self, *args):
self._fw_pre_handle.remove()
self._fw_post_handle.remove()
for hook in self._hooks:
hook.remove()
self._hooks.clear()
|
ModuleTracker
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/organization_ai_conversations.py
|
{
"start": 920,
"end": 1770
}
|
class ____(serializers.Serializer):
"""Serializer for validating query parameters."""
sort = serializers.CharField(required=False, default="-timestamp")
query = serializers.CharField(required=False, allow_blank=True)
def validate_sort(self, value):
allowed_sorts = {
"timestamp",
"-timestamp",
"duration",
"-duration",
"errors",
"-errors",
"llmCalls",
"-llmCalls",
"toolCalls",
"-toolCalls",
"totalTokens",
"-totalTokens",
"totalCost",
"-totalCost",
}
if value not in allowed_sorts:
raise serializers.ValidationError(f"Invalid sort option: {value}")
return value
@region_silo_endpoint
|
OrganizationAIConversationsSerializer
|
python
|
google__jax
|
jax/experimental/source_mapper/generate_map.py
|
{
"start": 772,
"end": 2129
}
|
class ____(Protocol):
def __call__(self, *args, **kwargs) -> Sequence[common.SourceMapDump]:
...
def generate_sourcemaps(
f,
passes: Sequence[common.Pass],
**pass_kwargs
) -> SourceMapGeneratorFn:
"""Generates a SourceMapBundle for the specified compiler passes.
Args:
f: The function to compile.
passes: Which compiler passes to generate sourcemaps for.
**pass_kwargs: Keyword arguments for individual passes.
"""
def wrapper(*args, **kwargs) -> Sequence[common.SourceMapDump]:
pass_results: list[common.SourceMapDump] = []
compile_cache = {}
with tempfile.TemporaryDirectory() as work_dir:
for pass_to_eval in passes:
if pass_to_eval.compile_fn not in compile_cache:
dirname = pass_to_eval.name.replace(":", "__")
pass_work_dir = os.path.join(work_dir, dirname)
os.makedirs(pass_work_dir, exist_ok=False)
compile_result = pass_to_eval.compile_fn(
pass_work_dir, f, args, kwargs, **pass_kwargs
)
compile_cache[pass_to_eval.compile_fn] = compile_result
compile_result = compile_cache[pass_to_eval.compile_fn]
pass_results.append(pass_to_eval.generate_dump(compile_result,
**pass_kwargs))
return pass_results
return wrapper
|
SourceMapGeneratorFn
|
python
|
walkccc__LeetCode
|
solutions/2816. Double a Number Represented as a Linked List/2816.py
|
{
"start": 0,
"end": 345
}
|
class ____:
def doubleIt(self, head: ListNode | None) -> ListNode | None:
def getCarry(node: ListNode | None) -> ListNode | None:
val = node.val * 2
if node.next:
val += getCarry(node.next)
node.val = val % 10
return val // 10
if getCarry(head) == 1:
return ListNode(1, head)
return head
|
Solution
|
python
|
pytorch__pytorch
|
torch/utils/serialization/config.py
|
{
"start": 487,
"end": 654
}
|
class ____:
compute_crc32: bool = True
use_pinned_memory_for_d2h: bool = False
storage_alignment: int = 64
_install_config_module(sys.modules[__name__])
|
save
|
python
|
huggingface__transformers
|
src/transformers/models/clipseg/modeling_clipseg.py
|
{
"start": 2181,
"end": 4055
}
|
class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for image-text similarity.
logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
similarity scores.
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
similarity scores.
text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPSegTextModel`].
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPSegVisionModel`].
text_model_output (`BaseModelOutputWithPooling`):
The output of the [`CLIPSegTextModel`].
vision_model_output (`BaseModelOutputWithPooling`):
The output of the [`CLIPSegVisionModel`].
"""
loss: Optional[torch.FloatTensor] = None
logits_per_image: Optional[torch.FloatTensor] = None
logits_per_text: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
text_model_output: BaseModelOutputWithPooling = None
vision_model_output: BaseModelOutputWithPooling = None
def to_tuple(self) -> tuple[Any]:
return tuple(
self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
for k in self.keys()
)
@dataclass
@auto_docstring
|
CLIPSegOutput
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/excel_comparison_test.py
|
{
"start": 328,
"end": 3045
}
|
class ____(unittest.TestCase):
"""
Test class for comparing a file created by XlsxWriter against a file
created by Excel.
"""
def __init__(self, *args: Any) -> None:
"""
Initialize the ExcelComparisonTest instance.
Args:
*args: Variable arguments passed to unittest.TestCase.
"""
super().__init__(*args)
# pylint: disable=invalid-name
self.maxDiff: None = None
self.got_filename: str = ""
self.exp_filename: str = ""
self.ignore_files: List[str] = []
self.ignore_elements: Dict[str, Any] = {}
self.txt_filename: str = ""
self.delete_output: bool = True
# Set the paths for the test files.
self.test_dir: str = "xlsxwriter/test/comparison/"
self.vba_dir: str = self.test_dir + "xlsx_files/"
self.image_dir: str = self.test_dir + "images/"
self.theme_dir: str = self.test_dir + "themes/"
self.output_dir: str = self.test_dir + "output/"
def set_filename(self, filename: str) -> None:
"""
Set the filenames for the Excel comparison test.
Args:
filename (str): The base filename for the test files.
"""
# The reference Excel generated file.
self.exp_filename = self.test_dir + "xlsx_files/" + filename
# The generated XlsxWriter file.
self.got_filename = self.output_dir + "py_" + filename
def set_text_file(self, filename: str) -> None:
"""
Set the filename and path for text files used in tests.
Args:
filename (str): The name of the text file.
"""
# Set the filename and path for text files used in tests.
self.txt_filename = self.test_dir + "xlsx_files/" + filename
def assertExcelEqual(self) -> None: # pylint: disable=invalid-name
"""
Compare the generated file with the reference Excel file.
Raises:
AssertionError: If the files are not equivalent.
"""
# Compare the generate file and the reference Excel file.
got, exp = _compare_xlsx_files(
self.got_filename,
self.exp_filename,
self.ignore_files,
self.ignore_elements,
)
self.assertEqual(exp, got)
def tearDown(self) -> None:
"""
Clean up after each test by removing temporary files.
Raises:
OSError: If there is an error deleting the file.
"""
# Cleanup by removing the temp excel file created for testing.
if self.delete_output and os.path.exists(self.got_filename):
os.remove(self.got_filename)
|
ExcelComparisonTest
|
python
|
tornadoweb__tornado
|
tornado/simple_httpclient.py
|
{
"start": 1363,
"end": 1934
}
|
class ____(HTTPError):
"""Error raised by SimpleAsyncHTTPClient when the underlying stream is closed.
When a more specific exception is available (such as `ConnectionResetError`),
it may be raised instead of this one.
For historical reasons, this is a subclass of `.HTTPClientError`
which simulates a response code of 599.
.. versionadded:: 5.1
"""
def __init__(self, message: str) -> None:
super().__init__(599, message=message)
def __str__(self) -> str:
return self.message or "Stream closed"
|
HTTPStreamClosedError
|
python
|
google__jax
|
tests/core_test.py
|
{
"start": 4780,
"end": 12794
}
|
class ____(jtu.JaxTestCase):
def test_tree_map(self):
xs = ({'a': 1}, [2, 3])
ys = ({'a': 10}, [20, 30])
ys_bad = ({'a': 10, 'b': 10}, [20, 30])
zs = ({'a': 11}, [22, 33])
f = lambda x, y: x + y
assert jax.tree.map(f, xs, ys) == zs
try:
jax.tree.map(f, xs, ys_bad)
assert False
except (TypeError, ValueError):
pass
def test_tree_flatten(self):
flat, _ = jax.tree.flatten(({'a': 1}, [2, 3], 4))
assert flat == [1, 2, 3, 4]
def test_tree_unflatten(self):
tree = [(1, 2), {"roy": (3, [4, 5, ()])}]
flat, treedef = jax.tree.flatten(tree)
assert flat == [1, 2, 3, 4, 5]
tree2 = jax.tree.unflatten(treedef, flat)
nodes_equal = jax.tree.map(operator.eq, tree, tree2)
assert jax.tree.reduce(operator.and_, nodes_equal)
@jtu.sample_product(
dtype=[*jtu.dtypes.all, object, [('i', 'i4'), ('f', 'f4')]]
)
def test_is_valid_jaxtype(self, dtype):
arr = np.zeros(10, dtype=dtype)
if dtype in jtu.dtypes.all:
self.assertTrue(core.valid_jaxtype(arr))
else:
self.assertFalse(core.valid_jaxtype(arr))
def test_str_aval(self):
aval = ShapedArray((8, 2), np.int32)
self.assertEqual(str(aval), "int32[8,2]")
aval = ShapedArray((8, 2), np.int32, weak_type=True)
self.assertEqual(str(aval), "~int32[8,2]")
@parameterized.named_parameters(
(str(i), *spec) for i, spec in enumerate(test_specs))
def test_jit(self, f, args):
jtu.check_close(jit(f)(*args), f(*args))
@parameterized.named_parameters(
(str(i), *spec) for i, spec in enumerate(test_specs))
def test_jvp(self, f, args):
jtu.check_jvp(f, partial(jvp, f), args, rtol={np.float32: 3e-2})
def test_jvp_zeros(self):
def foo(x):
def bar(y):
return jnp.sin(x * y)
return jvp(bar, (3 * x,), (2 * x,))
jtu.check_eq(jit(foo)(0.5), foo(0.5))
@parameterized.parameters(test_specs)
def test_jvp_linearized(self, f, args):
jtu.check_jvp(f, partial(jvp_unlinearized, f), args,
rtol={np.float32: 3e-2})
@parameterized.named_parameters(
(str(i), *spec) for i, spec in enumerate(test_specs))
def test_vjp(self, f, args):
jtu.check_vjp(f, partial(vjp, f), args,
rtol={np.float32: 3e-1, np.float64: 1e-5},
atol={np.float32: 1e-2, np.float64: 1e-5})
def test_jvp_closure(self):
def foo(x):
def bar(y):
return jnp.multiply(x, y)
return jvp(bar, (3.0,), (1.0,))[1]
ans = jvp(foo, (1.0,), (2.0,))
assert ans == (1.0, 2.0), ans
def test_jit_closure(self):
def foo(x):
@jit
def bar(y):
return x + y
return bar(0.0)
assert jvp(foo, (1.0,), (2.0,)) == (1.0, 2.0)
def test_simple_jit(self):
def foo(x):
if x.shape == ():
return x + 1.
else:
return x + 2.
foo2 = jit(foo)
foo3 = jit(foo2)
x1, y1 = np.array(1.0), np.array(2.0)
assert foo(x1) == y1
assert foo2(x1) == y1
assert foo3(x1) == y1
x2, y2 = np.array([1.0, 2.0]), np.array([3.0, 4.0])
assert np.all(foo(x2) == y2)
assert np.all(foo2(x2) == y2)
assert np.all(foo3(x2) == y2)
def test_product_jit(self):
def foo(x, tup):
y, z = tup
w = x + z
return (w, {'x': y}), z
foo2 = jit(foo)
foo3 = jit(foo2)
args = (1.0, (2.0, 3.0))
expected_output = ((4.0, {'x': 2.0}), 3.0)
assert foo(*args) == expected_output
assert foo2(*args) == expected_output
assert foo3(*args) == foo(*args)
def test_jvp_repeated_fwd(self):
d_sin = fwd_deriv(jnp.sin)
d2_sin = fwd_deriv(d_sin)
d3_sin = fwd_deriv(d2_sin)
assert d_sin(0.0) == 1.0
assert d2_sin(0.0) == 0.0
assert d3_sin(0.0) == -1.0
@jtu.thread_unsafe_test() # gc isn't predictable when threaded
def test_reference_cycles(self):
if jtu.TEST_NUM_THREADS.value > 1:
self.skipTest("Test does not work with multiple threads")
gc.collect()
def f(x):
return x.sum()
fn = partial(linearize, f)
params = jnp.zeros([])
debug = gc.get_debug()
try:
fn(params)
gc.set_debug(gc.DEBUG_SAVEALL)
self.assertEqual(gc.collect(), 0, msg=str(gc.garbage))
finally:
gc.set_debug(debug)
@jtu.thread_unsafe_test() # gc isn't predictable when threaded
def test_reference_cycles_jit(self):
if jtu.TEST_NUM_THREADS.value > 1:
self.skipTest("Test does not work with multiple threads")
gc.collect()
def f(x):
return x.sum()
fn = jit(f)
params = jnp.zeros([])
debug = gc.get_debug()
try:
fn(params).block_until_ready()
gc.set_debug(gc.DEBUG_SAVEALL)
self.assertEqual(gc.collect(), 0, msg=str(gc.garbage))
finally:
gc.set_debug(debug)
def test_invalid_shape_error_with_jit_tracer_passed(self):
@jax.jit
def g_jit(x):
return jnp.zeros(shape=(2, x))
@jax.vmap
def g_vmap(x):
return jnp.zeros(shape=(2, x))
with self.assertRaisesRegex(
TypeError,
'This concrete value was not available in'
+ ' Python because it depends on',
):
g_jit(1)
with self.assertRaisesRegex(TypeError,
'This BatchTracer with object id'):
g_vmap(jnp.ones((1, )))
def test_aval_str_short_mem_space(self):
aval = core.ShapedArray((8,), jnp.float32,
memory_space=jax.memory.Space.Host)
self.assertEqual(aval.str_short(True), "f32<host>[8]")
aval = core.ShapedArray((8,), jnp.float32,
memory_space=jax.memory.Space.Device)
self.assertEqual(aval.str_short(True), "f32[8]")
def test_dropvar_avals(self):
def f(x):
def body(c, _):
x1, x2 = c
return (2 * x1, 2 * x2), None
(x1, x2), _ = jax.lax.scan(body, (x, x), None, length=1)
return [x2]
aval = core.ShapedArray((), jnp.dtype('int32'))
pval = pe.PartialVal.unknown(aval)
jaxpr, _, _ = pe.trace_to_jaxpr_nounits(
lu.wrap_init(f, debug_info=debug_info("test", f, (0,), {})),
[pval], False)
dropvar, b = jaxpr.eqns[0].outvars
self.assertEqual(dropvar.aval, aval)
def test_input_residual_forwarding(self):
# https://github.com/jax-ml/jax/pull/11151
x = jnp.arange(3 * 4.).reshape(3, 4)
y = jnp.arange(4 * 3.).reshape(4, 3)
g = jax.jit(jnp.dot)
def f(y):
z, g_lin = jax.linearize(lambda y: g(x, y), y)
zdot = g_lin(y)
return z, zdot
jaxpr = jax.make_jaxpr(f)(y)
e1, e2 = jaxpr.jaxpr.eqns
self.assertLen(e1.outvars, 1) # only primal out, no residuals
self.assertEqual(e1.outvars[0].aval.shape, (3, 3)) # only primal out shape
def test_tracer_reprs(self):
def f(x):
nonlocal x_repr
x_repr = repr(x)
return x.sum()
x_repr = ""
jax.jit(f)(jnp.arange(10.0, dtype='float32'))
self.assertEqual(x_repr, "JitTracer<float32[10]>")
jax.vmap(f)(jnp.arange(20, dtype='int32'))
self.assertEqual(x_repr, "VmapTracer<int32[]>")
jax.grad(f)(jnp.float16(1.0))
self.assertRegex(x_repr, r"(Grad)|(Linearize)Tracer<float16\[\]>")
jax.jacrev(f)(jnp.arange(12, dtype='float32'))
self.assertRegex(x_repr, r"(Grad)|(Linearize)Tracer<float32\[12\]>")
jax.jacfwd(f)(jnp.arange(14, dtype='float32'))
self.assertRegex(x_repr, r"(Grad)|(Linearize)Tracer<float32\[14\]>")
def test_verbose_tracer_reprs(self):
# Verbose reprs, avaiable via tracer._pretty_print()
def f(x):
nonlocal x_repr
x_repr = x._pretty_print(verbose=True).format()
return x.sum()
x_repr = ""
jax.jit(f)(jnp.arange(10.0, dtype='float32'))
self.assertRegex(x_repr, r"^Traced<float32\[10\]>with<DynamicJaxprTrace>")
jax.vmap(f)(jnp.arange(20, dtype='int32'))
self.assertRegex(x_repr, r"^Traced<int32\[\]>with<BatchTrace>")
jax.grad(f)(jnp.float16(1.0))
self.assertRegex(x_repr, r"^Traced<float16\[\]>with<(JVP)|(Linearize)Trace>")
@jtu.with_config(jax_pprint_use_color=False)
|
CoreTest
|
python
|
encode__django-rest-framework
|
tests/test_generics.py
|
{
"start": 14415,
"end": 14643
}
|
class ____(serializers.ModelSerializer):
children = serializers.PrimaryKeyRelatedField(
many=True, queryset=ClassB.objects.all()
)
class Meta:
model = ClassA
fields = '__all__'
|
ClassASerializer
|
python
|
psf__requests
|
src/requests/models.py
|
{
"start": 9458,
"end": 20950
}
|
class ____(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(
self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None,
json=None,
):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return f"<PreparedRequest [{self.method}]>"
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
import idna
try:
host = idna.encode(host, uts46=True).decode("utf-8")
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode("utf8")
else:
url = str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ":" in url and not url.lower().startswith("http"):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
raise MissingSchema(
f"Invalid URL {url!r}: No scheme supplied. "
f"Perhaps you meant https://{url}?"
)
if not host:
raise InvalidURL(f"Invalid URL {url!r}: No host supplied")
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL("URL has an invalid label.")
elif host.startswith(("*", ".")):
raise InvalidURL("URL has an invalid label.")
# Carefully reconstruct the network location
netloc = auth or ""
if netloc:
netloc += "@"
netloc += host
if port:
netloc += f":{port}"
# Bare domains aren't valid URLs.
if not path:
path = "/"
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = f"{query}&{enc_params}"
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = "application/json"
try:
body = complexjson.dumps(json, allow_nan=False)
except ValueError as ve:
raise InvalidJSONError(ve, request=self)
if not isinstance(body, bytes):
body = body.encode("utf-8")
is_stream = all(
[
hasattr(data, "__iter__"),
not isinstance(data, (basestring, list, tuple, Mapping)),
]
)
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, "tell", None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except OSError:
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError(
"Streamed bodies and files are mutually exclusive."
)
if length:
self.headers["Content-Length"] = builtin_str(length)
else:
self.headers["Transfer-Encoding"] = "chunked"
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, "read"):
content_type = None
else:
content_type = "application/x-www-form-urlencoded"
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ("content-type" not in self.headers):
self.headers["Content-Type"] = content_type
self.body = body
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fallback
# to Transfer-Encoding: chunked.
self.headers["Content-Length"] = builtin_str(length)
elif (
self.method not in ("GET", "HEAD")
and self.headers.get("Content-Length") is None
):
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers["Content-Length"] = "0"
def prepare_auth(self, auth, url=""):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers["Cookie"] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
|
PreparedRequest
|
python
|
django-extensions__django-extensions
|
tests/testapp/models.py
|
{
"start": 14014,
"end": 14237
}
|
class ____(models.Model):
field_to_update = models.BooleanField(default=True)
custom_modified = ModificationDateTimeField()
class Meta:
app_label = "django_extensions"
|
CustomModelModificationDateTimeField
|
python
|
kamyu104__LeetCode-Solutions
|
Python/height-of-binary-tree-after-subtree-removal-queries.py
|
{
"start": 159,
"end": 1504
}
|
class ____(object):
def treeQueries(self, root, queries):
"""
:type root: Optional[TreeNode]
:type queries: List[int]
:rtype: List[int]
"""
def iter_dfs(root):
top = collections.defaultdict(lambda: [0]*2)
depth, height = {}, {}
stk = [(1, (root, 0))]
while stk:
step, (curr, d) = stk.pop()
if step == 1:
if not curr:
continue
stk.append((2, (curr, d)))
stk.append((1, (curr.right, d+1)))
stk.append((1, (curr.left, d+1)))
elif step == 2:
h = 1+max((height[curr.left.val] if curr.left else 0),
(height[curr.right.val] if curr.right else 0))
if h > top[d][0]:
top[d][0], top[d][1] = h, top[d][0]
elif h > top[d][1]:
top[d][1] = h
depth[curr.val], height[curr.val] = d, h
return top, depth, height
top, depth, height = iter_dfs(root)
return [(depth[q]-1)+(top[depth[q]][0] if height[q] != top[depth[q]][0] else top[depth[q]][1]) for q in queries]
# Time: O(n)
# Space: O(n)
import collections
# dfs
|
Solution
|
python
|
TheAlgorithms__Python
|
hashes/sha256.py
|
{
"start": 574,
"end": 5730
}
|
class ____:
"""
Class to contain the entire pipeline for SHA1 Hashing Algorithm
>>> SHA256(b'Python').hash
'18885f27b5af9012df19e496460f9294d5ab76128824c6f993787004f6d9a7db'
>>> SHA256(b'hello world').hash
'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9'
"""
def __init__(self, data: bytes) -> None:
self.data = data
# Initialize hash values
self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
self.round_constants = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def preprocessing(data: bytes) -> bytes:
padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
big_endian_integer = struct.pack(">Q", (len(data) * 8))
return data + padding + big_endian_integer
def final_hash(self) -> None:
# Convert into blocks of 64 bytes
self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0, len(self.preprocessed_data), 64)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
words = list(struct.unpack(">16L", block))
# add 48 0-ed integers
words += [0] * 48
a, b, c, d, e, f, g, h = self.hashes
for index in range(64):
if index > 15:
# modify the zero-ed indexes at the end of the array
s0 = (
self.ror(words[index - 15], 7)
^ self.ror(words[index - 15], 18)
^ (words[index - 15] >> 3)
)
s1 = (
self.ror(words[index - 2], 17)
^ self.ror(words[index - 2], 19)
^ (words[index - 2] >> 10)
)
words[index] = (
words[index - 16] + s0 + words[index - 7] + s1
) % 0x100000000
# Compression
s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
ch = (e & f) ^ ((~e & (0xFFFFFFFF)) & g)
temp1 = (
h + s1 + ch + self.round_constants[index] + words[index]
) % 0x100000000
s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
maj = (a & b) ^ (a & c) ^ (b & c)
temp2 = (s0 + maj) % 0x100000000
h, g, f, e, d, c, b, a = (
g,
f,
e,
((d + temp1) % 0x100000000),
c,
b,
a,
((temp1 + temp2) % 0x100000000),
)
mutated_hash_values = [a, b, c, d, e, f, g, h]
# Modify final values
self.hashes = [
((element + mutated_hash_values[index]) % 0x100000000)
for index, element in enumerate(self.hashes)
]
self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
def ror(self, value: int, rotations: int) -> int:
"""
Right rotate a given unsigned number by a certain amount of rotations
"""
return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
|
SHA256
|
python
|
catalyst-team__catalyst
|
examples/reinforcement_learning/dqn.py
|
{
"start": 1539,
"end": 4481
}
|
class ____(IterableDataset):
def __init__(self, buffer: ReplayBuffer, epoch_size: int = int(1e3)):
self.buffer = buffer
self.epoch_size = epoch_size
def __iter__(self) -> Iterator[Sequence[np.array]]:
states, actions, rewards, dones, next_states = self.buffer.sample(
self.epoch_size
)
for i in range(len(dones)):
yield states[i], actions[i], rewards[i], dones[i], next_states[i]
def __len__(self) -> int:
return self.epoch_size
def soft_update(target: nn.Module, source: nn.Module, tau: float) -> None:
"""Updates the `target` data with the `source` one smoothing by ``tau`` (inplace operation)."""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
# DQN
def get_action(env, network: nn.Module, state: np.array, epsilon: float = -1) -> int:
if np.random.random() < epsilon:
action = env.action_space.sample()
else:
state = torch.tensor(state[None], dtype=torch.float32)
q_values = network(state).detach().cpu().numpy()[0]
action = np.argmax(q_values)
return int(action)
def generate_session(
env,
network: nn.Module,
t_max: int = 1000,
epsilon: float = -1,
replay_buffer: Optional[ReplayBuffer] = None,
) -> Tuple[float, int]:
total_reward = 0
state = env.reset()
for t in range(t_max):
action = get_action(env, network, state=state, epsilon=epsilon)
next_state, reward, done, _ = env.step(action)
if replay_buffer is not None:
transition = Transition(state, action, reward, done, next_state)
replay_buffer.append(transition)
total_reward += reward
state = next_state
if done:
break
return total_reward, t
def generate_sessions(
env,
network: nn.Module,
t_max: int = 1000,
epsilon: float = -1,
replay_buffer: ReplayBuffer = None,
num_sessions: int = 100,
) -> Tuple[float, int]:
sessions_reward, sessions_steps = 0, 0
for i_episone in range(num_sessions):
r, t = generate_session(
env=env,
network=network,
t_max=t_max,
epsilon=epsilon,
replay_buffer=replay_buffer,
)
sessions_reward += r
sessions_steps += t
return sessions_reward, sessions_steps
def get_network(env, num_hidden: int = 128):
inner_fn = get_optimal_inner_init(nn.ReLU)
outer_fn = outer_init
network = torch.nn.Sequential(
nn.Linear(env.observation_space.shape[0], num_hidden),
nn.ReLU(),
nn.Linear(num_hidden, num_hidden),
nn.ReLU(),
)
head = nn.Linear(num_hidden, env.action_space.n)
network.apply(inner_fn)
head.apply(outer_fn)
return torch.nn.Sequential(network, head)
# Catalyst
|
ReplayDataset
|
python
|
numba__numba
|
numba/core/typing/npdatetime.py
|
{
"start": 8539,
"end": 9202
}
|
class ____(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 2
error_msg = "DatetimeMinMax requires both arguments to be NPDatetime type or both arguments to be NPTimedelta types"
assert isinstance(args[0], (types.NPDatetime, types.NPTimedelta)), error_msg
if isinstance(args[0], types.NPDatetime):
if not isinstance(args[1], types.NPDatetime):
raise errors.TypingError(error_msg)
else:
if not isinstance(args[1], types.NPTimedelta):
raise errors.TypingError(error_msg)
return signature(args[0], *args)
|
DatetimeMinMax
|
python
|
huggingface__transformers
|
examples/modular-transformers/modeling_dummy_bert.py
|
{
"start": 20536,
"end": 21103
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
DummyBertPooler
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/zs_responses/posts_response_builder.py
|
{
"start": 357,
"end": 758
}
|
class ____(HttpResponseBuilder):
@classmethod
def posts_response(cls, request_without_cursor_for_pagination: Optional[HttpRequest] = None) -> "PostsResponseBuilder":
return cls(
find_template("posts", __file__),
FieldPath("posts"),
CursorBasedPaginationStrategy(http_request_to_str(request_without_cursor_for_pagination)),
)
|
PostsResponseBuilder
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_events.py
|
{
"start": 62212,
"end": 72031
}
|
class ____(RemoveORMEventsGlobally, _fixtures.FixtureTest):
"""test event listeners against unmapped classes.
This incurs special logic. Note if we ever do the "remove" case,
it has to get all of these, too.
"""
run_inserts = None
def test_deferred_map_event(self):
"""
1. mapper event listen on class
2. map class
3. event fire should receive event
"""
users, User = (self.tables.users, self.classes.User)
canary = []
def evt(x, y, z):
canary.append(x)
event.listen(User, "before_insert", evt, raw=True)
m = self.mapper_registry.map_imperatively(User, users)
m.dispatch.before_insert(5, 6, 7)
eq_(canary, [5])
def test_deferred_map_event_subclass_propagate(self):
"""
1. mapper event listen on class, w propagate
2. map only subclass of class
3. event fire should receive event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
class SubSubUser(SubUser):
pass
canary = Mock()
def evt(x, y, z):
canary.append(x)
event.listen(User, "before_insert", canary, propagate=True, raw=True)
m = self.mapper_registry.map_imperatively(SubUser, users)
m.dispatch.before_insert(5, 6, 7)
eq_(canary.mock_calls, [call(5, 6, 7)])
m2 = self.mapper_registry.map_imperatively(SubSubUser, users)
m2.dispatch.before_insert(8, 9, 10)
eq_(canary.mock_calls, [call(5, 6, 7), call(8, 9, 10)])
def test_deferred_map_event_subclass_no_propagate(self):
"""
1. mapper event listen on class, w/o propagate
2. map only subclass of class
3. event fire should not receive event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
canary = []
def evt(x, y, z):
canary.append(x)
event.listen(User, "before_insert", evt, propagate=False)
m = self.mapper_registry.map_imperatively(SubUser, users)
m.dispatch.before_insert(5, 6, 7)
eq_(canary, [])
def test_deferred_map_event_subclass_post_mapping_propagate(self):
"""
1. map only subclass of class
2. mapper event listen on class, w propagate
3. event fire should receive event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
m = self.mapper_registry.map_imperatively(SubUser, users)
canary = []
def evt(x, y, z):
canary.append(x)
event.listen(User, "before_insert", evt, propagate=True, raw=True)
m.dispatch.before_insert(5, 6, 7)
eq_(canary, [5])
def test_deferred_map_event_subclass_post_mapping_propagate_two(self):
"""
1. map only subclass of class
2. mapper event listen on class, w propagate
3. event fire should receive event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
class SubSubUser(SubUser):
pass
m = self.mapper_registry.map_imperatively(SubUser, users)
canary = Mock()
event.listen(User, "before_insert", canary, propagate=True, raw=True)
m2 = self.mapper_registry.map_imperatively(SubSubUser, users)
m.dispatch.before_insert(5, 6, 7)
eq_(canary.mock_calls, [call(5, 6, 7)])
m2.dispatch.before_insert(8, 9, 10)
eq_(canary.mock_calls, [call(5, 6, 7), call(8, 9, 10)])
def test_deferred_instance_event_subclass_post_mapping_propagate(self):
"""
1. map only subclass of class
2. instance event listen on class, w propagate
3. event fire should receive event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
m = self.mapper_registry.map_imperatively(SubUser, users)
canary = []
def evt(x):
canary.append(x)
event.listen(User, "load", evt, propagate=True, raw=True)
m.class_manager.dispatch.load(5)
eq_(canary, [5])
def test_deferred_instance_event_plain(self):
"""
1. instance event listen on class, w/o propagate
2. map class
3. event fire should receive event
"""
users, User = (self.tables.users, self.classes.User)
canary = []
def evt(x):
canary.append(x)
event.listen(User, "load", evt, raw=True)
m = self.mapper_registry.map_imperatively(User, users)
m.class_manager.dispatch.load(5)
eq_(canary, [5])
def test_deferred_instance_event_subclass_propagate_subclass_only(self):
"""
1. instance event listen on class, w propagate
2. map two subclasses of class
3. event fire on each class should receive one and only one event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
class SubUser2(User):
pass
canary = []
def evt(x):
canary.append(x)
event.listen(User, "load", evt, propagate=True, raw=True)
m = self.mapper_registry.map_imperatively(SubUser, users)
m2 = self.mapper_registry.map_imperatively(SubUser2, users)
m.class_manager.dispatch.load(5)
eq_(canary, [5])
m2.class_manager.dispatch.load(5)
eq_(canary, [5, 5])
def test_deferred_instance_event_subclass_propagate_baseclass(self):
"""
1. instance event listen on class, w propagate
2. map one subclass of class, map base class, leave 2nd subclass
unmapped
3. event fire on sub should receive one and only one event
4. event fire on base should receive one and only one event
5. map 2nd subclass
6. event fire on 2nd subclass should receive one and only one event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
class SubUser2(User):
pass
canary = Mock()
event.listen(User, "load", canary, propagate=True, raw=False)
# reversing these fixes....
m = self.mapper_registry.map_imperatively(SubUser, users)
m2 = self.mapper_registry.map_imperatively(User, users)
instance = Mock()
m.class_manager.dispatch.load(instance)
eq_(canary.mock_calls, [call(instance.obj())])
m2.class_manager.dispatch.load(instance)
eq_(canary.mock_calls, [call(instance.obj()), call(instance.obj())])
m3 = self.mapper_registry.map_imperatively(SubUser2, users)
m3.class_manager.dispatch.load(instance)
eq_(
canary.mock_calls,
[call(instance.obj()), call(instance.obj()), call(instance.obj())],
)
def test_deferred_instance_event_subclass_no_propagate(self):
"""
1. instance event listen on class, w/o propagate
2. map subclass
3. event fire on subclass should not receive event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
canary = []
def evt(x):
canary.append(x)
event.listen(User, "load", evt, propagate=False)
m = self.mapper_registry.map_imperatively(SubUser, users)
m.class_manager.dispatch.load(5)
eq_(canary, [])
def test_deferred_instrument_event(self):
User = self.classes.User
canary = []
def evt(x):
canary.append(x)
event.listen(User, "attribute_instrument", evt)
instrumentation._instrumentation_factory.dispatch.attribute_instrument(
User
)
eq_(canary, [User])
def test_isolation_instrument_event(self):
User = self.classes.User
class Bar:
pass
canary = []
def evt(x):
canary.append(x)
event.listen(Bar, "attribute_instrument", evt)
instrumentation._instrumentation_factory.dispatch.attribute_instrument(
User
)
eq_(canary, [])
@testing.requires.predictable_gc
def test_instrument_event_auto_remove(self):
class Bar:
pass
dispatch = instrumentation._instrumentation_factory.dispatch
assert not dispatch.attribute_instrument
event.listen(Bar, "attribute_instrument", lambda: None)
eq_(len(dispatch.attribute_instrument), 1)
del Bar
gc_collect()
assert not dispatch.attribute_instrument
def test_deferred_instrument_event_subclass_propagate(self):
User = self.classes.User
class SubUser(User):
pass
canary = []
def evt(x):
canary.append(x)
event.listen(User, "attribute_instrument", evt, propagate=True)
instrumentation._instrumentation_factory.dispatch.attribute_instrument(
SubUser
)
eq_(canary, [SubUser])
def test_deferred_instrument_event_subclass_no_propagate(self):
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
canary = []
def evt(x):
canary.append(x)
event.listen(User, "attribute_instrument", evt, propagate=False)
self.mapper_registry.map_imperatively(SubUser, users)
instrumentation._instrumentation_factory.dispatch.attribute_instrument(
5
)
eq_(canary, [])
|
DeferredMapperEventsTest
|
python
|
Textualize__textual
|
src/textual/widgets/_pretty.py
|
{
"start": 227,
"end": 1379
}
|
class ____(Widget):
"""A pretty-printing widget.
Used to pretty-print any object.
"""
DEFAULT_CSS = """
Pretty {
height: auto;
}
"""
def __init__(
self,
object: Any,
*,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
) -> None:
"""Initialise the `Pretty` widget.
Args:
object: The object to pretty-print.
name: The name of the pretty widget.
id: The ID of the pretty in the DOM.
classes: The CSS classes of the pretty.
"""
super().__init__(name=name, id=id, classes=classes)
self.shrink = False
self._pretty_renderable = PrettyRenderable(object)
def render(self) -> RenderResult:
return self._pretty_renderable
def update(self, object: object) -> None:
"""Update the content of the pretty widget.
Args:
object: The object to pretty-print.
"""
self._pretty_renderable = PrettyRenderable(object)
self.clear_cached_dimensions()
self.refresh(layout=True)
|
Pretty
|
python
|
SmileyChris__easy-thumbnails
|
easy_thumbnails/processors.py
|
{
"start": 1097,
"end": 14137
}
|
class ____:
def __new__(cls, im):
if getattr(im, "n_frames", 1) > 1:
return super().__new__(cls)
return im
def __init__(self, im):
self.im = im
def apply_to_frames(self, method, *args, **kwargs):
new_frames = []
for i in range(self.im.n_frames):
self.im.seek(i)
new_frames.append(method(*args, **kwargs))
write_to = BytesIO()
new_frames[0].save(
write_to, format=self.im.format, save_all=True, append_images=new_frames[1:]
)
return Image.open(write_to)
def __getattr__(self, key):
method = getattr(self.im, key)
return partial(self.apply_to_frames, method)
def colorspace(im, bw=False, replace_alpha=False, **kwargs):
"""
Convert images to the correct color space.
A passive option (i.e. always processed) of this method is that all images
(unless grayscale) are converted to RGB colorspace.
This processor should be listed before :func:`scale_and_crop` so palette is
changed before the image is resized.
bw
Make the thumbnail grayscale (not really just black & white).
replace_alpha
Replace any transparency layer with a solid color. For example,
``replace_alpha='#fff'`` would replace the transparency layer with
white.
"""
if im.mode == 'I':
# PIL (and pillow) have can't convert 16 bit grayscale images to lower
# modes, so manually convert them to an 8 bit grayscale.
im = FrameAware(im).point(list(_points_table()), "L")
is_transparent = utils.is_transparent(im)
is_grayscale = im.mode in ('L', 'LA')
new_mode = im.mode
if is_grayscale or bw:
new_mode = 'L'
else:
new_mode = 'RGB'
if is_transparent:
if replace_alpha:
if not getattr(im, 'is_animated', False):
if im.mode != 'RGBA':
im = FrameAware(im).convert('RGBA')
base = Image.new('RGBA', im.size, replace_alpha)
base.paste(im, mask=im)
im = base
else:
frames = []
for i in range(im.n_frames):
im.seek(i)
if im.mode != 'RGBA':
im = FrameAware(im).convert('RGBA')
base = Image.new('RGBA', im.size, replace_alpha)
base.paste(im, mask=im)
frames.append(base)
write_to = BytesIO()
frames[0].save(
write_to, format=im.format, save_all=True, append_images=frames[1:]
)
return Image.open(write_to)
else:
new_mode = new_mode + 'A'
if im.mode != new_mode:
im = FrameAware(im).convert(new_mode)
return im
def autocrop(im, autocrop=False, **kwargs):
"""
Remove any unnecessary whitespace from the edges of the source image.
This processor should be listed before :func:`scale_and_crop` so the
whitespace is removed from the source image before it is resized.
autocrop
Activates the autocrop method for this image.
"""
if autocrop:
# If transparent, flatten.
if utils.is_transparent(im):
no_alpha = Image.new('L', im.size, (255))
no_alpha.paste(im, mask=im.split()[-1])
else:
no_alpha = im.convert('L')
# Convert to black and white image.
bw = no_alpha.convert('L')
# bw = bw.filter(ImageFilter.MedianFilter)
# White background.
bg = Image.new('L', im.size, 255)
bbox = ImageChops.difference(bw, bg).getbbox()
if bbox:
im = FrameAware(im).crop(bbox)
return im
def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None,
**kwargs):
"""
Handle scaling and cropping the source image.
Images can be scaled / cropped against a single dimension by using zero
as the placeholder in the size. For example, ``size=(100, 0)`` will cause
the image to be resized to 100 pixels wide, keeping the aspect ratio of
the source image.
crop
Crop the source image height or width to exactly match the requested
thumbnail size (the default is to proportionally resize the source
image to fit within the requested thumbnail size).
By default, the image is centered before being cropped. To crop from
the edges, pass a comma separated string containing the ``x`` and ``y``
percentage offsets (negative values go from the right/bottom). Some
examples follow:
* ``crop="0,0"`` will crop from the left and top edges.
* ``crop="-10,-0"`` will crop from the right edge (with a 10% offset)
and the bottom edge.
* ``crop=",0"`` will keep the default behavior for the x axis
(horizontally centering the image) and crop from the top edge.
The image can also be "smart cropped" by using ``crop="smart"``. The
image is incrementally cropped down to the requested size by removing
slices from edges with the least entropy.
Finally, you can use ``crop="scale"`` to simply scale the image so that
at least one dimension fits within the size dimensions given (you may
want to use the upscale option too).
upscale
Allow upscaling of the source image during scaling.
zoom
A percentage to zoom in on the scaled image. For example, a zoom of
``40`` will clip 20% off each side of the source image before
thumbnailing.
target
Set the focal point as a percentage for the image if it needs to be
cropped (defaults to ``(50, 50)``).
For example, ``target="10,20"`` will set the focal point as 10% and 20%
from the left and top of the image, respectively. If the image needs to
be cropped, it will trim off the right and bottom edges until the focal
point is centered.
Can either be set as a two-item tuple such as ``(20, 30)`` or a comma
separated string such as ``"20,10"``.
A null value such as ``(20, None)`` or ``",60"`` will default to 50%.
"""
source_x, source_y = [float(v) for v in im.size]
target_x, target_y = [int(v) for v in size]
if crop or not target_x or not target_y:
scale = max(
1.0 if source_x == 0 else target_x / source_x,
1.0 if source_y == 0 else target_y / source_y,
)
else:
scale = min(
1.0 if source_x == 0 else target_x / source_x,
1.0 if source_y == 0 else target_y / source_y,
)
# Handle one-dimensional targets.
if not target_x:
target_x = round(source_x * scale)
elif not target_y:
target_y = round(source_y * scale)
if zoom:
if not crop:
target_x = round(source_x * scale)
target_y = round(source_y * scale)
crop = True
scale *= (100 + int(zoom)) / 100.0
# Check Pillow version and use right constant
try:
# Pillow >= 9.1.0
Image__Resampling__LANCZOS = Image.Resampling.LANCZOS
except AttributeError:
# Pillow < 9.1.0
Image__Resampling__LANCZOS = Image.ANTIALIAS
if scale < 1.0 or (scale > 1.0 and upscale):
# Resize the image to the target size boundary. Round the scaled
# boundary sizes to avoid floating point errors.
im = FrameAware(im).resize(
(int(round(source_x * scale)), int(round(source_y * scale))),
resample=Image__Resampling__LANCZOS,
)
if crop:
# Use integer values now.
source_x, source_y = im.size
# Difference between new image size and requested size.
diff_x = int(source_x - min(source_x, target_x))
diff_y = int(source_y - min(source_y, target_y))
if crop != 'scale' and (diff_x or diff_y):
if isinstance(target, str):
target = re.match(r'(\d+)?,(\d+)?$', target)
if target:
target = target.groups()
if target:
focal_point = [int(n) if (n or n == 0) else 50 for n in target]
else:
focal_point = 50, 50
# Crop around the focal point
halftarget_x, halftarget_y = int(target_x / 2), int(target_y / 2)
focal_point_x = int(source_x * focal_point[0] / 100)
focal_point_y = int(source_y * focal_point[1] / 100)
box = [
max(0, min(source_x - target_x, focal_point_x - halftarget_x)),
max(0, min(source_y - target_y, focal_point_y - halftarget_y)),
]
box.append(int(min(source_x, box[0] + target_x)))
box.append(int(min(source_y, box[1] + target_y)))
# See if an edge cropping argument was provided.
edge_crop = isinstance(crop, str) and re.match(
r'(?:(-?)(\d+))?,(?:(-?)(\d+))?$', crop
)
if edge_crop and filter(None, edge_crop.groups()):
x_right, x_crop, y_bottom, y_crop = edge_crop.groups()
if x_crop:
offset = min(int(target_x) * int(x_crop) // 100, diff_x)
if x_right:
box[0] = diff_x - offset
box[2] = source_x - offset
else:
box[0] = offset
box[2] = source_x - (diff_x - offset)
if y_crop:
offset = min(int(target_y) * int(y_crop) // 100, diff_y)
if y_bottom:
box[1] = diff_y - offset
box[3] = source_y - offset
else:
box[1] = offset
box[3] = source_y - (diff_y - offset)
# See if the image should be 'smart cropped".
elif crop == 'smart':
left = top = 0
right, bottom = source_x, source_y
while diff_x:
slice = min(diff_x, max(diff_x // 5, 10))
start = im.crop((left, 0, left + slice, source_y))
end = im.crop((right - slice, 0, right, source_y))
add, remove = _compare_entropy(start, end, slice, diff_x)
left += add
right -= remove
diff_x = diff_x - add - remove
while diff_y:
slice = min(diff_y, max(diff_y // 5, 10))
start = im.crop((0, top, source_x, top + slice))
end = im.crop((0, bottom - slice, source_x, bottom))
add, remove = _compare_entropy(start, end, slice, diff_y)
top += add
bottom -= remove
diff_y = diff_y - add - remove
box = (left, top, right, bottom)
# Finally, crop the image!
im = FrameAware(im).crop(box)
return im
def filters(im, detail=False, sharpen=False, **kwargs):
"""
Pass the source image through post-processing filters.
sharpen
Sharpen the thumbnail image (using the PIL sharpen filter)
detail
Add detail to the image, like a mild *sharpen* (using the PIL
``detail`` filter).
"""
if detail:
im = FrameAware(im).filter(ImageFilter.DETAIL)
if sharpen:
im = FrameAware(im).filter(ImageFilter.SHARPEN)
return im
def background(im, size, background=None, **kwargs):
"""
Add borders of a certain color to make the resized image fit exactly within
the dimensions given.
background
Background color to use
"""
if not background:
# Primary option not given, nothing to do.
return im
if not size[0] or not size[1]:
# One of the dimensions aren't specified, can't do anything.
return im
x, y = im.size
if x >= size[0] and y >= size[1]:
# The image is already equal to (or larger than) the expected size, so
# there's nothing to do.
return im
im = colorspace(im, replace_alpha=background, **kwargs)
new_im = Image.new('RGB', size, background)
if new_im.mode != im.mode:
new_im = new_im.convert(im.mode)
offset = (size[0] - x) // 2, (size[1] - y) // 2
# animated format (gif/webp/...) support manually added.
if not getattr(im, 'is_animated', False):
new_im.paste(im, offset)
return new_im
else:
frames = []
for i in range(im.n_frames):
im.seek(i)
copied_new_im = new_im.copy()
copied_new_im.paste(im, offset)
frames.append(copied_new_im)
write_to = BytesIO()
frames[0].save(
write_to, format=im.format, save_all=True, append_images=frames[1:]
)
return Image.open(write_to)
|
FrameAware
|
python
|
django__django
|
tests/backends/models.py
|
{
"start": 582,
"end": 711
}
|
class ____(models.Manager):
def get_queryset(self):
return super().get_queryset().exclude(year=1000)
|
SchoolClassManager
|
python
|
neetcode-gh__leetcode
|
python/2306-naming-a-company.py
|
{
"start": 0,
"end": 927
}
|
class ____(object):
def distinctNames(self, ideas):
suffixes = dict()
for idea in ideas:
if idea[0] not in suffixes:
suffixes[idea[0]] = set()
suffixes[idea[0]].add(idea[1:])
if len(suffixes) < 2:
return 0
num_distinct_names = 0
alphabet = 'abcdefghijklmnopqrstuvwxyz'
for prefix_1 in suffixes:
for prefix_2 in suffixes:
if prefix_2 > prefix_1:
num_suffixes_1 = len(suffixes[prefix_1])
num_suffixes_2 = len(suffixes[prefix_2])
for suffix in suffixes[prefix_1]:
if suffix in suffixes[prefix_2]:
num_suffixes_1 -= 1
num_suffixes_2 -= 1
num_distinct_names += 2 * num_suffixes_1 * num_suffixes_2
return num_distinct_names
|
Solution
|
python
|
ray-project__ray
|
python/ray/exceptions.py
|
{
"start": 27322,
"end": 27449
}
|
class ____(RayError, TimeoutError):
"""Indicates that a call to the worker timed out."""
pass
@PublicAPI
|
GetTimeoutError
|
python
|
google__jax
|
jax/_src/interpreters/mlir.py
|
{
"start": 8340,
"end": 21064
}
|
class ____(Protocol):
def __call__(self, val: Any, aval: core.AbstractValue | None) -> IrValues:
"""Builds an IR representation for a constant `val`.
A JAX value is represented by zero or more IR values."""
_constant_handlers : dict[type, ConstantHandler] = {}
def register_constant_handler(type_: type, handler_fun: ConstantHandler):
_constant_handlers[type_] = handler_fun
def get_constant_handler(type_: type) -> ConstantHandler:
return _constant_handlers[type_]
def ir_constant(
val: Any, *,
const_lowering: dict[tuple[int, core.AbstractValue], IrValues] | None = None,
aval: core.AbstractValue | None = None
) -> IrValues:
"""Translate a Python `val` to an IR constant.
See https://docs.jax.dev/en/latest/internals/constants.html.
Args:
val: a Python value to be translated to a constant.
const_lowering: an optional dictionary with known lowering for some
constants, indexed by `id`. This is used, e.g., when we pass constants
as MLIR function arguments.
aval: the abstract value of `val`, if known. Required where ambiguous, e.g.
for Python scalars.
Returns:
A representation of the constant as an IR value or sequence of IR values.
"""
if const_lowering is not None:
if np.shape(val) and (c_val := const_lowering.get((id(val), aval))) is not None:
return c_val
for t in type(val).__mro__:
handler = _constant_handlers.get(t)
if handler:
out = handler(val, aval)
assert _is_ir_values(out), (type(val), out)
return out
if hasattr(val, '__jax_array__'):
return ir_constant(val.__jax_array__())
raise TypeError(f"No constant handler for type: {type(val)}")
def _numpy_array_constant(x: np.ndarray | np.generic) -> IrValues:
element_type = dtype_to_ir_type(x.dtype)
shape = x.shape
if x.dtype == np.bool_:
x = np.packbits(x, bitorder='little') # type: ignore
x = np.ascontiguousarray(x)
attr = ir.DenseElementsAttr.get(x, type=element_type, shape=shape) # type: ignore
return hlo.constant(attr)
def _masked_array_constant_handler(*args, **kwargs):
raise ValueError("numpy masked arrays are not supported as direct inputs to JAX functions. "
"Use arr.filled() to convert the value to a standard numpy array.")
register_constant_handler(np.ma.MaskedArray, _masked_array_constant_handler)
def _shape_dtype_struct_constant_handler(*args, **kwargs):
raise TypeError("A ShapeDtypeStruct does not have a value and cannot be "
"used as a constant in a JAX function.")
register_constant_handler(core.ShapeDtypeStruct,
_shape_dtype_struct_constant_handler)
def _ndarray_constant_handler(val: np.ndarray | np.generic,
                              aval: core.AbstractValue | None) -> IrValues:
  """Constant handler for ndarray literals, handling zero-size strides.

  In most cases this function calls _numpy_array_constant(val) except it has
  special handling of arrays with any strides of size zero: for those, it
  generates appropriate calls to NumpyArrayConstant, Broadcast, and Transpose
  to avoid staging in large literals that might arise from np.zeros or np.ones
  or the output of lax.broadcast (which uses np.broadcast_to which in turn
  uses size-zero strides).

  Args:
    val: an ndarray.
    aval: the abstract value of `val`, if known (unused by this handler).

  Returns:
    An XLA ComputationDataHandle / XlaOp representing the constant ndarray
    staged into the XLA Computation.
  """
  if val.dtype == dtypes.float0:
    # float0 carries no data; lower as an all-False boolean array of the
    # same shape.
    return _numpy_array_constant(np.zeros(val.shape, dtype=np.bool_))
  elif 0 in val.strides and val.size > 0:
    # A zero stride means the array repeats data along that axis (e.g. the
    # result of np.broadcast_to). Materialize only the collapsed unique data
    # and re-broadcast it in IR rather than staging the full literal.
    zero_stride_axes, = np.where(np.equal(0, val.strides))
    other_axes, = np.where(np.not_equal(0, val.strides))
    # Index 0 along every repeated axis; keep the full slice elsewhere.
    collapsed_val = val[tuple(0 if ax in zero_stride_axes else slice(None)  # type: ignore
                              for ax in range(val.ndim))]
    out = hlo.broadcast_in_dim(
        ir.RankedTensorType.get(
            val.shape, dtype_to_ir_type(collapsed_val.dtype)),  # type: ignore
        _numpy_array_constant(collapsed_val),
        dense_int_array(other_axes))  # type: ignore
    return out
  else:
    return _numpy_array_constant(val)

# The same handler serves plain ndarrays and JAX's typed ndarray literals.
register_constant_handler(np.ndarray, _ndarray_constant_handler)
register_constant_handler(literals.TypedNdArray, _ndarray_constant_handler)
# Register the shared ndarray handler for every concrete NumPy scalar type,
# so 0-d NumPy scalars lower the same way as arrays.
for _scalar_type in [np.int8, np.int16, np.int32, np.int64,
                     np.uint8, np.uint16, np.uint32, np.uint64,
                     np.float16, np.float32, np.float64,
                     np.complex64, np.complex128,
                     np.bool_, np.longlong, dtypes.bfloat16]:
  register_constant_handler(_scalar_type, _ndarray_constant_handler)  # type: ignore
def _python_scalar_handler(val, aval: core.AbstractValue | None):
  """Lower a Python scalar (int/float/complex/bool) to an IR constant.

  Python scalars are dtype-ambiguous on their own, so `aval` must be a 0-d
  ShapedArray supplying the target dtype.
  """
  assert isinstance(aval, core.ShapedArray), aval
  assert aval.shape == (), aval
  return _numpy_array_constant(np.array(val, aval.dtype))

for ptype in dtypes.python_scalar_types:
  register_constant_handler(ptype, _python_scalar_handler)
def _token_constant_handler(val: core.Token, aval: core.AbstractValue | None):
  """Lower a JAX token constant by creating a fresh HLO token."""
  return hlo.create_token()

register_constant_handler(core.Token, _token_constant_handler)
# Attributes

# An AttributeHandler converts a Python value into an MLIR attribute.
AttributeHandler = Callable[[Any], ir.Attribute]
# Registry mapping Python types to attribute handlers; `ir_attribute`
# consults it along the value's MRO.
_attribute_handlers: dict[type[Any], AttributeHandler] = {}

def register_attribute_handler(type_: type[Any], handler_fun: AttributeHandler):
  """Register `handler_fun` to convert values of `type_` to MLIR attributes."""
  _attribute_handlers[type_] = handler_fun

def get_attribute_handler(type_: type[Any]) -> AttributeHandler:
  """Return the handler registered for exactly `type_` (raises KeyError if none)."""
  return _attribute_handlers[type_]
def _numpy_scalar_attribute(val: Any) -> ir.Attribute:
  """Convert a NumPy scalar to an integer or float MLIR attribute."""
  scalar_type = dtype_to_ir_type(val.dtype)
  if isinstance(scalar_type, ir.IntegerType):
    return ir.IntegerAttr.get(scalar_type, val)
  if isinstance(scalar_type, ir.FloatType):
    return ir.FloatAttr.get(scalar_type, val)
  raise TypeError(f"Unsupported scalar attribute type: {type(val)}")
def _numpy_array_attribute(x: np.ndarray | np.generic) -> ir.Attribute:
  """Convert a NumPy array to a dense-elements MLIR attribute."""
  ir_type = dtype_to_ir_type(x.dtype)
  original_shape = x.shape  # recorded before bit-packing
  if x.dtype == np.bool_:
    # i1 dense elements must be supplied as little-endian packed bits.
    x = np.packbits(x, bitorder='little')  # type: ignore
  data = np.ascontiguousarray(x)
  return ir.DenseElementsAttr.get(data, type=ir_type, shape=original_shape)  # type: ignore
def _numpy_array_attribute_handler(val: np.ndarray | np.generic) -> ir.Attribute:
  """Convert an ndarray or NumPy scalar to an MLIR attribute.

  Unlike the constant-lowering path, an attribute cannot express a broadcast,
  so zero-stride (repeated-data) arrays are rejected outright.
  """
  if 0 in val.strides and val.size > 0:
    raise ValueError(
        "NumPy arrays with zero strides are not supported as MLIR attributes")
  if val.dtype == dtypes.float0:
    # float0 carries no data; represent it as all-False booleans.
    val = np.zeros(val.shape, dtype=np.bool_)
  is_scalar = dtypes.is_python_scalar(val) or np.isscalar(val)
  return _numpy_scalar_attribute(val) if is_scalar else _numpy_array_attribute(val)
register_attribute_handler(np.ndarray, _numpy_array_attribute_handler)
# HashableArray wraps an ndarray in `.val`; unwrap before converting.
register_attribute_handler(hashable_array.HashableArray,
                           lambda x: _numpy_array_attribute_handler(x.val))
# Register the array handler for all concrete NumPy scalar types; 0-d scalars
# are routed to the scalar attribute path inside the handler itself.
for _scalar_type in [np.int8, np.int16, np.int32, np.int64,
                     np.uint8, np.uint16, np.uint32, np.uint64,
                     np.float16, np.float32, np.float64,
                     np.complex64, np.complex128,
                     np.bool_, np.longlong, dtypes.bfloat16]:
  register_attribute_handler(_scalar_type, _numpy_array_attribute_handler)  # type: ignore
def _dtype_attribute_handler(dtype: np.dtype | np.generic) -> ir.Attribute:
  """Convert a NumPy dtype to a TypeAttr wrapping the matching IR type."""
  return ir.TypeAttr.get(dtype_to_ir_type(dtype))

register_attribute_handler(np.dtype, _dtype_attribute_handler)
register_attribute_handler(np.generic, _dtype_attribute_handler)
def _python_scalar_attribute_handler(dtype, val):
  """Convert a Python scalar to an MLIR attribute using a fixed dtype."""
  return _numpy_scalar_attribute(np.array(val, dtype))

# Bind each Python scalar type (int, float, complex, bool) to its default
# NumPy dtype via functools.partial.
for ptype, dtype in dtypes.python_scalar_types_to_dtypes.items():
  register_attribute_handler(
      ptype, partial(_python_scalar_attribute_handler, dtype))

# Strings and bytes map directly onto MLIR string attributes.
register_attribute_handler(str, ir.StringAttr.get)
register_attribute_handler(bytes, ir.StringAttr.get)
def _dict_attribute_handler(val: dict[str, Any]) -> ir.Attribute:
  """Convert a string-keyed dict to a DictAttr, converting values recursively."""
  return ir.DictAttr.get({k: ir_attribute(v) for k, v in val.items()})

register_attribute_handler(dict, _dict_attribute_handler)
def _sequence_attribute_handler(val: Sequence[Any]) -> ir.Attribute:
  """Convert a list/tuple to an ArrayAttr, converting elements recursively."""
  return ir.ArrayAttr.get([ir_attribute(v) for v in val])

register_attribute_handler(list, _sequence_attribute_handler)
register_attribute_handler(tuple, _sequence_attribute_handler)
# Already-built MLIR attributes and types pass through unchanged.
register_attribute_handler(ir.Attribute, lambda x: x)
register_attribute_handler(ir.Type, lambda x: x)
def ir_attribute(val: Any) -> ir.Attribute:
  """Convert a Python value to an MLIR attribute.

  Dispatches over the value's MRO so subclasses fall back to handlers
  registered for their base classes; values exposing `__jax_array__` are
  unwrapped and retried.
  """
  for klass in type(val).__mro__:
    handler = _attribute_handlers.get(klass)
    if handler is None:
      continue
    attr = handler(val)
    assert isinstance(attr, ir.Attribute), (type(val), attr)
    return attr
  if hasattr(val, '__jax_array__'):
    return ir_attribute(val.__jax_array__())
  raise TypeError(f"No attribute handler defined for type: {type(val)}")
# Source locations

def get_canonical_source_file(file_name: str, caches: TracebackCaches) -> str:
  """Return `file_name` with the configured canonicalization regex applied.

  Results are memoized in `caches.canonical_name_cache`, keyed by the
  original (pre-substitution) file name.

  Args:
    file_name: the source file name to canonicalize.
    caches: per-module traceback caches holding the memo table.

  Returns:
    The canonicalized file name.
  """
  canonical_file_name = caches.canonical_name_cache.get(file_name, None)
  if canonical_file_name is not None:
    return canonical_file_name
  canonical_file_name = file_name
  pattern = config.hlo_source_file_canonicalization_regex.value
  if pattern:
    canonical_file_name = re.sub(pattern, '', canonical_file_name)
  # Bug fix: key the cache by the *original* name. The previous code rebound
  # `file_name` to the substituted result before storing, so whenever the
  # regex rewrote a name, later lookups with the original name always missed
  # and `re.sub` reran on every call.
  caches.canonical_name_cache[file_name] = canonical_file_name
  return canonical_file_name
def _traceback_to_location(ctx: ModuleContext, tb: xc.Traceback) -> ir.Location:
  """Converts a full traceback to a callsite() MLIR location."""
  # Resolved via the per-module traceback cache; presumably the cache's
  # `get` builds the location on a miss — see TracebackCaches for details.
  return ctx.traceback_caches.traceback_to_location_cache.get(tb)
def source_info_to_location(
    ctx: ModuleContext, primitive: core.Primitive | None,
    name_stack: source_info_util.NameStack,
    traceback: xc.Traceback | None) -> ir.Location:
  """Build the MLIR location for an equation from its source info.

  The base location is either a full-traceback callsite or a single
  file/line/column frame, depending on configuration; it is then wrapped in
  name locations derived from `name_stack` and (if given) `primitive`.
  """
  if config.include_full_tracebacks_in_locations.value:
    if traceback is None:
      loc = ir.Location.unknown()
    else:
      loc = _traceback_to_location(ctx, traceback)
  else:
    # Use only the innermost user frame of the traceback.
    frame = source_info_util.user_frame(traceback)
    if frame is None:
      loc = ir.Location.unknown()
    else:
      loc = ir.Location.file(get_canonical_source_file(frame.file_name,
                                                       ctx.traceback_caches),
                             frame.start_line, frame.start_column)
  if primitive is None:
    if name_stack.stack:
      loc = ir.Location.name(str(name_stack), childLoc=loc)
  else:
    eqn_str = (
        f"{name_stack}/{primitive.name}" if name_stack.stack else primitive.name
    )
    loc = ir.Location.name(eqn_str, childLoc=loc)
    # Outermost name is "<primitive>:" — NOTE(review): the trailing colon is
    # presumably a marker downstream tooling keys on; confirm before changing.
    loc = ir.Location.name(f"{primitive.name}:", childLoc=loc)
  return loc
# Dialect registry shared across lowering contexts.
upstream_dialects = ir.DialectRegistry()
jax_mlir_ext.register_dialects(upstream_dialects)

# Dumping MLIR modules
# Monotonically increasing id so successive dumps sort in creation order.
_ir_dump_counter = itertools.count()
def dump_module_to_file(module: ir.Module, stage_name: str) -> str | None:
  """Dumps the `module` IR to a file.

  Dumps the module if JAX_DUMP_IR_TO is defined.

  Args:
    module: The module to dump
    stage_name: A name to distinguish different stages of a module, will be
      appended to the `module.name`.

  Returns:
    The name of the file containing the dump if JAX_DUMP_IR_TO is defined and
    the module was dumped, `None` otherwise.
  """
  out_dir = path.make_jax_dump_dir(config.jax_dump_ir_to.value)
  if not out_dir:
    return None
  if 'stablehlo' not in config.jax_dump_ir_modes.value.split(','):
    return None
  dump_id = next(_ir_dump_counter)
  # The module's symbol name becomes part of the dump file name.
  module_name = ir.StringAttr(module.operation.attributes['sym_name']).value
  safe_name = _make_string_safe_for_filename(module_name)
  file_name = f"jax_ir{dump_id:04d}_{safe_name}_{stage_name}.mlir"
  (out_dir / file_name).write_text(module_to_string(module))
  return file_name
def dump_module_message(module: ir.Module, stage_name: str) -> str:
  """Dump `module` (if dumping is enabled) and return a note about the result."""
  dumped_to = dump_module_to_file(module, stage_name)
  if not dumped_to:
    return "Define JAX_DUMP_IR_TO to dump the module."
  return f"The module was dumped to {dumped_to}."
def _make_string_safe_for_filename(s: str) -> str:
return re.sub(r'[^\w.)( -]', '', s)
def module_to_string(module: ir.Module, enable_debug_info=None) -> str:
  """Render `module` as MLIR text; debug info defaults to the global config."""
  if enable_debug_info is None:
    # Anything other than an explicit "false"/"0" setting enables debug info.
    flag = str(config.jax_include_debug_info_in_dumps.value).lower()
    enable_debug_info = flag not in ('false', '0')
  buf = io.StringIO()
  module.operation.print(file=buf, enable_debug_info=enable_debug_info)
  return buf.getvalue()
def module_to_bytecode(module: ir.Module) -> bytes:
  """Serialize `module` to MLIR bytecode."""
  output = io.BytesIO()
  module.operation.write_bytecode(file=output)
  return output.getvalue()
# Translation rules

# Create one global thread pool that can be shared between multiple
# ir.Contexts, enabling multi-threading.
global_thread_pool = ir.ThreadPool()
|
ConstantHandler
|
python
|
numpy__numpy
|
numpy/_core/tests/test_indexing.py
|
{
"start": 53063,
"end": 53401
}
|
class ____:
"""An index can only have a single ellipsis.
"""
def test_basic(self):
a = np.arange(10)
assert_raises(IndexError, lambda: a[..., ...])
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
|
TestMultipleEllipsisError
|
python
|
google__jax
|
jax/_src/pallas/mosaic_gpu/core.py
|
{
"start": 46599,
"end": 48117
}
|
class ____:
"""Represents a mesh over individual warps within a warpgroup.
When used in conjunction with `core_map`, the warp ID will be visible
within the body of the wrapped scope by querying `lax.axis_index` with
the specified axis name.
"""
_NUM_WARPS_PER_WARPGROUP: ClassVar[int] = 4
axis_name: str
@property
def shape(self):
return collections.OrderedDict([
(self.axis_name, self._NUM_WARPS_PER_WARPGROUP),
])
def discharges_effect(self, effect: jax_core.Effect):
del effect
return False
def _gpu_mesh_discharge_rule(
in_avals,
out_avals,
*args,
mesh,
jaxpr,
compiler_params,
interpret,
debug,
cost_estimate,
name,
metadata,
):
if not isinstance(mesh, Mesh):
raise TypeError(f"Mesh must be a `plgpu.Mesh`, got {type(mesh)}")
if compiler_params and not isinstance(compiler_params, CompilerParams):
raise TypeError(
"Compiler params must be a `plgpu.CompilerParams`, got"
f" {type(compiler_params)}"
)
if not compiler_params:
compiler_params = CompilerParams()
return pallas_core.default_mesh_discharge_rule(
in_avals,
out_avals,
*args,
jaxpr=jaxpr,
mesh=mesh,
compiler_params=compiler_params,
debug=debug,
interpret=interpret,
cost_estimate=cost_estimate,
name=name,
memory_space=GMEM,
metadata=metadata,
scratch_shapes=[],
)
pallas_core._core_map_mesh_rules[Mesh] = _gpu_mesh_discharge_rule
|
WarpMesh
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.