language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | realpython__materials | arcade-a-primer/arcade_draw_shapes.py | {
"start": 180,
"end": 2220
} | class ____(arcade.Window):
"""Our main welcome window"""
def __init__(self):
"""Initialize the window"""
# Call the parent class constructor
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
# Set the background window
arcade.set_background_color(arcade.color.WHITE)
def on_draw(self):
"""Called whenever we need to draw our window"""
# Clear the screen and start drawing
arcade.start_render()
# Draw a blue arc
arcade.draw_arc_filled(100, 100, 40, 40, arcade.color.BLUE, 0, 125)
# Draw a red ellipse
arcade.draw_ellipse_outline(
300, 100, 60, 30, arcade.color.RED, border_width=2
)
# Draw some purple lines
arcade.draw_line(500, 100, 550, 100, arcade.color.PURPLE)
arcade.draw_line(500, 90, 550, 90, arcade.color.PURPLE, line_width=2)
arcade.draw_line(500, 80, 550, 80, arcade.color.PURPLE, line_width=3)
# Draw an orange parabola
arcade.draw_parabola_filled(100, 100, 130, 120, arcade.color.ORANGE)
# Draw a black point
arcade.draw_point(300, 300, arcade.color.BLACK, 20)
# Draw a green polygon
points_list = [
[500, 300],
[550, 300],
[575, 325],
[550, 350],
[525, 340],
]
arcade.draw_polygon_outline(
points_list, arcade.color.GREEN, line_width=5
)
# Draw some rectangles
arcade.draw_rectangle_filled(100, 500, 150, 75, arcade.color.AZURE)
arcade.draw_lrtb_rectangle_filled(
150, 250, 575, 525, arcade.color.AMARANTH_PINK
)
arcade.draw_xywh_rectangle_filled(
200, 550, 150, 75, arcade.color.ASPARAGUS
)
# Draw some triangles
arcade.draw_triangle_filled(
400, 500, 500, 500, 450, 575, arcade.color.DEEP_RUBY
)
# Main code entry point
if __name__ == "__main__":
app = Welcome()
arcade.run()
| Welcome |
python | python__mypy | mypy/report.py | {
"start": 26673,
"end": 27124
} | class ____(AbstractReporter):
"""Internal abstract class for reporters that work via XML."""
def __init__(self, reports: Reports, output_dir: str) -> None:
super().__init__(reports, output_dir)
memory_reporter = reports.add_report("memory-xml", "<memory>")
assert isinstance(memory_reporter, MemoryXmlReporter)
# The dependency will be called first.
self.memory_xml = memory_reporter
| AbstractXmlReporter |
python | mlflow__mlflow | mlflow/store/tracking/dbmodels/models.py | {
"start": 46915,
"end": 48046
} | class ____(Base):
"""
DB model for evaluation dataset tags.
"""
__tablename__ = "evaluation_dataset_tags"
dataset_id = Column(
String(36),
ForeignKey("evaluation_datasets.dataset_id", ondelete="CASCADE"),
primary_key=True,
)
"""
Dataset ID: `String` (limit 36 characters). Foreign key to evaluation_datasets.
*Primary Key* for ``evaluation_dataset_tags`` table.
"""
key = Column(String(255), primary_key=True)
"""
Tag key: `String` (limit 255 characters).
*Primary Key* for ``evaluation_dataset_tags`` table.
"""
value = Column(String(5000), nullable=True)
"""
Tag value: `String` (limit 5000 characters).
"""
__table_args__ = (
PrimaryKeyConstraint("dataset_id", "key", name="evaluation_dataset_tags_pk"),
ForeignKeyConstraint(
["dataset_id"],
["evaluation_datasets.dataset_id"],
name="fk_evaluation_dataset_tags_dataset_id",
ondelete="CASCADE",
),
Index("index_evaluation_dataset_tags_dataset_id", "dataset_id"),
)
| SqlEvaluationDatasetTag |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 27848,
"end": 29854
} | class ____(torch.nn.Module):
def forward(self, L_x_: "i16[3]"):
l_x_ = L_x_
getitem = l_x_[0]
item: "Sym(u0)" = getitem.item(); getitem = None
wrap_body_1 = self.wrap_body_1
wrap = torch.ops.higher_order.wrap(wrap_body_1, item, l_x_); wrap_body_1 = item = l_x_ = None
getitem_3: "i16[3]" = wrap[0]; wrap = None
return (getitem_3,)
class wrap_body_1(torch.nn.Module):
def forward(self, item: "Sym(u0)", l_x_: "i16[3]"):
wrap_body_0 = self.wrap_body_0
wrap = torch.ops.higher_order.wrap(wrap_body_0, item, l_x_); wrap_body_0 = item = l_x_ = None
getitem: "i16[3]" = wrap[0]; wrap = None
return (getitem,)
class wrap_body_0(torch.nn.Module):
def forward(self, item: "Sym(u0)", l_x_: "i16[3]"):
add: "i16[3]" = item + l_x_; item = l_x_ = None
return (add,)
""",
)
@torch._dynamo.config.patch(
capture_dynamic_output_shape_ops=True,
)
def test_tensor_and_unbacked_symbol_closure(self):
def f(x):
c = x.nonzero()
sz = c.size(0)
def g(x):
def k(x):
return x.sin() + sz, c.sin()
return wrap(k, x)
return wrap(g, x)
x = torch.randn(3)
arg_count = ifdynstaticdefault(4, 5)
# when compiled with dynamic, we don't have upper bound runtime assertions for u0
expected_op_count = ifdynstaticdefault(10, 8)
out_graph = self._test_wrap_simple(
f,
default_args_generator((x,)),
arg_count,
expected_op_count,
return_graph=True,
)
# Note that u0 is accessed from sz and the shape of c
# We cached via the symbol u0 and de-duplicate them.
if not check_dynamic_shape_capture():
self.assertExpectedInline(
out_graph,
"""\
| GraphModule |
python | ray-project__ray | python/ray/train/v2/_internal/state/schema.py | {
"start": 3691,
"end": 4107
} | class ____(BaseModel):
"""Memory usage information for a process."""
rss: int = Field(description="The resident set size (RSS) memory usage in bytes.")
vms: int = Field(description="The virtual memory size (VMS) usage in bytes.")
pfaults: Optional[int] = Field(description="The number of page faults.")
pageins: Optional[int] = Field(description="The number of page-ins.")
@DeveloperAPI
| MemoryInfo |
python | streamlit__streamlit | lib/streamlit/elements/widgets/camera_input.py | {
"start": 2134,
"end": 3123
} | class ____:
def serialize(
self,
snapshot: SomeUploadedSnapshotFile,
) -> FileUploaderStateProto:
state_proto = FileUploaderStateProto()
if snapshot is None or isinstance(snapshot, DeletedFile):
return state_proto
file_info: UploadedFileInfoProto = state_proto.uploaded_file_info.add()
file_info.file_id = snapshot.file_id
file_info.name = snapshot.name
file_info.size = snapshot.size
file_info.file_urls.CopyFrom(snapshot._file_urls)
return state_proto
def deserialize(
self, ui_value: FileUploaderStateProto | None
) -> SomeUploadedSnapshotFile:
upload_files = _get_upload_files(ui_value)
return_value = None if len(upload_files) == 0 else upload_files[0]
if return_value is not None and not isinstance(return_value, DeletedFile):
enforce_filename_restriction(return_value.name, [".jpg"])
return return_value
| CameraInputSerde |
python | huggingface__transformers | src/transformers/integrations/integration_utils.py | {
"start": 38087,
"end": 43604
} | class ____(TrainerCallback):
"""
A [`TrainerCallback`] that logs metrics to Trackio.
It records training metrics, model (and PEFT) configuration, and GPU memory usage.
If `nvidia-ml-py` is installed, GPU power consumption is also tracked.
**Requires**:
```bash
pip install trackio
```
"""
def __init__(self):
has_trackio = is_trackio_available()
if not has_trackio:
raise RuntimeError("TrackioCallback requires trackio to be installed. Run `pip install trackio`.")
if has_trackio:
import trackio
self._trackio = trackio
self._initialized = False
def setup(self, args, state, model, **kwargs):
"""
Setup the optional Trackio integration.
To customize the setup you can also set the arguments `project`, `trackio_space_id` and `hub_private_repo` in
[`TrainingArguments`]. Please refer to the docstring of for more details.
"""
if state.is_world_process_zero:
if os.getenv("TRACKIO_PROJECT"):
logger.warning(
"The `TRACKIO_PROJECT` environment variable is deprecated and will be removed in a future "
"version. Use TrainingArguments.project instead."
)
project = os.getenv("TRACKIO_PROJECT")
else:
project = args.project
if os.getenv("TRACKIO_SPACE_ID"):
logger.warning(
"The `TRACKIO_SPACE_ID` environment variable is deprecated and will be removed in a future "
"version. Use TrainingArguments.trackio_space_id instead."
)
space_id = os.getenv("TRACKIO_SPACE_ID")
else:
space_id = args.trackio_space_id
combined_dict = {**args.to_dict()}
if hasattr(model, "config") and model.config is not None:
model_config = model.config if isinstance(model.config, dict) else model.config.to_dict()
combined_dict = {**model_config, **combined_dict}
if hasattr(model, "peft_config") and model.peft_config is not None:
peft_config = model.peft_config
combined_dict = {"peft_config": peft_config, **combined_dict}
self._trackio.init(
project=project,
name=args.run_name,
space_id=space_id,
resume="allow",
private=args.hub_private_repo,
)
# Add config parameters (run may have been created manually)
self._trackio.config.update(combined_dict, allow_val_change=True)
# Add number of model parameters to trackio config
try:
self._trackio.config["model/num_parameters"] = model.num_parameters()
except AttributeError:
logger.info("Could not log the number of model parameters in Trackio due to an AttributeError.")
self._initialized = True
def on_train_begin(self, args, state, control, model=None, **kwargs):
if not self._initialized:
self.setup(args, state, model, **kwargs)
def on_train_end(self, args: TrainingArguments, state, control, model=None, processing_class=None, **kwargs):
if state.is_world_process_zero and self._initialized:
self._trackio.finish()
def on_log(self, args, state, control, model=None, logs=None, **kwargs):
single_value_scalars = [
"train_runtime",
"train_samples_per_second",
"train_steps_per_second",
"train_loss",
"total_flos",
]
if is_torch_available() and torch.cuda.is_available():
device_idx = torch.cuda.current_device()
total_memory = torch.cuda.get_device_properties(device_idx).total_memory
memory_allocated = torch.cuda.memory_allocated(device_idx)
gpu_memory_logs = {
f"gpu/{device_idx}/allocated_memory": memory_allocated / (1024**3), # GB
f"gpu/{device_idx}/memory_usage": memory_allocated / total_memory, # ratio
}
if _is_package_available("pynvml"):
power = torch.cuda.power_draw(device_idx)
gpu_memory_logs[f"gpu/{device_idx}/power"] = power / 1000 # Watts
if dist.is_available() and dist.is_initialized():
gathered_logs = [None] * dist.get_world_size()
dist.all_gather_object(gathered_logs, gpu_memory_logs)
gpu_memory_logs = {k: v for d in gathered_logs for k, v in d.items()}
else:
gpu_memory_logs = {}
if not self._initialized:
self.setup(args, state, model)
if state.is_world_process_zero:
non_scalar_logs = {k: v for k, v in logs.items() if k not in single_value_scalars}
non_scalar_logs = rewrite_logs(non_scalar_logs)
self._trackio.log({**non_scalar_logs, **gpu_memory_logs, "train/global_step": state.global_step})
def on_save(self, args, state, control, **kwargs):
return
def on_predict(self, args, state, control, metrics, **kwargs):
if self._trackio is None:
return
if not self._initialized:
self.setup(args, state, **kwargs)
if state.is_world_process_zero:
metrics = rewrite_logs(metrics)
self._trackio.log(metrics)
| TrackioCallback |
python | django__django | tests/generic_views/views.py | {
"start": 841,
"end": 919
} | class ____(generic.DetailView):
queryset = Author.objects.all()
| AuthorDetail |
python | pytorch__pytorch | benchmarks/dynamo/genai_layers/kernels.py | {
"start": 11426,
"end": 14149
} | class ____(BenchmarkKernel):
def __init__(self, script_args):
super().__init__(script_args)
self.available_backends = ["eager", "compiled", "quack", "liger"]
def get_shapes(self) -> tuple[tuple[int, ...], ...]:
return (
(32768, 256),
(32768, 512),
(32768, 1024),
(32768, 2048),
(32768, 4096),
(32768, 8192),
(32768, 16384),
(32768, 32768),
(32768, 65536),
(16384, 131072),
(8192, 262144),
) + extra_shapes_for_norm
def get_memory_bytes(self, args, kwargs) -> int:
x, w = args
M, N = x.shape
return 2 * M * N * x.dtype.itemsize + N * w.dtype.itemsize
def rms_norm_ref(self, x, w):
x_f32 = x.float()
return (
x_f32
* torch.rsqrt(torch.mean(x_f32.square(), dim=-1, keepdim=True) + 1e-6)
* w
).to(x.dtype)
def eager(self, args, kwargs=None) -> Any:
assert kwargs is None
x, w = args
return lambda: self.rms_norm_ref(x, w)
def compiled(self, args, kwargs=None) -> Any:
assert kwargs is None
x, w = args
# Mark batch size as dynamic for realistic workload
torch._dynamo.mark_dynamic(x, 0)
compiled_rms_norm = torch.compile(
self.rms_norm_ref, mode=self.compile_mode, fullgraph=True
)
return lambda: compiled_rms_norm(x, w)
def quack(self, args, kwargs=None) -> Any:
# Note: only supper weight with float32 dtype
from quack.rmsnorm import _rmsnorm_fwd
x, w = args
y = torch.empty_like(x)
def quack_fwd():
_rmsnorm_fwd(
x,
w,
out=y,
bias=None,
rstd=None,
residual=None,
residual_out=None,
eps=1e-6,
)
return y
return quack_fwd
def liger(self, args, kwargs) -> Any:
from liger_kernel.transformers.rms_norm import LigerRMSNorm
x, w = args
M, N = x.shape
liger_rmsnorm = LigerRMSNorm(hidden_size=N, eps=1e-6).cuda()
liger_rmsnorm.weight.data.copy_(w)
return lambda: liger_rmsnorm(x)
def benchmark(self):
for M, N in self.get_shapes():
print(f"Tensor dimensions: [{M}, {N}]")
torch_dtype = cutlass_torch.dtype(cutlass.BFloat16)
x = torch.randn(M, N, device="cuda", dtype=torch_dtype)
w = torch.randn(N, device="cuda", dtype=torch.float32)
self.benchmark_single_shape((x, w), setting=f"shape: [{M}, {N}]")
| RMSNormForward |
python | wandb__wandb | wandb/sdk/mailbox/mailbox.py | {
"start": 372,
"end": 469
} | class ____(Exception):
"""The mailbox has been closed and cannot be used."""
| MailboxClosedError |
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 6526,
"end": 8767
} | class ____(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
| BaseModelOutputWithPast |
python | python-excel__xlwt | tests/test_unicodeutils.py | {
"start": 150,
"end": 4975
} | class ____(unittest.TestCase):
def test_upack1(self):
result = b'\x1d\x00abcdefghijklmnopqrstuvwxyz\xd6\xc4\xdc'
ustr = upack1("abcdefghijklmnopqrstuvwxyzÖÄÜ")
self.assertEqual(ustr, result)
def test_upack2_ascii(self):
result = b'\x1d\x00\x00abcdefghijklmnopqrstuvwxyz\xd6\xc4\xdc'
ustr = upack2("abcdefghijklmnopqrstuvwxyzÖÄÜ")
self.assertEqual(ustr, result)
def test_upack2_latin1(self):
result = b'\x1d\x00\x00abcdefghijklmnopqrstuvwxyz\xd6\xc4\xdc'
ustr = upack2("abcdefghijklmnopqrstuvwxyzÖÄÜ", encoding='latin1')
self.assertEqual(ustr, result)
def test_upack2_cp1251(self):
result = b'\x1d\x00\x00abcdefghijklmnopqrstuvwxyz\xce\xeb\xff'
ustr = upack2("abcdefghijklmnopqrstuvwxyz\xce\xeb\xff", encoding='cp1251')
self.assertEqual(ustr, result)
def test_unicode(self):
chr_ = chr if (sys.version_info[0] >= 3) else unichr
result = b'\x00\x02\x01\x00\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\x07\x00\x08\x00\t\x00\n\x00\x0b\x00\x0c\x00\r\x00\x0e\x00\x0f\x00\x10\x00\x11\x00\x12\x00\x13\x00\x14\x00\x15\x00\x16\x00\x17\x00\x18\x00\x19\x00\x1a\x00\x1b\x00\x1c\x00\x1d\x00\x1e\x00\x1f\x00 \x00!\x00"\x00#\x00$\x00%\x00&\x00\'\x00(\x00)\x00*\x00+\x00,\x00-\x00.\x00/\x000\x001\x002\x003\x004\x005\x006\x007\x008\x009\x00:\x00;\x00<\x00=\x00>\x00?\x00@\x00A\x00B\x00C\x00D\x00E\x00F\x00G\x00H\x00I\x00J\x00K\x00L\x00M\x00N\x00O\x00P\x00Q\x00R\x00S\x00T\x00U\x00V\x00W\x00X\x00Y\x00Z\x00[\x00\\\x00]\x00^\x00_\x00`\x00a\x00b\x00c\x00d\x00e\x00f\x00g\x00h\x00i\x00j\x00k\x00l\x00m\x00n\x00o\x00p\x00q\x00r\x00s\x00t\x00u\x00v\x00w\x00x\x00y\x00z\x00{\x00|\x00}\x00~\x00\x7f\x00\x80\x00\x81\x00\x82\x00\x83\x00\x84\x00\x85\x00\x86\x00\x87\x00\x88\x00\x89\x00\x8a\x00\x8b\x00\x8c\x00\x8d\x00\x8e\x00\x8f\x00\x90\x00\x91\x00\x92\x00\x93\x00\x94\x00\x95\x00\x96\x00\x97\x00\x98\x00\x99\x00\x9a\x00\x9b\x00\x9c\x00\x9d\x00\x9e\x00\x9f\x00\xa0\x00\xa1\x00\xa2\x00\xa3\x00\xa4\x00\xa5\x00\xa6\x00\xa7\x00\xa8\x00\xa9\x00\xaa\x00\xab\x00\xac\x00\xad\x00\xae\x00\xaf\x00\xb0\x00\xb1\x00\xb2\x00\xb3\x00\xb4\x00\xb5\x00\xb6\x00\xb7\x00\xb8\x00\xb9\x00\xba\x00\xbb\x00\xbc\x00\xbd\x00\xbe\x00\xbf\x00\xc0\x00\xc1\x00\xc2\x00\xc3\x00\xc4\x00\xc5\x00\xc6\x00\xc7\x00\xc8\x00\xc9\x00\xca\x00\xcb\x00\xcc\x00\xcd\x00\xce\x00\xcf\x00\xd0\x00\xd1\x00\xd2\x00\xd3\x00\xd4\x00\xd5\x00\xd6\x00\xd7\x00\xd8\x00\xd9\x00\xda\x00\xdb\x00\xdc\x00\xdd\x00\xde\x00\xdf\x00\xe0\x00\xe1\x00\xe2\x00\xe3\x00\xe4\x00\xe5\x00\xe6\x00\xe7\x00\xe8\x00\xe9\x00\xea\x00\xeb\x00\xec\x00\xed\x00\xee\x00\xef\x00\xf0\x00\xf1\x00\xf2\x00\xf3\x00\xf4\x00\xf5\x00\xf6\x00\xf7\x00\xf8\x00\xf9\x00\xfa\x00\xfb\x00\xfc\x00\xfd\x00\xfe\x00\xff\x00\x00\x01\x01\x01\x02\x01\x03\x01\x04\x01\x05\x01\x06\x01\x07\x01\x08\x01\t\x01\n\x01\x0b\x01\x0c\x01\r\x01\x0e\x01\x0f\x01\x10\x01\x11\x01\x12\x01\x13\x01\x14\x01\x15\x01\x16\x01\x17\x01\x18\x01\x19\x01\x1a\x01\x1b\x01
\x1c\x01\x1d\x01\x1e\x01\x1f\x01 \x01!\x01"\x01#\x01$\x01%\x01&\x01\'\x01(\x01)\x01*\x01+\x01,\x01-\x01.\x01/\x010\x011\x012\x013\x014\x015\x016\x017\x018\x019\x01:\x01;\x01<\x01=\x01>\x01?\x01@\x01A\x01B\x01C\x01D\x01E\x01F\x01G\x01H\x01I\x01J\x01K\x01L\x01M\x01N\x01O\x01P\x01Q\x01R\x01S\x01T\x01U\x01V\x01W\x01X\x01Y\x01Z\x01[\x01\\\x01]\x01^\x01_\x01`\x01a\x01b\x01c\x01d\x01e\x01f\x01g\x01h\x01i\x01j\x01k\x01l\x01m\x01n\x01o\x01p\x01q\x01r\x01s\x01t\x01u\x01v\x01w\x01x\x01y\x01z\x01{\x01|\x01}\x01~\x01\x7f\x01\x80\x01\x81\x01\x82\x01\x83\x01\x84\x01\x85\x01\x86\x01\x87\x01\x88\x01\x89\x01\x8a\x01\x8b\x01\x8c\x01\x8d\x01\x8e\x01\x8f\x01\x90\x01\x91\x01\x92\x01\x93\x01\x94\x01\x95\x01\x96\x01\x97\x01\x98\x01\x99\x01\x9a\x01\x9b\x01\x9c\x01\x9d\x01\x9e\x01\x9f\x01\xa0\x01\xa1\x01\xa2\x01\xa3\x01\xa4\x01\xa5\x01\xa6\x01\xa7\x01\xa8\x01\xa9\x01\xaa\x01\xab\x01\xac\x01\xad\x01\xae\x01\xaf\x01\xb0\x01\xb1\x01\xb2\x01\xb3\x01\xb4\x01\xb5\x01\xb6\x01\xb7\x01\xb8\x01\xb9\x01\xba\x01\xbb\x01\xbc\x01\xbd\x01\xbe\x01\xbf\x01\xc0\x01\xc1\x01\xc2\x01\xc3\x01\xc4\x01\xc5\x01\xc6\x01\xc7\x01\xc8\x01\xc9\x01\xca\x01\xcb\x01\xcc\x01\xcd\x01\xce\x01\xcf\x01\xd0\x01\xd1\x01\xd2\x01\xd3\x01\xd4\x01\xd5\x01\xd6\x01\xd7\x01\xd8\x01\xd9\x01\xda\x01\xdb\x01\xdc\x01\xdd\x01\xde\x01\xdf\x01\xe0\x01\xe1\x01\xe2\x01\xe3\x01\xe4\x01\xe5\x01\xe6\x01\xe7\x01\xe8\x01\xe9\x01\xea\x01\xeb\x01\xec\x01\xed\x01\xee\x01\xef\x01\xf0\x01\xf1\x01\xf2\x01\xf3\x01\xf4\x01\xf5\x01\xf6\x01\xf7\x01\xf8\x01\xf9\x01\xfa\x01\xfb\x01\xfc\x01\xfd\x01\xfe\x01\xff\x01'
unicodestring = ''.join( [chr_(i) for i in range(0x200)])
self.assertEqual(result, upack2(unicodestring))
def test_upack2rt(self):
result = b'\x06\x00\x09\x01\x00a\x00b\x00c\x00\x91\x03\x92\x03\x93\x03', b'\x00\x00\x0C\x00'
self.assertEqual(result, upack2rt([('abcΑΒΓ', 12)]))
| TestUpack |
python | tensorflow__tensorflow | tensorflow/python/framework/extension_type_test.py | {
"start": 37940,
"end": 39945
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_v2_only
def testDataset(self):
mt = MaskedTensorV3([[1], [2], [3]], [[True], [False], [True]])
ds = dataset_ops.DatasetV2.from_tensors(mt)
self.assertEqual(next(iter(ds)), mt)
@test_util.run_v2_only
def testDatasetBatch(self):
xs = MaskedTensorV3([[1], [2], [3]], [[True], [False], [True]])
x0 = MaskedTensorV3(xs.values[0], xs.mask[0])
ds = dataset_ops.DatasetV2.from_tensors(xs)
self.assertEqual(next(iter(ds)), xs)
ds = ds.unbatch()
self.assertEqual(next(iter(ds)), x0)
ds = dataset_ops.DatasetV2.from_tensor_slices(xs)
self.assertEqual(next(iter(ds)), x0)
ds = ds.batch(3, drop_remainder=True)
self.assertEqual(next(iter(ds)), xs)
@test_util.run_v2_only
def testDatasetBatchRagged(self):
xs = MaskedTensorV3(
ragged_factory_ops.constant([[1], [2, 3], [4]]),
ragged_factory_ops.constant([[True], [False], [True]]),
)
x0 = MaskedTensorV3(xs.values[0], xs.mask[0])
ds = dataset_ops.DatasetV2.from_tensors(xs)
self.assertEqual(next(iter(ds)), xs)
ds = ds.unbatch()
self.assertEqual(next(iter(ds)), x0)
ds = dataset_ops.DatasetV2.from_tensor_slices(xs)
self.assertEqual(next(iter(ds)), x0)
ds = ds.batch(3, drop_remainder=True)
self.assertEqual(next(iter(ds)), xs)
@test_util.run_v2_only
def testDistributedDataset(self):
strategy = mirrored_strategy.MirroredStrategy(['GPU:0', 'GPU:1'])
mt = MaskedTensorV3([[1], [2], [3], [4]], [[True], [False], [True], [True]])
ds = dataset_ops.DatasetV2.from_tensor_slices(mt).batch(2)
dist_dataset = strategy.experimental_distribute_dataset(ds)
expect = MaskedTensorV3([[1]], [[True]])
per_replica_result = next(iter(dist_dataset))
self.assertEqual(per_replica_result.values[0].values, expect.values[0])
self.assertEqual(per_replica_result.values[0].mask, expect.mask[0])
@test_util.run_all_in_graph_and_eager_modes
| ExtensionTypeIntegrationTest |
python | huggingface__transformers | src/transformers/models/longt5/modeling_longt5.py | {
"start": 32782,
"end": 45419
} | class ____(nn.Module):
def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None:
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.relative_attention_max_distance = config.relative_attention_max_distance
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.local_radius = config.local_radius
self.block_len = self.local_radius + 1
self.global_block_size = config.global_block_size
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
# Relativen attention bias & Layer norm for global attention
if self.has_relative_attention_bias:
self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
@staticmethod
# Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
def compute_bias(self, block_length: int):
"""Compute binned relative position bias"""
target_device = (
self.relative_attention_bias.weight.device
if self.relative_attention_bias.weight.device.type != "meta"
else None
)
memory_position = torch.arange(3 * block_length, dtype=torch.long, device=target_device)
context_position = memory_position[block_length:-block_length]
# (block_length, 3 * block_length)
relative_position = memory_position[None, :] - context_position[:, None]
relative_position_bucket = self._relative_position_bucket(
relative_position, # (block_length, 3 * block_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
# (block_length, 3 * block_length, num_heads)
values = self.relative_attention_bias(relative_position_bucket)
# (1, 1, num_heads, block_length, 3 * block_length)
values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0)
return values
def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor:
# (batch_size, 1, seq_len, global_seq_len)
side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...]
attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10)
# (batch_size, seq_len, global_seq_len)
side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size)
side_relative_position_bucket = self._relative_position_bucket(
side_relative_position,
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
# (batch_size, seq_len, global_seq_len, num_heads)
side_bias = self.global_relative_attention_bias(side_relative_position_bucket)
# (batch_size, num_heads, seq_len, global_seq_len)
side_bias = side_bias.permute([0, 3, 1, 2])
# (batch_size, num_heads, seq_len, global_seq_len)
attention_side_bias = attention_side_bias + side_bias
return attention_side_bias
def forward(
self,
hidden_states,
mask=None,
position_bias=None,
output_attentions=False,
):
batch_size, seq_length = hidden_states.shape[:2]
def shape(states):
"""projection"""
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim)
def unshape(states):
"""reshape"""
return states.contiguous().view(batch_size, -1, self.inner_dim)
# Prepare components for transient-global attention
# Obtain block_ids and global_segment_ids
# global_seq_len := seq_len // self.global_block_size
# shapes: (batch_size, seq_len) & (batch_size, global_seq_len)
block_ids, global_segment_ids = _make_global_fixed_block_ids(
mask if mask is not None else torch.ones(hidden_states.shape[:-1]),
self.global_block_size,
)
# Create global inputs
_global_seq_len = global_segment_ids.shape[-1]
global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len)
global_inputs = self.global_input_layer_norm(global_inputs)
# get query states -> (batch_size, seq_length, n_heads, dim_per_head)
query_states = shape(self.q(hidden_states))
key_states = shape(self.k(hidden_states))
value_states = shape(self.v(hidden_states))
# Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head)
side_key_states = shape(self.k(global_inputs))
side_value_states = shape(self.v(global_inputs))
# Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head)
query_states = _split_into_blocks(query_states, self.block_len, dim=1)
key_states = _split_into_blocks(key_states, self.block_len, dim=1)
value_states = _split_into_blocks(value_states, self.block_len, dim=1)
# Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2)
value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2)
# Tile side inputs across local key/value blocks
# New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head)
reps = [1] * (side_key_states.ndim + 1)
reps[1] = key_states.shape[1]
side_key_states = side_key_states.unsqueeze(1).repeat(reps)
side_value_states = side_value_states.unsqueeze(1).repeat(reps)
# Concatenate "local" and "side"/"global" key/value states to allow each token to attend global aggregated ones
# New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head)
key_states = torch.cat([key_states, side_key_states], dim=2)
value_states = torch.cat([value_states, side_value_states], dim=2)
# Compute scores -> (batch_size, num_block, n_heads, block_len, 3 * block_len + global_seq_len)
scores = torch.einsum("...qhd,...khd->...hqk", query_states, key_states)
if mask is not None:
# We need to adjust position bias shape to be sum with mask
local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device)
# Replace masked positions with -10_000 (according to the original implementation)
local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10)
else:
local_attention_mask = None
if position_bias is None:
# position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len)
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, 1, self.n_heads, self.block_len, 3 * self.block_len),
device=scores.device,
dtype=scores.dtype,
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(self.block_len)
if local_attention_mask is not None:
# (batch_size, 1, n_heads, block_len, 3 * block_len)
position_bias = position_bias + local_attention_mask.transpose(1, 2)
position_bias = position_bias.type(scores.dtype)
# Calculate global/side bias - shape: # (batch_size, num_heads, seq_len, global_seq_len)
if mask is None:
mask = torch.ones(batch_size, seq_length)
# (batch_size, num_heads, seq_len, global_seq_len)
side_position_bias = self.compute_side_bias(mask, global_segment_ids)
# (batch_size, num_blocks, num_heads, block_len, global_seq_len)
side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2)
side_position_bias = side_position_bias.type(scores.dtype).to(scores.device)
# (batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len)
position_bias = torch.cat([position_bias, side_position_bias], dim=-1)
scores += position_bias
# (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len)
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_weights = attn_weights.type(value_states.dtype)
attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states))
attn_output = attn_output[:, :seq_length, :]
attn_output = self.o(attn_output)
outputs = (attn_output, position_bias)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
# Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5
| LongT5TransientGlobalAttention |
python | eventlet__eventlet | tests/ssl_test.py | {
"start": 518,
"end": 15042
} | class ____(tests.LimitedTestCase):
def setUp(self):
# disabling socket.ssl warnings because we're testing it here
warnings.filterwarnings(
action='ignore',
message='.*socket.ssl.*',
category=DeprecationWarning)
super().setUp()
def test_duplex_response(self):
def serve(listener):
sock, addr = listener.accept()
sock.recv(8192)
sock.sendall(b'response')
sock = listen_ssl_socket()
server_coro = eventlet.spawn(serve, sock)
client = ssl.wrap_socket(eventlet.connect(sock.getsockname()))
client.sendall(b'line 1\r\nline 2\r\n\r\n')
self.assertEqual(client.recv(8192), b'response')
server_coro.wait()
def test_ssl_context(self):
def serve(listener):
sock, addr = listener.accept()
sock.recv(8192)
sock.sendall(b'response')
sock = listen_ssl_socket()
server_coro = eventlet.spawn(serve, sock)
context = ssl.create_default_context()
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(tests.certificate_file)
client = context.wrap_socket(
eventlet.connect(sock.getsockname()),
server_hostname='Test')
client.sendall(b'line 1\r\nline 2\r\n\r\n')
self.assertEqual(client.recv(8192), b'response')
server_coro.wait()
def test_ssl_close(self):
def serve(listener):
sock, addr = listener.accept()
sock.recv(8192)
try:
self.assertEqual(b'', sock.recv(8192))
except (greenio.SSL.ZeroReturnError,
BrokenPipeError):
pass
sock = listen_ssl_socket()
server_coro = eventlet.spawn(serve, sock)
raw_client = eventlet.connect(sock.getsockname())
client = ssl.wrap_socket(raw_client)
client.sendall(b'X')
greenio.shutdown_safe(client)
client.close()
server_coro.wait()
def test_ssl_connect(self):
def serve(listener):
sock, addr = listener.accept()
sock.recv(8192)
sock = listen_ssl_socket()
server_coro = eventlet.spawn(serve, sock)
raw_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_client = ssl.wrap_socket(raw_client)
ssl_client.connect(sock.getsockname())
ssl_client.sendall(b'abc')
greenio.shutdown_safe(ssl_client)
ssl_client.close()
server_coro.wait()
def test_recv_after_ssl_connect(self):
def serve(listener):
sock, addr = listener.accept()
sock.sendall(b'hjk')
sock = listen_ssl_socket()
server_coro = eventlet.spawn(serve, sock)
raw_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_client = ssl.wrap_socket(raw_client)
# Important: We need to call connect() on an SSL socket, not a plain one.
# The bug was affecting that particular combination (create plain socket,
# wrap, call connect() on the SSL socket and try to recv) on Python 3.5.
ssl_client.connect(sock.getsockname())
# The call to recv used to fail with:
# Traceback (most recent call last):
# File "tests/ssl_test.py", line 99, in test_recv_after_ssl_connect
# self.assertEqual(ssl_client.recv(3), b'hjk')
# File "eventlet/green/ssl.py", line 194, in recv
# return self._base_recv(buflen, flags, into=False)
# File "eventlet/green/ssl.py", line 227, in _base_recv
# read = self.read(nbytes)
# File "eventlet/green/ssl.py", line 139, in read
# super(GreenSSLSocket, self).read, *args, **kwargs)
# File "eventlet/green/ssl.py", line 113, in _call_trampolining
# return func(*a, **kw)
# File "PYTHONLIB/python3.5/ssl.py", line 791, in read
# return self._sslobj.read(len, buffer)
# TypeError: read() argument 2 must be read-write bytes-like object, not None
self.assertEqual(ssl_client.recv(3), b'hjk')
greenio.shutdown_safe(ssl_client)
ssl_client.close()
server_coro.wait()
def test_ssl_unwrap(self):
def serve():
sock, addr = listener.accept()
self.assertEqual(sock.recv(6), b'before')
sock_ssl = ssl.wrap_socket(sock, tests.private_key_file, tests.certificate_file,
server_side=True)
sock_ssl.do_handshake()
self.assertEqual(sock_ssl.recv(6), b'during')
sock2 = sock_ssl.unwrap()
self.assertEqual(sock2.recv(5), b'after')
sock2.close()
listener = eventlet.listen(('127.0.0.1', 0))
server_coro = eventlet.spawn(serve)
client = eventlet.connect(listener.getsockname())
client.sendall(b'before')
client_ssl = ssl.wrap_socket(client)
client_ssl.do_handshake()
client_ssl.sendall(b'during')
client2 = client_ssl.unwrap()
client2.sendall(b'after')
server_coro.wait()
def test_sendall_cpu_usage(self):
"""SSL socket.sendall() busy loop
https://bitbucket.org/eventlet/eventlet/issue/134/greenssl-performance-issues
Idea of this test is to check that GreenSSLSocket.sendall() does not busy loop
retrying .send() calls, but instead trampolines until socket is writeable.
BUFFER_SIZE and SENDALL_SIZE are magic numbers inferred through trial and error.
"""
# Time limit resistant to busy loops
self.set_alarm(1)
stage_1 = eventlet.event.Event()
BUFFER_SIZE = 1000
SENDALL_SIZE = 100000
def serve(listener):
conn, _ = listener.accept()
conn.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, BUFFER_SIZE)
self.assertEqual(conn.recv(8), b'request')
conn.sendall(b'response')
stage_1.wait()
conn.sendall(b'x' * SENDALL_SIZE)
server_sock = listen_ssl_socket()
server_coro = eventlet.spawn(serve, server_sock)
client_sock = eventlet.connect(server_sock.getsockname())
client_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, BUFFER_SIZE)
client = ssl.wrap_socket(client_sock)
client.sendall(b'request')
self.assertEqual(client.recv(8), b'response')
stage_1.send()
tests.check_idle_cpu_usage(0.2, 0.1)
server_coro.kill()
def test_greensslobject(self):
def serve(listener):
sock, addr = listener.accept()
sock.sendall(b'content')
greenio.shutdown_safe(sock)
sock.close()
listener = listen_ssl_socket()
eventlet.spawn(serve, listener)
client = ssl.wrap_socket(eventlet.connect(listener.getsockname()))
self.assertEqual(client.recv(1024), b'content')
self.assertEqual(client.recv(1024), b'')
@pytest.mark.xfail(sys.platform == "darwin", reason="Fails on macOS for some reason")
def test_regression_gh_17(self):
# https://github.com/eventlet/eventlet/issues/17
# ssl wrapped but unconnected socket methods go special code path
# test that path at least for syntax/typo errors
sock = ssl.wrap_socket(socket.socket())
sock.settimeout(0.01)
try:
sock.sendall(b'')
except ssl.SSLError as e:
assert 'timed out' in str(e)
def test_no_handshake_block_accept_loop(self):
listener = listen_ssl_socket()
listener.settimeout(0.3)
def serve(sock):
try:
name = sock.recv(8)
sock.sendall(b'hello ' + name)
except Exception:
# ignore evil clients
pass
finally:
greenio.shutdown_safe(sock)
sock.close()
def accept_loop():
while True:
try:
sock, _ = listener.accept()
except OSError:
return
eventlet.spawn(serve, sock)
loopt = eventlet.spawn(accept_loop)
# evil no handshake
evil = eventlet.connect(listener.getsockname())
good = ssl.wrap_socket(eventlet.connect(listener.getsockname()))
good.sendall(b'good')
response = good.recv(16)
good.close()
assert response == b'hello good'
evil.close()
listener.close()
loopt.wait()
eventlet.sleep(0)
def test_receiving_doesnt_block_if_there_is_already_decrypted_buffered_data(self):
# Here's what could (and would) happen before the relevant bug was fixed (assuming method
# M was trampolining unconditionally before actually reading):
# 1. One side sends n bytes, leaves connection open (important)
# 2. The other side uses method M to read m (where m < n) bytes, the underlying SSL
# implementation reads everything from the underlying socket, decrypts all n bytes,
# returns m of them and buffers n-m to be read later.
# 3. The other side tries to read the remainder of the data (n-m bytes), this blocks
# because M trampolines uncoditionally and trampoline will hang because reading from
# the underlying socket would block. It would block because there's no data to be read
# and the connection is still open; leaving the connection open /mentioned in 1./ is
# important because otherwise trampoline would return immediately and the test would pass
# even with the bug still present in the code).
#
# The solution is to first request data from the underlying SSL implementation and only
# trampoline if we actually need to read some data from the underlying socket.
#
# GreenSSLSocket.recv() wasn't broken but I've added code to test it as well for
# completeness.
content = b'xy'
def recv(sock, expected):
assert sock.recv(len(expected)) == expected
def recv_into(sock, expected):
buf = bytearray(len(expected))
assert sock.recv_into(buf, len(expected)) == len(expected)
assert buf == expected
for read_function in [recv, recv_into]:
print('Trying %s...' % (read_function,))
listener = listen_ssl_socket()
def accept(listener):
sock, addr = listener.accept()
sock.sendall(content)
return sock
accepter = eventlet.spawn(accept, listener)
client_to_server = None
try:
client_to_server = ssl.wrap_socket(eventlet.connect(listener.getsockname()))
for character in iter(content):
character = bytes((character,))
print('We have %d already decrypted bytes pending, expecting: %s' % (
client_to_server.pending(), character))
read_function(client_to_server, character)
finally:
if client_to_server is not None:
client_to_server.close()
server_to_client = accepter.wait()
# Very important: we only want to close the socket *after* the other side has
# read the data it wanted already, otherwise this would defeat the purpose of the
# test (see the comment at the top of this test).
server_to_client.close()
listener.close()
def test_context_wrapped_accept(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(tests.certificate_file, tests.private_key_file)
expected = "success:{}".format(random.random()).encode()
def client(addr):
client_tls = ssl.wrap_socket(
eventlet.connect(addr),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=tests.certificate_file,
)
client_tls.send(expected)
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.bind(('localhost', 0))
server_sock.listen(1)
eventlet.spawn(client, server_sock.getsockname())
server_tls = context.wrap_socket(server_sock, server_side=True)
peer, _ = server_tls.accept()
assert peer.recv(64) == expected
peer.close()
def test_explicit_keys_accept(self):
expected = "success:{}".format(random.random()).encode()
def client(addr):
client_tls = ssl.wrap_socket(
eventlet.connect(addr),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=tests.certificate_file,
)
client_tls.send(expected)
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.bind(('localhost', 0))
server_sock.listen(1)
eventlet.spawn(client, server_sock.getsockname())
server_tls = ssl.wrap_socket(
server_sock, server_side=True,
keyfile=tests.private_key_file, certfile=tests.certificate_file,
)
peer, _ = server_tls.accept()
assert peer.recv(64) == expected
peer.close()
def test_client_check_hostname(self):
# stdlib API compatibility
# https://github.com/eventlet/eventlet/issues/567
def serve(listener):
sock, addr = listener.accept()
sock.recv(64)
sock.sendall(b"response")
sock.close()
listener = listen_ssl_socket()
server_coro = eventlet.spawn(serve, listener)
ctx = ssl.create_default_context()
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.check_hostname = True
ctx.load_verify_locations(tests.certificate_file)
ctx.load_cert_chain(tests.certificate_file, tests.private_key_file)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client = ctx.wrap_socket(sock, server_hostname="Test")
client.connect(listener.getsockname())
client.send(b"check_hostname works")
client.recv(64)
server_coro.wait()
@tests.skip_if(sys.version_info < (3, 7))
def test_context_version_setters(self):
tests.run_isolated("ssl_context_version_setters.py")
| SSLTest |
python | sympy__sympy | sympy/assumptions/cnf.py | {
"start": 7690,
"end": 10795
} | class ____:
"""
Class to represent CNF of a Boolean expression.
Consists of set of clauses, which themselves are stored as
frozenset of Literal objects.
Examples
========
>>> from sympy import Q
>>> from sympy.assumptions.cnf import CNF
>>> from sympy.abc import x
>>> cnf = CNF.from_prop(Q.real(x) & ~Q.zero(x))
>>> cnf.clauses
{frozenset({Literal(Q.real(x), False)}), frozenset({Literal(Q.zero(x), True)})}
"""
def __init__(self, clauses=None):
if not clauses:
clauses = set()
self.clauses = clauses
def add(self, prop):
clauses = CNF.to_CNF(prop).clauses
self.add_clauses(clauses)
def __str__(self):
s = ' & '.join(
['(' + ' | '.join([str(lit) for lit in clause]) +')'
for clause in self.clauses]
)
return s
def extend(self, props):
for p in props:
self.add(p)
return self
def copy(self):
return CNF(set(self.clauses))
def add_clauses(self, clauses):
self.clauses |= clauses
@classmethod
def from_prop(cls, prop):
res = cls()
res.add(prop)
return res
def __iand__(self, other):
self.add_clauses(other.clauses)
return self
def all_predicates(self):
predicates = set()
for c in self.clauses:
predicates |= {arg.lit for arg in c}
return predicates
def _or(self, cnf):
clauses = set()
for a, b in product(self.clauses, cnf.clauses):
tmp = set(a)
tmp.update(b)
clauses.add(frozenset(tmp))
return CNF(clauses)
def _and(self, cnf):
clauses = self.clauses.union(cnf.clauses)
return CNF(clauses)
def _not(self):
clss = list(self.clauses)
ll = {frozenset((~x,)) for x in clss[-1]}
ll = CNF(ll)
for rest in clss[:-1]:
p = {frozenset((~x,)) for x in rest}
ll = ll._or(CNF(p))
return ll
def rcall(self, expr):
clause_list = []
for clause in self.clauses:
lits = [arg.rcall(expr) for arg in clause]
clause_list.append(OR(*lits))
expr = AND(*clause_list)
return distribute_AND_over_OR(expr)
@classmethod
def all_or(cls, *cnfs):
b = cnfs[0].copy()
for rest in cnfs[1:]:
b = b._or(rest)
return b
@classmethod
def all_and(cls, *cnfs):
b = cnfs[0].copy()
for rest in cnfs[1:]:
b = b._and(rest)
return b
@classmethod
def to_CNF(cls, expr):
expr = to_NNF(expr)
expr = distribute_AND_over_OR(expr)
return expr
@classmethod
def CNF_to_cnf(cls, cnf):
"""
Converts CNF object to SymPy's boolean expression
retaining the form of expression.
"""
def remove_literal(arg):
return Not(arg.lit) if arg.is_Not else arg.lit
return And(*(Or(*(remove_literal(arg) for arg in clause)) for clause in cnf.clauses))
| CNF |
python | allegroai__clearml | clearml/backend_api/session/jsonmodels/builders.py | {
"start": 3979,
"end": 4831
} | class ____(Builder):
def __init__(self, type: type, *args: Any, **kwargs: Any) -> None:
super(PrimitiveBuilder, self).__init__(*args, **kwargs)
self.type = type
def build(self) -> dict:
schema = {}
if issubclass(self.type, six.string_types):
obj_type = "string"
elif issubclass(self.type, bool):
obj_type = "boolean"
elif issubclass(self.type, int):
obj_type = "number"
elif issubclass(self.type, float):
obj_type = "number"
else:
raise errors.FieldNotSupported("Can't specify value schema!", self.type)
if self.nullable:
obj_type = [obj_type, "null"]
schema["type"] = obj_type
if self.has_default:
schema["default"] = self.default
return schema
| PrimitiveBuilder |
python | jina-ai__jina | tests/unit/orchestrate/flow/flow-construct/test_flow.py | {
"start": 5221,
"end": 7698
} | class ____(Executor):
@requests
def foo(self, docs, **kwargs):
for d in docs:
d.embedding = np.array([1, 2, 3])
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_with_publish_driver(protocol):
def validate(da):
for d in da:
assert d.embedding is not None
f = (
Flow(protocol=protocol)
.add(name='r2', uses=DummyOneHotTextEncoder)
.add(name='r3', uses=DummyOneHotTextEncoder, needs='gateway')
.needs(needs=['r2', 'r3'])
)
with f:
da = f.index([Document(text='text_1'), Document(text='text_2')])
_validate_flow(f)
validate(da)
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_arbitrary_needs(protocol):
f = (
Flow(protocol=protocol)
.add(name='p1')
.add(name='p2', needs='gateway')
.add(name='p3', needs='gateway')
.add(name='p4', needs='gateway')
.add(name='p5', needs='gateway')
.needs(['p2', 'p4'], name='r1')
.needs(['p3', 'p5'], name='r2')
.needs(['p1', 'r1'], name='r3')
.needs(['r2', 'r3'], name='r4')
)
with f:
f.index([Document(text='abbcs'), Document(text='efgh')])
_validate_flow(f)
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_needs_all(protocol):
f = Flow(protocol=protocol).add(name='p1', needs='gateway').needs_all(name='r1')
assert f._deployment_nodes['r1'].needs == {'p1'}
f = (
Flow(protocol=protocol)
.add(name='p1', needs='gateway')
.add(name='p2', needs='gateway')
.add(name='p3', needs='gateway')
.needs(needs=['p1', 'p2'], name='r1')
.needs_all(name='r2')
)
assert f._deployment_nodes['r2'].needs == {'p3', 'r1'}
with f:
f.index(from_ndarray(np.random.random([10, 10])))
f = (
Flow(protocol=protocol)
.add(name='p1', needs='gateway')
.add(name='p2', needs='gateway')
.add(name='p3', needs='gateway')
.needs(needs=['p1', 'p2'], name='r1')
.needs_all(name='r2')
.add(name='p4', needs='r2')
)
assert f._deployment_nodes['r2'].needs == {'p3', 'r1'}
assert f._deployment_nodes['p4'].needs == {'r2'}
with f:
f.index(from_ndarray(np.random.random([10, 10])))
_validate_flow(f)
| DummyOneHotTextEncoder |
python | FactoryBoy__factory_boy | factory/base.py | {
"start": 2906,
"end": 2981
} | class ____:
abstract = True
strategy = enums.CREATE_STRATEGY
| BaseMeta |
python | PrefectHQ__prefect | src/integrations/prefect-redis/prefect_redis/blocks.py | {
"start": 301,
"end": 5774
} | class ____(WritableFileSystem):
"""
Block used to manage authentication with a Redis database
Attributes:
host: The host of the Redis server
port: The port the Redis server is running on
db: The database to write to and read from
username: The username to use when connecting to the Redis server
password: The password to use when connecting to the Redis server
ssl: Whether to use SSL when connecting to the Redis server
Example:
Create a new block from hostname, username and password:
```python
from prefect_redis import RedisDatabase
block = RedisDatabase(
host="myredishost.com", username="redis", password="SuperSecret")
block.save("BLOCK_NAME")
```
Create a new block from a connection string
```python
from prefect_redis import RedisBlock
block = RedisBlock.from_url(""redis://redis:SuperSecret@myredishost.com:6379")
block.save("BLOCK_NAME")
```
Get Redis client in order to interact directly with Redis
```python
from prefect_redis import RedisBlock
block = RedisBlock.load("BLOCK_NAME")
redis_client = block.get_client()
```
"""
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/dfb02cfce09ce3ca88fea097659a83554dd7a850-596x512.png"
_block_type_name = "Redis Database"
host: str = Field(default="localhost", description="Redis hostname")
port: int = Field(default=DEFAULT_PORT, description="Redis port")
db: int = Field(default=0, description="Redis DB index")
username: Optional[SecretStr] = Field(default=None, description="Redis username")
password: Optional[SecretStr] = Field(default=None, description="Redis password")
ssl: bool = Field(default=False, description="Whether to use SSL")
def block_initialization(self) -> None:
"""Validate parameters"""
if not self.host:
raise ValueError("Missing hostname")
if self.username and not self.password:
raise ValueError("Missing password")
async def read_path(self, path: str) -> bytes:
"""Read a redis key
Args:
path: Redis key to read from
Returns:
Contents at key as bytes
"""
client = self.get_async_client()
ret = await client.get(path)
await client.close()
return ret
async def write_path(self, path: str, content: bytes) -> None:
"""Write to a redis key
Args:
path: Redis key to write to
content: Binary object to write
"""
client = self.get_async_client()
ret = await client.set(path, content)
await client.close()
return ret
def get_client(self) -> redis.Redis:
"""Get Redis Client
Returns:
An initialized Redis async client
"""
return redis.Redis(
host=self.host,
port=self.port,
username=self.username.get_secret_value() if self.username else None,
password=self.password.get_secret_value() if self.password else None,
db=self.db,
ssl=self.ssl,
)
def get_async_client(self) -> redis.asyncio.Redis:
"""Get Redis Client
Returns:
An initialized Redis async client
"""
return redis.asyncio.Redis(
host=self.host,
port=self.port,
username=self.username.get_secret_value() if self.username else None,
password=self.password.get_secret_value() if self.password else None,
db=self.db,
ssl=self.ssl,
)
@classmethod
def from_connection_string(
cls, connection_string: Union[str, SecretStr]
) -> "RedisDatabase":
"""Create block from a Redis connection string
Supports the following URL schemes:
- `redis://` creates a TCP socket connection
- `rediss://` creates a SSL wrapped TCP socket connection
Args:
connection_string: Redis connection string
Returns:
`RedisCredentials` instance
"""
connection_kwargs = parse_url(
connection_string
if isinstance(connection_string, str)
else connection_string.get_secret_value()
)
ssl = connection_kwargs.get("connection_class") == redis.asyncio.SSLConnection
return cls(
host=connection_kwargs.get("host", "localhost"),
port=connection_kwargs.get("port", DEFAULT_PORT),
db=connection_kwargs.get("db", 0),
username=connection_kwargs.get("username"),
password=connection_kwargs.get("password"),
ssl=ssl,
)
def as_connection_params(self) -> Dict[str, Any]:
"""
Return a dictionary suitable for unpacking
"""
data = self.model_dump()
data.pop("block_type_slug", None)
# Unwrap SecretStr fields
if self.username is not None:
data["username"] = self.username.get_secret_value()
else:
data.pop("username", None)
if self.password is not None:
data["password"] = self.password.get_secret_value()
else:
data.pop("password", None)
return data
| RedisDatabase |
python | pypa__hatch | tests/backend/metadata/test_hatch.py | {
"start": 3679,
"end": 4175
} | class ____:
def test_unknown(self, isolation):
with pytest.raises(ValueError, match="Unknown version source: foo"):
_ = HatchMetadata(isolation, {"version": {"source": "foo"}}, PluginManager()).version.source
def test_cached(self, isolation):
metadata = HatchMetadata(isolation, {"version": {}}, PluginManager())
assert metadata.version.source is metadata.version.source
assert isinstance(metadata.version.source, RegexSource)
| TestVersionSource |
python | huggingface__transformers | src/transformers/models/internvl/modeling_internvl.py | {
"start": 21501,
"end": 30245
} | class ____(InternVLPreTrainedModel):
_checkpoint_conversion_mapping = {
r"^language_model.model": "language_model",
}
def __init__(self, config: InternVLConfig):
super().__init__(config)
self.vision_tower = AutoModel.from_config(config.vision_config)
self.multi_modal_projector = InternVLMultiModalProjector(config)
self.language_model = AutoModel.from_config(config.text_config)
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def get_image_features(
self,
pixel_values: torch.FloatTensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
**kwargs,
):
"""
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)
The tensors corresponding to the input images.
vision_feature_layer (`int` or `list[int]`):
Layer index or list of layer indices to extract features from.
Returns:
vision_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.
"""
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
pixel_values = pixel_values.to(dtype=self.dtype) # fp16 compatibility
downsample_ratio = self.config.downsample_ratio
if vision_feature_layer == -1:
vision_features = self.vision_tower(pixel_values=pixel_values).last_hidden_state
else:
vision_features = self.vision_model(pixel_values=pixel_values).hidden_states[vision_feature_layer]
if vision_feature_select_strategy == "default":
vision_features = vision_features[:, 1:, :]
# Calculate dimensions based on vision features
channels = vision_features.shape[1]
feature_size = int(channels**0.5)
batch_size = vision_features.shape[0]
# Reshape tensor to spatial dimensions
vision_features = vision_features.reshape(batch_size, feature_size, feature_size, -1)
# Apply downsampling using pixel shuffle
vision_features = self.pixel_shuffle(vision_features, scale_factor=downsample_ratio)
# Reshape tensor to prepare for projection
vision_features = vision_features.reshape(batch_size, -1, vision_features.shape[-1])
# Project features through multi-modal projector
vision_features = self.multi_modal_projector(vision_features)
return vision_features
def get_placeholder_mask(
self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
n_image_features = image_features.shape[0] * image_features.shape[1]
if inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
return special_image_mask
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, InternVLModelOutputWithPast]:
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_features = self.get_image_features(
pixel_values=pixel_values,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
**kwargs,
)
return InternVLModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
def pixel_shuffle(self, vision_features: torch.Tensor, scale_factor: float = 0.5):
"""Perform pixel shuffle downsampling on vision features.
Args:
vision_features (`torch.Tensor`):
Input tensor of shape (batch_size, width, height, channels).
scale_factor (`float`, *optional*, defaults to `0.5`):
Factor by which to downsample. Default is 0.5, which halves the dimensions.
Returns:
vision_features (`torch.Tensor`):
Downsampled tensor of shape (batch_size, height*scale_factor, width*scale_factor, channels/(scale_factor^2)).
"""
batch_size, width, height, channels = vision_features.size()
if height % scale_factor != 0 or width % scale_factor != 0:
raise ValueError("Height and width must be divisible by scale_factor for proper downsampling.")
# Reshape to allow downsampling
vision_features = vision_features.view(
batch_size, width, int(height * scale_factor), int(channels / scale_factor)
)
# Permute dimensions to align downsampled axis correctly
vision_features = vision_features.permute(0, 2, 1, 3).contiguous()
# Reshape to achieve final downsampled dimensions
vision_features = vision_features.view(
batch_size, int(height * scale_factor), int(width * scale_factor), int(channels / (scale_factor**2))
)
# Swap height and width back for proper orientation
vision_features = vision_features.permute(0, 2, 1, 3).contiguous()
return vision_features
@dataclass
@auto_docstring(
custom_intro="""
Base class for InternVL causal language model (or autoregressive) outputs.
"""
)
| InternVLModel |
python | google__jax | jax/_src/state/types.py | {
"start": 18595,
"end": 19214
} | class ____:
pass
uninitialized = Uninitialized()
_ref_type_aval_mappings: dict[
type[Any], Callable[[Any], tuple[AbstractRef, Array | Uninitialized]],
] = {}
def _default_value_to_ref_aval(x: Any) -> tuple[AbstractRef, Array]:
# Default type mapping just creates an AbstractRef from the array's aval.
aval = core.get_aval(x)
return AbstractRef(aval), x
def get_ref_aval_from_value(x: Any):
if type(x) in _ref_type_aval_mappings:
return _ref_type_aval_mappings[type(x)](x)
return _default_value_to_ref_aval(x)
# === pinned, chained LinearVals ===
@dataclasses.dataclass(frozen=True)
| Uninitialized |
python | davidhalter__jedi | test/completion/pytest.py | {
"start": 246,
"end": 3399
} | class ____():
pass
# -----------------
# goto/infer
# -----------------
#! 18 ['def my_conftest_fixture']
def test_x(my_conftest_fixture, my_fixture, my_not_existing_fixture, my_yield_fixture):
#? str()
my_fixture
#? int()
my_yield_fixture
#?
my_not_existing_fixture
#? float()
return my_conftest_fixture
#? 18 float()
def test_x(my_conftest_fixture, my_fixture):
pass
#! 18 ['param MyClassFixture']
def test_x(MyClassFixture):
#?
MyClassFixture
#? 15
def lala(my_fixture):
pass
@pytest.fixture
#? 15 str()
def lala(my_fixture):
pass
#! 15 ['param my_fixture']
def lala(my_fixture):
pass
@pytest.fixture
#! 15 ['def my_fixture']
def lala(my_fixture):
pass
# overriding types of a fixture should be possible
def test_x(my_yield_fixture: str):
#? str()
my_yield_fixture
# -----------------
# completion
# -----------------
#? 34 ['my_fixture']
def test_x(my_simple_fixture, my_fixture):
return
#? 34 ['my_fixture']
def test_x(my_simple_fixture, my_fixture):
return
#? ['my_fixture']
def test_x(my_simple_fixture, my_f
return
#? 18 ['my_simple_fixture']
def test_x(my_simple_fixture):
return
#? ['my_simple_fixture']
def test_x(my_simp
return
#? ['my_conftest_fixture']
def test_x(my_con
return
#? 18 ['my_conftest_fixture']
def test_x(my_conftest_fixture):
return
#? ['my_module_fixture']
def test_x(my_modu
return
#? []
def lala(my_con
return
@pytest.fixture
#? ['my_conftest_fixture']
def lala(my_con
return
@pytest.fixture
#? 15 ['my_conftest_fixture']
def lala(my_con):
return
@pytest.fixture
@some_decorator
#? ['my_conftest_fixture']
def lala(my_con
return
@pytest.fixture
@some_decorator
#? 15 ['my_conftest_fixture']
def lala(my_con):
return
# -----------------
# pytest owned fixtures
# -----------------
#? ['monkeypatch']
def test_p(monkeyp
#! 15 ['def monkeypatch']
def test_p(monkeypatch):
#? ['setattr']
monkeypatch.setatt
#? ['capsysbinary']
def test_p(capsysbin
def close_parens():
pass
# -----------------
# inheritance
# -----------------
@fixture
#? 40 ['inheritance_fixture']
def inheritance_fixture(inheritance_fixture):
#? str()
inheritance_fixture
#? ['upper']
inheritance_fixture.upper
return 1
#! 48 ['def inheritance_fixture']
def test_inheritance_fixture(inheritance_fixture, caplog):
#? int()
inheritance_fixture
#? ['set_level']
caplog.set_le
@pytest.fixture
def caplog(caplog):
yield caplog
# -----------------
# Generator with annotation
# -----------------
@pytest.fixture
def with_annot() -> Generator[float, None, None]:
pass
def test_with_annot(inheritance_fixture, with_annot):
#? float()
with_annot
# -----------------
# pytest external plugins
# -----------------
#? ['admin_user', 'admin_client']
def test_z(admin
#! 15 ['def admin_client']
def test_p(admin_client):
#? ['login', 'logout']
admin_client.log
@pytest.fixture
@some_decorator
#? ['admin_user']
def bla(admin_u
return
@pytest.fixture
@some_decorator
#! 12 ['def admin_user']
def bla(admin_user):
pass
| MyClassFixture |
python | kamyu104__LeetCode-Solutions | Python/maximum-sum-with-at-most-k-elements.py | {
"start": 72,
"end": 1713
} | class ____(object):
def maxSum(self, grid, limits, k):
"""
:type grid: List[List[int]]
:type limits: List[int]
:type k: int
:rtype: int
"""
def nth_element(nums, n, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target, compare):
mid = left
while mid <= right:
if nums[mid] == target:
mid += 1
elif compare(nums[mid], target):
nums[left], nums[mid] = nums[mid], nums[left]
left += 1
mid += 1
else:
nums[mid], nums[right] = nums[right], nums[mid]
right -= 1
return left, right
left, right = 0, len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare)
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
candidates = []
for i in xrange(len(grid)):
cnt = min(k, limits[i])
nth_element(grid[i], cnt-1, lambda a, b: a > b)
for j in xrange(cnt):
candidates.append(grid[i][j])
nth_element(candidates, k-1, lambda a, b: a > b)
return sum(candidates[i] for i in xrange(k))
| Solution |
python | crytic__slither | slither/printers/inheritance/inheritance_graph.py | {
"start": 960,
"end": 8994
} | class ____(AbstractPrinter):
ARGUMENT = "inheritance-graph"
HELP = "Export the inheritance graph of each contract to a dot file"
WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#inheritance-graph"
def __init__(self, slither, logger):
super().__init__(slither, logger)
inheritance = [x.inheritance for x in slither.contracts]
self.inheritance = {item for sublist in inheritance for item in sublist}
self.overshadowing_state_variables = {}
shadows = detect_state_variable_shadowing(slither.contracts)
for overshadowing_instance in shadows:
overshadowing_state_var = overshadowing_instance[1]
overshadowed_state_var = overshadowing_instance[3]
# Add overshadowing variable entry.
if overshadowing_state_var not in self.overshadowing_state_variables:
self.overshadowing_state_variables[overshadowing_state_var] = set()
self.overshadowing_state_variables[overshadowing_state_var].add(overshadowed_state_var)
def _get_pattern_var(self, var):
# Html pattern, each line is a row in a table
var_name = var.name
pattern = '<TR><TD align="left"> %s</TD></TR>'
pattern_contract = (
'<TR><TD align="left"> %s<font color="blue" POINT-SIZE="10"> (%s)</font></TD></TR>'
)
pattern_shadow = '<TR><TD align="left"><font color="red"> %s</font></TD></TR>'
pattern_contract_shadow = '<TR><TD align="left"><font color="red"> %s</font><font color="blue" POINT-SIZE="10"> (%s)</font></TD></TR>'
if isinstance(var.type, UserDefinedType) and isinstance(var.type.type, Contract):
if var in self.overshadowing_state_variables:
return pattern_contract_shadow % (var_name, var.type.type.name)
return pattern_contract % (var_name, var.type.type.name)
if var in self.overshadowing_state_variables:
return pattern_shadow % var_name
return pattern % var_name
@staticmethod
def _get_indirect_shadowing_information(contract):
"""
Obtain a string that describes variable shadowing for the given variable. None if no shadowing exists.
:param var: The variable to collect shadowing information for.
:param contract: The contract in which this variable is being analyzed.
:return: Returns a string describing variable shadowing for the given variable. None if no shadowing exists.
"""
# If this variable is an overshadowing variable, we'll want to return information describing it.
result = []
indirect_shadows = detect_c3_function_shadowing(contract)
for winner, colliding_functions in indirect_shadows.items():
collision_steps = ", ".join(
[f.contract_declarer.name for f in colliding_functions]
+ [winner.contract_declarer.name]
)
result.append(
f"'{winner.full_name}' collides in inherited contracts {collision_steps} where {winner.contract_declarer.name} is chosen."
)
return "\n".join(result)
def _summary(self, contract):
"""
Build summary using HTML
"""
ret = ""
# Remove contracts that have "mock" in the name and if --include-interfaces in False (default)
# removes inherited interfaces
inheritance = [
i
for i in contract.immediate_inheritance
if "mock" not in i.name.lower()
and (not i.is_interface or self.slither.include_interfaces)
]
# Add arrows (number them if there is more than one path so we know order of declaration for inheritance).
if len(inheritance) == 1:
immediate_inheritance = contract.immediate_inheritance[0]
ret += f"c{contract.id}_{contract.name} -> c{immediate_inheritance.id}_{immediate_inheritance};\n"
else:
for i, immediate_inheritance in enumerate(inheritance):
ret += f'c{contract.id}_{contract.name} -> c{immediate_inheritance.id}_{immediate_inheritance} [ label="{i + 1}" ];\n'
# Functions
visibilities = ["public", "external"]
public_functions = [
_get_pattern_func(f)
for f in contract.functions
if not f.is_constructor
and not f.is_constructor_variables
and not f.is_virtual
and f.contract_declarer == contract
and f.visibility in visibilities
]
public_functions = "".join(public_functions)
private_functions = [
_get_pattern_func(f)
for f in contract.functions
if not f.is_constructor
and not f.is_constructor_variables
and f.contract_declarer == contract
and f.visibility not in visibilities
]
private_functions = "".join(private_functions)
# Modifiers
modifiers = [
_get_pattern_func(m) for m in contract.modifiers if m.contract_declarer == contract
]
modifiers = "".join(modifiers)
# Public variables
public_variables = [
self._get_pattern_var(v)
for v in contract.state_variables_declared
if v.visibility in visibilities
]
public_variables = "".join(public_variables)
private_variables = [
self._get_pattern_var(v)
for v in contract.state_variables_declared
if v.visibility not in visibilities
]
private_variables = "".join(private_variables)
# Obtain any indirect shadowing information for this node.
indirect_shadowing_information = self._get_indirect_shadowing_information(contract)
# Build the node label
ret += f'c{contract.id}_{contract.name}[shape="box"'
ret += 'label=< <TABLE border="0">'
ret += f'<TR><TD align="center"><B>{contract.name}</B></TD></TR>'
if public_functions:
ret += '<TR><TD align="left"><I>Public Functions:</I></TD></TR>'
ret += f"{public_functions}"
if private_functions:
ret += '<TR><TD align="left"><I>Private Functions:</I></TD></TR>'
ret += f"{private_functions}"
if modifiers:
ret += '<TR><TD align="left"><I>Modifiers:</I></TD></TR>'
ret += f"{modifiers}"
if public_variables:
ret += '<TR><TD align="left"><I>Public Variables:</I></TD></TR>'
ret += f"{public_variables}"
if private_variables:
ret += '<TR><TD align="left"><I>Private Variables:</I></TD></TR>'
ret += f"{private_variables}"
if indirect_shadowing_information:
ret += (
'<TR><TD><BR/></TD></TR><TR><TD align="left" border="1"><font color="#777777" point-size="10">%s</font></TD></TR>'
% indirect_shadowing_information.replace("\n", "<BR/>")
)
ret += "</TABLE> >];\n"
return ret
def output(self, filename):
"""
Output the graph in filename
Args:
filename(string)
"""
if filename in ("", "."):
filename = "inheritance-graph.dot"
if not filename.endswith(".dot"):
filename += ".inheritance-graph.dot"
info = "Inheritance Graph: " + filename + "\n"
self.info(info)
content = 'digraph "" {\n'
for c in self.contracts:
if (
"mock" in c.name.lower()
or c.is_library
or (c.is_interface and not self.slither.include_interfaces)
):
continue
content += self._summary(c) + "\n"
content += "}"
with open(filename, "w", encoding="utf8") as f:
f.write(content)
res = self.generate_output(info)
res.add_file(filename, content)
return res
| PrinterInheritanceGraph |
python | scikit-learn__scikit-learn | sklearn/kernel_approximation.py | {
"start": 14971,
"end": 21005
} | class ____(
ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
):
"""Approximate feature map for "skewed chi-squared" kernel.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float, default=1.0
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int, default=100
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the generation of the random
weights and random offset when fitting the training data.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
random_weights_ : ndarray of shape (n_features, n_components)
Weight array, sampled from a secant hyperbolic distribution, which will
be used to linearly transform the log of the data.
random_offset_ : ndarray of shape (n_features, n_components)
Bias term, which will be added to the data. It is uniformly distributed
between 0 and 2*pi.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
Nystroem : Approximate a kernel map using a subset of the training data.
RBFSampler : Approximate a RBF kernel feature map using random Fourier
features.
SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
Examples
--------
>>> from sklearn.kernel_approximation import SkewedChi2Sampler
>>> from sklearn.linear_model import SGDClassifier
>>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
>>> y = [0, 0, 1, 1]
>>> chi2_feature = SkewedChi2Sampler(skewedness=.01,
... n_components=10,
... random_state=0)
>>> X_features = chi2_feature.fit_transform(X, y)
>>> clf = SGDClassifier(max_iter=10, tol=1e-3)
>>> clf.fit(X_features, y)
SGDClassifier(max_iter=10)
>>> clf.score(X_features, y)
1.0
"""
_parameter_constraints: dict = {
"skewedness": [Interval(Real, None, None, closed="neither")],
"n_components": [Interval(Integral, 1, None, closed="left")],
"random_state": ["random_state"],
}
def __init__(self, *, skewedness=1.0, n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = 1.0 / np.pi * np.log(np.tan(np.pi / 2.0 * uniform))
self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)
if X.dtype == np.float32:
# Setting the data type of the fitted attribute will ensure the
# output data type during `transform`.
self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False)
self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False)
self._n_features_out = self.n_components
return self
def transform(self, X):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features. All values of X must be
strictly greater than "-skewedness".
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Returns the instance itself.
"""
check_is_fitted(self)
X = validate_data(
self, X, copy=True, dtype=[np.float64, np.float32], reset=False
)
if (X <= -self.skewedness).any():
raise ValueError("X may not contain entries smaller than -skewedness.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.0) / np.sqrt(self.n_components)
return projection
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.transformer_tags.preserves_dtype = ["float64", "float32"]
return tags
| SkewedChi2Sampler |
python | django__django | django/utils/deprecation.py | {
"start": 1251,
"end": 10242
} | class ____(type):
"""
Handles the deprecation paths when renaming a method.
It does the following:
1) Define the new method if missing and complain about it.
2) Define the old method if missing.
3) Complain whenever an old method is called.
See #15363 for more details.
"""
renamed_methods = ()
def __new__(cls, name, bases, attrs):
new_class = super().__new__(cls, name, bases, attrs)
for base in inspect.getmro(new_class):
class_name = base.__name__
for renamed_method in cls.renamed_methods:
old_method_name = renamed_method[0]
old_method = base.__dict__.get(old_method_name)
new_method_name = renamed_method[1]
new_method = base.__dict__.get(new_method_name)
deprecation_warning = renamed_method[2]
wrapper = warn_about_renamed_method(class_name, *renamed_method)
# Define the new method if missing and complain about it
if not new_method and old_method:
warnings.warn(
"`%s.%s` method should be renamed `%s`."
% (class_name, old_method_name, new_method_name),
deprecation_warning,
2,
)
setattr(base, new_method_name, old_method)
setattr(base, old_method_name, wrapper(old_method))
# Define the old method as a wrapped call to the new method.
if not old_method and new_method:
setattr(base, old_method_name, wrapper(new_method))
return new_class
def deprecate_posargs(deprecation_warning, remappable_names, /):
"""
Function/method decorator to deprecate some or all positional arguments.
The decorated function will map any positional arguments after the ``*`` to
the corresponding keyword arguments and issue a deprecation warning.
The decorator takes two arguments: a RemovedInDjangoXXWarning warning
category and a list of parameter names that have been changed from
positional-or-keyword to keyword-only, in their original positional order.
Works on both functions and methods. To apply to a class constructor,
decorate its __init__() method. To apply to a staticmethod or classmethod,
use @deprecate_posargs after @staticmethod or @classmethod.
Example: to deprecate passing option1 or option2 as posargs, change::
def some_func(request, option1, option2=True):
...
to::
@deprecate_posargs(RemovedInDjangoXXWarning, ["option1", "option2"])
def some_func(request, *, option1, option2=True):
...
After the deprecation period, remove the decorator (but keep the ``*``)::
def some_func(request, *, option1, option2=True):
...
Caution: during the deprecation period, do not add any new *positional*
parameters or change the remaining ones. For example, this attempt to add a
new param would break code using the deprecated posargs::
@deprecate_posargs(RemovedInDjangoXXWarning, ["option1", "option2"])
def some_func(request, wrong_new_param=None, *, option1, option2=True):
# Broken: existing code may pass a value intended as option1 in the
# wrong_new_param position.
...
However, it's acceptable to add new *keyword-only* parameters and to
re-order the existing ones, so long as the list passed to
@deprecate_posargs is kept in the original posargs order. This change will
work without breaking existing code::
@deprecate_posargs(RemovedInDjangoXXWarning, ["option1", "option2"])
def some_func(request, *, new_param=None, option2=True, option1):
...
The @deprecate_posargs decorator adds a small amount of overhead. In most
cases it won't be significant, but use with care in performance-critical
code paths.
"""
def decorator(func):
if isinstance(func, type):
raise TypeError(
"@deprecate_posargs cannot be applied to a class. (Apply it "
"to the __init__ method.)"
)
if isinstance(func, classmethod):
raise TypeError("Apply @classmethod before @deprecate_posargs.")
if isinstance(func, staticmethod):
raise TypeError("Apply @staticmethod before @deprecate_posargs.")
params = inspect.signature(func).parameters
num_by_kind = Counter(param.kind for param in params.values())
if num_by_kind[inspect.Parameter.VAR_POSITIONAL] > 0:
raise TypeError(
"@deprecate_posargs() cannot be used with variable positional `*args`."
)
num_positional_params = (
num_by_kind[inspect.Parameter.POSITIONAL_ONLY]
+ num_by_kind[inspect.Parameter.POSITIONAL_OR_KEYWORD]
)
num_keyword_only_params = num_by_kind[inspect.Parameter.KEYWORD_ONLY]
if num_keyword_only_params < 1:
raise TypeError(
"@deprecate_posargs() requires at least one keyword-only parameter "
"(after a `*` entry in the parameters list)."
)
if any(
name not in params or params[name].kind != inspect.Parameter.KEYWORD_ONLY
for name in remappable_names
):
raise TypeError(
"@deprecate_posargs() requires all remappable_names to be "
"keyword-only parameters."
)
num_remappable_args = len(remappable_names)
max_positional_args = num_positional_params + num_remappable_args
func_name = func.__name__
if func_name == "__init__":
# In the warning, show "ClassName()" instead of "__init__()".
# The class isn't defined yet, but its name is in __qualname__.
# Some examples of __qualname__:
# - ClassName.__init__
# - Nested.ClassName.__init__
# - MyTests.test_case.<locals>.ClassName.__init__
local_name = func.__qualname__.rsplit("<locals>.", 1)[-1]
class_name = local_name.replace(".__init__", "")
func_name = class_name
def remap_deprecated_args(args, kwargs):
"""
Move deprecated positional args to kwargs and issue a warning.
Return updated (args, kwargs).
"""
if (num_positional_args := len(args)) > max_positional_args:
raise TypeError(
f"{func_name}() takes at most {max_positional_args} positional "
f"argument(s) (including {num_remappable_args} deprecated) but "
f"{num_positional_args} were given."
)
# Identify which of the _potentially remappable_ params are
# actually _being remapped_ in this particular call.
remapped_names = remappable_names[
: num_positional_args - num_positional_params
]
conflicts = set(remapped_names) & set(kwargs)
if conflicts:
# Report duplicate names in the original parameter order.
conflicts_str = ", ".join(
f"'{name}'" for name in remapped_names if name in conflicts
)
raise TypeError(
f"{func_name}() got both deprecated positional and keyword "
f"argument values for {conflicts_str}."
)
# Do the remapping.
remapped_kwargs = dict(
zip(remapped_names, args[num_positional_params:], strict=True)
)
remaining_args = args[:num_positional_params]
updated_kwargs = kwargs | remapped_kwargs
# Issue the deprecation warning.
remapped_names_str = ", ".join(f"'{name}'" for name in remapped_names)
warnings.warn(
f"Passing positional argument(s) {remapped_names_str} to {func_name}() "
"is deprecated. Use keyword arguments instead.",
deprecation_warning,
skip_file_prefixes=django_file_prefixes(),
)
return remaining_args, updated_kwargs
if iscoroutinefunction(func):
@functools.wraps(func)
async def wrapper(*args, **kwargs):
if len(args) > num_positional_params:
args, kwargs = remap_deprecated_args(args, kwargs)
return await func(*args, **kwargs)
else:
@functools.wraps(func)
def wrapper(*args, **kwargs):
if len(args) > num_positional_params:
args, kwargs = remap_deprecated_args(args, kwargs)
return func(*args, **kwargs)
return wrapper
return decorator
| RenameMethodsBase |
python | gevent__gevent | src/greentest/3.9/test_socket.py | {
"start": 254226,
"end": 255742
} | class ____(unittest.TestCase):
def testSendAndRecvFds(self):
def close_pipes(pipes):
for fd1, fd2 in pipes:
os.close(fd1)
os.close(fd2)
def close_fds(fds):
for fd in fds:
os.close(fd)
# send 10 file descriptors
pipes = [os.pipe() for _ in range(10)]
self.addCleanup(close_pipes, pipes)
fds = [rfd for rfd, wfd in pipes]
# use a UNIX socket pair to exchange file descriptors locally
sock1, sock2 = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
with sock1, sock2:
socket.send_fds(sock1, [MSG], fds)
# request more data and file descriptors than expected
msg, fds2, flags, addr = socket.recv_fds(sock2, len(MSG) * 2, len(fds) * 2)
self.addCleanup(close_fds, fds2)
self.assertEqual(msg, MSG)
self.assertEqual(len(fds2), len(fds))
self.assertEqual(flags, 0)
# don't test addr
# test that file descriptors are connected
for index, fds in enumerate(pipes):
rfd, wfd = fds
os.write(wfd, str(index).encode())
for index, rfd in enumerate(fds2):
data = os.read(rfd, 100)
self.assertEqual(data, str(index).encode())
def setUpModule():
thread_info = support.threading_setup()
unittest.addModuleCleanup(support.threading_cleanup, *thread_info)
if __name__ == "__main__":
unittest.main()
| SendRecvFdsTests |
python | astropy__astropy | astropy/io/ascii/basic.py | {
"start": 403,
"end": 684
} | class ____(core.BaseHeader):
"""
Basic table Header Reader.
Set a few defaults for common ascii table formats
(start at line 0, comments begin with ``#`` and possibly white space)
"""
start_line = 0
comment = r"\s*#"
write_comment = "# "
| BasicHeader |
python | pypa__setuptools | setuptools/_vendor/backports/tarfile/__init__.py | {
"start": 9569,
"end": 9656
} | class ____(TarError):
"""General exception for extract errors."""
pass
| ExtractError |
python | cython__cython | Cython/Compiler/Tests/TestTypes.py | {
"start": 59,
"end": 631
} | class ____(unittest.TestCase):
def test_widest_numeric_type(self):
def assert_widest(type1, type2, widest):
self.assertEqual(widest, PT.widest_numeric_type(type1, type2))
assert_widest(PT.c_int_type, PT.c_long_type, PT.c_long_type)
assert_widest(PT.c_double_type, PT.c_long_type, PT.c_double_type)
assert_widest(PT.c_longdouble_type, PT.c_long_type, PT.c_longdouble_type)
cenum = PT.CEnumType("E", "cenum", typedef_flag=False)
assert_widest(PT.c_int_type, cenum, PT.c_int_type)
| TestMethodDispatcherTransform |
python | great-expectations__great_expectations | great_expectations/exceptions/exceptions.py | {
"start": 1290,
"end": 1469
} | class ____(GreatExpectationsError):
def __init__(self, col_name: str) -> None:
msg = f"Unhashable column: {col_name}"
super().__init__(msg)
| UnhashableColumnError |
python | scipy__scipy | scipy/odr/tests/test_odr.py | {
"start": 482,
"end": 22478
} | class ____:
# Bad Data for 'x'
def test_bad_data(self):
assert_raises(ValueError, Data, 2, 1)
assert_raises(ValueError, RealData, 2, 1)
# Empty Data for 'x'
def empty_data_func(self, B, x):
return B[0]*x + B[1]
def test_empty_data(self):
beta0 = [0.02, 0.0]
linear = Model(self.empty_data_func)
empty_dat = Data([], [])
with pytest.warns(OdrWarning):
ODR(empty_dat, linear, beta0=beta0)
empty_dat = RealData([], [])
with pytest.warns(OdrWarning):
ODR(empty_dat, linear, beta0=beta0)
# Explicit Example
def explicit_fcn(self, B, x):
ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2)
return ret
def explicit_fjd(self, B, x):
eBx = np.exp(B[2]*x)
ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx
return ret
def explicit_fjb(self, B, x):
eBx = np.exp(B[2]*x)
res = np.vstack([np.ones(x.shape[-1]),
np.power(eBx-1.0, 2),
B[1]*2.0*(eBx-1.0)*eBx*x])
return res
def test_explicit(self):
explicit_mod = Model(
self.explicit_fcn,
fjacb=self.explicit_fjb,
fjacd=self.explicit_fjd,
meta=dict(name='Sample Explicit Model',
ref='ODRPACK UG, pg. 39'),
)
explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.],
[1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6,
1213.8,1215.5,1212.])
explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1],
ifixx=[0,0,1,1,1,1,1,1,1,1,1,0])
explicit_odr.set_job(deriv=2)
explicit_odr.set_iprint(init=0, iter=0, final=0)
out = explicit_odr.run()
assert_array_almost_equal(
out.beta,
np.array([1.2646548050648876e+03, -5.4018409956678255e+01,
-8.7849712165253724e-02]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[4.4949592379003039e-01, -3.7421976890364739e-01,
-8.0978217468468912e-04],
[-3.7421976890364739e-01, 1.0529686462751804e+00,
-1.9453521827942002e-03],
[-8.0978217468468912e-04, -1.9453521827942002e-03,
1.6827336938454476e-05]]),
)
# Implicit Example
def implicit_fcn(self, B, x):
return (B[2]*np.power(x[0]-B[0], 2) +
2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) +
B[4]*np.power(x[1]-B[1], 2) - 1.0)
def test_implicit(self):
implicit_mod = Model(
self.implicit_fcn,
implicit=1,
meta=dict(name='Sample Implicit Model',
ref='ODRPACK UG, pg. 49'),
)
implicit_dat = Data([
[0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28,
-0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44],
[-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32,
-6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]],
1,
)
implicit_odr = ODR(implicit_dat, implicit_mod,
beta0=[-1.0, -3.0, 0.09, 0.02, 0.08])
out = implicit_odr.run()
assert_array_almost_equal(
out.beta,
np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354,
0.0162299708984738, 0.0797537982976416]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([0.1113840353364371, 0.1097673310686467, 0.0041060738314314,
0.0027500347539902, 0.0034962501532468]),
)
assert_allclose(
out.cov_beta,
np.array([[2.1089274602333052e+00, -1.9437686411979040e+00,
7.0263550868344446e-02, -4.7175267373474862e-02,
5.2515575927380355e-02],
[-1.9437686411979040e+00, 2.0481509222414456e+00,
-6.1600515853057307e-02, 4.6268827806232933e-02,
-5.8822307501391467e-02],
[7.0263550868344446e-02, -6.1600515853057307e-02,
2.8659542561579308e-03, -1.4628662260014491e-03,
1.4528860663055824e-03],
[-4.7175267373474862e-02, 4.6268827806232933e-02,
-1.4628662260014491e-03, 1.2855592885514335e-03,
-1.2692942951415293e-03],
[5.2515575927380355e-02, -5.8822307501391467e-02,
1.4528860663055824e-03, -1.2692942951415293e-03,
2.0778813389755596e-03]]),
rtol=1e-6, atol=2e-6,
)
# Multi-variable Example
def multi_fcn(self, B, x):
if (x < 0.0).any():
raise OdrStop
theta = pi*B[3]/2.
ctheta = np.cos(theta)
stheta = np.sin(theta)
omega = np.power(2.*pi*x*np.exp(-B[2]), B[3])
phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta))
r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) +
np.power(omega*stheta, 2)), -B[4])
ret = np.vstack([B[1] + r*np.cos(B[4]*phi),
r*np.sin(B[4]*phi)])
return ret
def test_multi(self):
multi_mod = Model(
self.multi_fcn,
meta=dict(name='Sample Multi-Response Model',
ref='ODRPACK UG, pg. 56'),
)
multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0,
700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0,
15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0])
multi_y = np.array([
[4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713,
3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984,
2.934, 2.876, 2.838, 2.798, 2.759],
[0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309,
0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218,
0.202, 0.182, 0.168, 0.153, 0.139],
])
n = len(multi_x)
multi_we = np.zeros((2, 2, n), dtype=float)
multi_ifixx = np.ones(n, dtype=int)
multi_delta = np.zeros(n, dtype=float)
multi_we[0,0,:] = 559.6
multi_we[1,0,:] = multi_we[0,1,:] = -1634.0
multi_we[1,1,:] = 8397.0
for i in range(n):
if multi_x[i] < 100.0:
multi_ifixx[i] = 0
elif multi_x[i] <= 150.0:
pass # defaults are fine
elif multi_x[i] <= 1000.0:
multi_delta[i] = 25.0
elif multi_x[i] <= 10000.0:
multi_delta[i] = 560.0
elif multi_x[i] <= 100000.0:
multi_delta[i] = 9500.0
else:
multi_delta[i] = 144000.0
if multi_x[i] == 100.0 or multi_x[i] == 150.0:
multi_we[:,:,i] = 0.0
multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2),
we=multi_we)
multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5],
delta0=multi_delta, ifixx=multi_ifixx)
multi_odr.set_job(deriv=1, del_init=1)
out = multi_odr.run()
assert_array_almost_equal(
out.beta,
np.array([4.3799880305938963, 2.4333057577497703, 8.0028845899503978,
0.5101147161764654, 0.5173902330489161]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([0.0130625231081944, 0.0130499785273277, 0.1167085962217757,
0.0132642749596149, 0.0288529201353984]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[0.0064918418231375, 0.0036159705923791, 0.0438637051470406,
-0.0058700836512467, 0.011281212888768],
[0.0036159705923791, 0.0064793789429006, 0.0517610978353126,
-0.0051181304940204, 0.0130726943624117],
[0.0438637051470406, 0.0517610978353126, 0.5182263323095322,
-0.0563083340093696, 0.1269490939468611],
[-0.0058700836512467, -0.0051181304940204, -0.0563083340093696,
0.0066939246261263, -0.0140184391377962],
[0.011281212888768, 0.0130726943624117, 0.1269490939468611,
-0.0140184391377962, 0.0316733013820852]]),
)
# Pearson's Data
# K. Pearson, Philosophical Magazine, 2, 559 (1901)
def pearson_fcn(self, B, x):
return B[0] + B[1]*x
def test_pearson(self):
p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4])
p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5])
p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.])
p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04])
p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy)
# Reverse the data to test invariance of results
pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx)
p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit'))
p_odr = ODR(p_dat, p_mod, beta0=[1.,1.])
pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.])
out = p_odr.run()
assert_array_almost_equal(
out.beta,
np.array([5.4767400299231674, -0.4796082367610305]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([0.3590121690702467, 0.0706291186037444]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[0.0854275622946333, -0.0161807025443155],
[-0.0161807025443155, 0.003306337993922]]),
)
rout = pr_odr.run()
assert_array_almost_equal(
rout.beta,
np.array([11.4192022410781231, -2.0850374506165474]),
)
assert_array_almost_equal(
rout.sd_beta,
np.array([0.9820231665657161, 0.3070515616198911]),
)
assert_array_almost_equal(
rout.cov_beta,
np.array([[0.6391799462548782, -0.1955657291119177],
[-0.1955657291119177, 0.0624888159223392]]),
)
# Lorentz Peak
# The data is taken from one of the undergraduate physics labs I performed.
def lorentz(self, beta, x):
return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x -
beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0)))
def test_lorentz(self):
l_sy = np.array([.29]*18)
l_sx = np.array([.000972971,.000948268,.000707632,.000706679,
.000706074, .000703918,.000698955,.000456856,
.000455207,.000662717,.000654619,.000652694,
.000000859202,.00106589,.00106378,.00125483, .00140818,.00241839])
l_dat = RealData(
[3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608,
3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982,
3.6562, 3.62498, 3.55525, 3.41886],
[652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122,
957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5],
sx=l_sx,
sy=l_sy,
)
l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak'))
l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8))
out = l_odr.run()
assert_array_almost_equal(
out.beta,
np.array([1.4306780846149925e+03, 1.3390509034538309e-01,
3.7798193600109009e+00]),
)
assert_array_almost_equal(
out.sd_beta,
np.array([7.3621186811330963e-01, 3.5068899941471650e-04,
2.4451209281408992e-04]),
)
assert_array_almost_equal(
out.cov_beta,
np.array([[2.4714409064597873e-01, -6.9067261911110836e-05,
-3.1236953270424990e-05],
[-6.9067261911110836e-05, 5.6077531517333009e-08,
3.6133261832722601e-08],
[-3.1236953270424990e-05, 3.6133261832722601e-08,
2.7261220025171730e-08]]),
)
def test_ticket_1253(self):
def linear(c, x):
return c[0]*x+c[1]
c = [2.0, 3.0]
x = np.linspace(0, 10)
y = linear(c, x)
model = Model(linear)
data = Data(x, y, wd=1.0, we=1.0)
job = ODR(data, model, beta0=[1.0, 1.0])
result = job.run()
assert_equal(result.info, 2)
# Verify fix for gh-9140
def test_ifixx(self):
x1 = [-2.01, -0.99, -0.001, 1.02, 1.98]
x2 = [3.98, 1.01, 0.001, 0.998, 4.01]
fix = np.vstack((np.zeros_like(x1, dtype=int), np.ones_like(x2, dtype=int)))
data = Data(np.vstack((x1, x2)), y=1, fix=fix)
model = Model(lambda beta, x: x[1, :] - beta[0] * x[0, :]**2., implicit=True)
odr1 = ODR(data, model, beta0=np.array([1.]))
sol1 = odr1.run()
odr2 = ODR(data, model, beta0=np.array([1.]), ifixx=fix)
sol2 = odr2.run()
assert_equal(sol1.beta, sol2.beta)
# verify bugfix for #11800 in #11802
def test_ticket_11800(self):
# parameters
beta_true = np.array([1.0, 2.3, 1.1, -1.0, 1.3, 0.5])
nr_measurements = 10
std_dev_x = 0.01
x_error = np.array([[0.00063445, 0.00515731, 0.00162719, 0.01022866,
-0.01624845, 0.00482652, 0.00275988, -0.00714734, -0.00929201, -0.00687301],
[-0.00831623, -0.00821211, -0.00203459, 0.00938266, -0.00701829,
0.0032169, 0.00259194, -0.00581017, -0.0030283, 0.01014164]])
std_dev_y = 0.05
y_error = np.array([[0.05275304, 0.04519563, -0.07524086, 0.03575642,
0.04745194, 0.03806645, 0.07061601, -0.00753604, -0.02592543, -0.02394929],
[0.03632366, 0.06642266, 0.08373122, 0.03988822, -0.0092536,
-0.03750469, -0.03198903, 0.01642066, 0.01293648, -0.05627085]])
beta_solution = np.array([
2.62920235756665876536e+00, -1.26608484996299608838e+02,
1.29703572775403074502e+02, -1.88560985401185465804e+00,
7.83834160771274923718e+01, -7.64124076838087091801e+01])
# model's function and Jacobians
def func(beta, x):
y0 = beta[0] + beta[1] * x[0, :] + beta[2] * x[1, :]
y1 = beta[3] + beta[4] * x[0, :] + beta[5] * x[1, :]
return np.vstack((y0, y1))
def df_dbeta_odr(beta, x):
nr_meas = np.shape(x)[1]
zeros = np.zeros(nr_meas)
ones = np.ones(nr_meas)
dy0 = np.array([ones, x[0, :], x[1, :], zeros, zeros, zeros])
dy1 = np.array([zeros, zeros, zeros, ones, x[0, :], x[1, :]])
return np.stack((dy0, dy1))
def df_dx_odr(beta, x):
nr_meas = np.shape(x)[1]
ones = np.ones(nr_meas)
dy0 = np.array([beta[1] * ones, beta[2] * ones])
dy1 = np.array([beta[4] * ones, beta[5] * ones])
return np.stack((dy0, dy1))
# do measurements with errors in independent and dependent variables
x0_true = np.linspace(1, 10, nr_measurements)
x1_true = np.linspace(1, 10, nr_measurements)
x_true = np.array([x0_true, x1_true])
y_true = func(beta_true, x_true)
x_meas = x_true + x_error
y_meas = y_true + y_error
# estimate model's parameters
model_f = Model(func, fjacb=df_dbeta_odr, fjacd=df_dx_odr)
data = RealData(x_meas, y_meas, sx=std_dev_x, sy=std_dev_y)
odr_obj = ODR(data, model_f, beta0=0.9 * beta_true, maxit=100)
#odr_obj.set_iprint(init=2, iter=0, iter_step=1, final=1)
odr_obj.set_job(deriv=3)
odr_out = odr_obj.run()
# check results
assert_equal(odr_out.info, 1)
assert_array_almost_equal(odr_out.beta, beta_solution)
def test_multilinear_model(self):
x = np.linspace(0.0, 5.0)
y = 10.0 + 5.0 * x
data = Data(x, y)
odr_obj = ODR(data, multilinear)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [10.0, 5.0])
def test_exponential_model(self):
x = np.linspace(0.0, 5.0)
y = -10.0 + np.exp(0.5*x)
data = Data(x, y)
odr_obj = ODR(data, exponential)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [-10.0, 0.5])
def test_polynomial_model(self):
x = np.linspace(0.0, 5.0)
y = 1.0 + 2.0 * x + 3.0 * x ** 2 + 4.0 * x ** 3
poly_model = polynomial(3)
data = Data(x, y)
odr_obj = ODR(data, poly_model)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0, 4.0])
def test_unilinear_model(self):
x = np.linspace(0.0, 5.0)
y = 1.0 * x + 2.0
data = Data(x, y)
odr_obj = ODR(data, unilinear)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [1.0, 2.0])
def test_quadratic_model(self):
x = np.linspace(0.0, 5.0)
y = 1.0 * x ** 2 + 2.0 * x + 3.0
data = Data(x, y)
odr_obj = ODR(data, quadratic)
output = odr_obj.run()
assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0])
def test_work_ind(self):
def func(par, x):
b0, b1 = par
return b0 + b1 * x
# generate some data
n_data = 4
x = np.arange(n_data)
y = np.where(x % 2, x + 0.1, x - 0.1)
x_err = np.full(n_data, 0.1)
y_err = np.full(n_data, 0.1)
# do the fitting
linear_model = Model(func)
real_data = RealData(x, y, sx=x_err, sy=y_err)
odr_obj = ODR(real_data, linear_model, beta0=[0.4, 0.4])
odr_obj.set_job(fit_type=0)
out = odr_obj.run()
sd_ind = out.work_ind['sd']
assert_array_almost_equal(out.sd_beta,
out.work[sd_ind:sd_ind + len(out.sd_beta)])
@pytest.mark.skipif(True, reason="Fortran I/O prone to crashing so better "
"not to run this test, see gh-13127")
def test_output_file_overwrite(self):
"""
Verify fix for gh-1892
"""
def func(b, x):
return b[0] + b[1] * x
p = Model(func)
data = Data(np.arange(10), 12 * np.arange(10))
tmp_dir = tempfile.mkdtemp()
error_file_path = os.path.join(tmp_dir, "error.dat")
report_file_path = os.path.join(tmp_dir, "report.dat")
try:
ODR(data, p, beta0=[0.1, 13], errfile=error_file_path,
rptfile=report_file_path).run()
ODR(data, p, beta0=[0.1, 13], errfile=error_file_path,
rptfile=report_file_path, overwrite=True).run()
finally:
# remove output files for clean up
shutil.rmtree(tmp_dir)
def test_odr_model_default_meta(self):
def func(b, x):
return b[0] + b[1] * x
p = Model(func)
p.set_meta(name='Sample Model Meta', ref='ODRPACK')
assert_equal(p.meta, {'name': 'Sample Model Meta', 'ref': 'ODRPACK'})
def test_work_array_del_init(self):
"""
Verify fix for gh-18739 where del_init=1 fails.
"""
def func(b, x):
return b[0] + b[1] * x
# generate some data
n_data = 4
x = np.arange(n_data)
y = np.where(x % 2, x + 0.1, x - 0.1)
x_err = np.full(n_data, 0.1)
y_err = np.full(n_data, 0.1)
linear_model = Model(func)
# Try various shapes of the `we` array from various `sy` and `covy`
rd0 = RealData(x, y, sx=x_err, sy=y_err)
rd1 = RealData(x, y, sx=x_err, sy=0.1)
rd2 = RealData(x, y, sx=x_err, sy=[0.1])
rd3 = RealData(x, y, sx=x_err, sy=np.full((1, n_data), 0.1))
rd4 = RealData(x, y, sx=x_err, covy=[[0.01]])
rd5 = RealData(x, y, sx=x_err, covy=np.full((1, 1, n_data), 0.01))
for rd in [rd0, rd1, rd2, rd3, rd4, rd5]:
odr_obj = ODR(rd, linear_model, beta0=[0.4, 0.4],
delta0=np.full(n_data, -0.1))
odr_obj.set_job(fit_type=0, del_init=1)
# Just make sure that it runs without raising an exception.
odr_obj.run()
def test_pickling_data(self):
x = np.linspace(0.0, 5.0)
y = 1.0 * x + 2.0
data = Data(x, y)
obj_pickle = pickle.dumps(data)
del data
pickle.loads(obj_pickle)
def test_pickling_real_data(self):
x = np.linspace(0.0, 5.0)
y = 1.0 * x + 2.0
data = RealData(x, y)
obj_pickle = pickle.dumps(data)
del data
pickle.loads(obj_pickle)
def test_pickling_model(self):
obj_pickle = pickle.dumps(unilinear)
pickle.loads(obj_pickle)
def test_pickling_odr(self):
x = np.linspace(0.0, 5.0)
y = 1.0 * x + 2.0
odr_obj = ODR(Data(x, y), unilinear)
obj_pickle = pickle.dumps(odr_obj)
del odr_obj
pickle.loads(obj_pickle)
def test_pickling_output(self):
x = np.linspace(0.0, 5.0)
y = 1.0 * x + 2.0
output = ODR(Data(x, y), unilinear).run
obj_pickle = pickle.dumps(output)
del output
pickle.loads(obj_pickle)
def test_explicit_model_with_implicit_job(self):
"""
Verify fix for gh-23763 that ODR doesn't segfault
"""
x = np.linspace(0, 10, 10)
y = 2.0 + 3.0 * x
data = Data(x, y)
model = unilinear # this is an explicit model
# job=1 is implicit, should raise on explicit model
with assert_raises(OdrError):
odr = ODR(data, model, job=1)
odr.run()
| TestODR |
python | catalyst-team__catalyst | catalyst/contrib/losses/lovasz.py | {
"start": 8540,
"end": 9874
} | class ____(_Loss):
"""Creates a criterion that optimizes a multilabel Lovasz loss.
It has been proposed in `The Lovasz-Softmax loss: A tractable surrogate
for the optimization of the intersection-over-union measure
in neural networks`_.
.. _The Lovasz-Softmax loss\: A tractable surrogate for the optimization
of the intersection-over-union measure in neural networks:
https://arxiv.org/abs/1705.08790
"""
def __init__(self, per_image=False, ignore=None):
"""@TODO: Docs. Contribution is welcome."""
super().__init__()
self.ignore = ignore
self.per_image = per_image
def forward(self, logits, targets):
"""Forward propagation method for the Lovasz loss.
Args:
logits: [bs; num_classes; ...]
targets: [bs; num_classes; ...]
@TODO: Docs. Contribution is welcome.
"""
losses = [
_lovasz_hinge(
logits[:, i, ...],
targets[:, i, ...],
per_image=self.per_image,
ignore=self.ignore,
)
for i in range(logits.shape[1])
]
loss = torch.mean(torch.stack(losses))
return loss
__all__ = ["LovaszLossBinary", "LovaszLossMultiClass", "LovaszLossMultiLabel"]
| LovaszLossMultiLabel |
python | catalyst-team__catalyst | catalyst/contrib/data/dataset_cv.py | {
"start": 334,
"end": 2418
} | class ____(PathsDataset):
"""
Dataset class that derives targets from samples filesystem paths.
Dataset structure should be the following:
.. code-block:: bash
rootpath/
|-- class1/ # folder of N images
| |-- image11
| |-- image12
| ...
| `-- image1N
...
`-- classM/ # folder of K images
|-- imageM1
|-- imageM2
...
`-- imageMK
"""
def __init__(
self,
rootpath: str,
target_key: str = "targets",
dir2class: Optional[Mapping[str, int]] = None,
dict_transform: Optional[Callable[[Dict], Dict]] = None,
) -> None:
"""Constructor method for the :class:`ImageFolderDataset` class.
Args:
rootpath: root directory of dataset
target_key: key to use to store target label
dir2class (Mapping[str, int], optional): mapping from folder name
to class index
dict_transform (Callable[[Dict], Dict]], optional): transforms
to use on dict
"""
files = glob.iglob(f"{rootpath}/**/*")
images = sorted(filter(has_image_extension, files))
if dir2class is None:
dirs = sorted({Path(f).parent.name for f in images})
dir2class = {dirname: index for index, dirname in enumerate(dirs)}
super().__init__(
filenames=images,
open_fn=ReaderCompose(
[
ImageReader(input_key="image", rootpath=rootpath),
ScalarReader(
input_key=target_key,
output_key=target_key,
dtype=int,
default_value=-1,
),
]
),
label_fn=lambda fn: dir2class[Path(fn).parent.name],
features_key="image",
target_key=target_key,
dict_transform=dict_transform,
)
__all__ = ["ImageFolderDataset"]
| ImageFolderDataset |
python | TheAlgorithms__Python | linear_algebra/src/lib.py | {
"start": 7394,
"end": 14307
} | class ____:
"""
class: Matrix
This class represents an arbitrary matrix.
Overview of the methods:
__init__():
__str__(): returns a string representation
__add__(other: Matrix): matrix addition
__sub__(other: Matrix): matrix subtraction
__mul__(other: float): scalar multiplication
__mul__(other: Vector): vector multiplication
height() : returns height
width() : returns width
component(x: int, y: int): returns specified component
change_component(x: int, y: int, value: float): changes specified component
minor(x: int, y: int): returns minor along (x, y)
cofactor(x: int, y: int): returns cofactor along (x, y)
determinant() : returns determinant
"""
def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
"""
simple constructor for initializing the matrix with components.
"""
self.__matrix = matrix
self.__width = w
self.__height = h
def __str__(self) -> str:
"""
returns a string representation of this matrix.
"""
ans = ""
for i in range(self.__height):
ans += "|"
for j in range(self.__width):
if j < self.__width - 1:
ans += str(self.__matrix[i][j]) + ","
else:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
def __add__(self, other: Matrix) -> Matrix:
"""
implements matrix addition.
"""
if self.__width == other.width() and self.__height == other.height():
matrix = []
for i in range(self.__height):
row = [
self.__matrix[i][j] + other.component(i, j)
for j in range(self.__width)
]
matrix.append(row)
return Matrix(matrix, self.__width, self.__height)
else:
raise Exception("matrix must have the same dimension!")
def __sub__(self, other: Matrix) -> Matrix:
"""
implements matrix subtraction.
"""
if self.__width == other.width() and self.__height == other.height():
matrix = []
for i in range(self.__height):
row = [
self.__matrix[i][j] - other.component(i, j)
for j in range(self.__width)
]
matrix.append(row)
return Matrix(matrix, self.__width, self.__height)
else:
raise Exception("matrices must have the same dimension!")
@overload
def __mul__(self, other: float) -> Matrix: ...
@overload
def __mul__(self, other: Vector) -> Vector: ...
def __mul__(self, other: float | Vector) -> Vector | Matrix:
"""
implements the matrix-vector multiplication.
implements the matrix-scalar multiplication
"""
if isinstance(other, Vector): # matrix-vector
if len(other) == self.__width:
ans = zero_vector(self.__height)
for i in range(self.__height):
prods = [
self.__matrix[i][j] * other.component(j)
for j in range(self.__width)
]
ans.change_component(i, sum(prods))
return ans
else:
raise Exception(
"vector must have the same size as the "
"number of columns of the matrix!"
)
elif isinstance(other, (int, float)): # matrix-scalar
matrix = [
[self.__matrix[i][j] * other for j in range(self.__width)]
for i in range(self.__height)
]
return Matrix(matrix, self.__width, self.__height)
return None
def height(self) -> int:
"""
getter for the height
"""
return self.__height
def width(self) -> int:
"""
getter for the width
"""
return self.__width
def component(self, x: int, y: int) -> float:
"""
returns the specified (x,y) component
"""
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("change_component: indices out of bounds")
def change_component(self, x: int, y: int, value: float) -> None:
"""
changes the x-y component of this matrix
"""
if 0 <= x < self.__height and 0 <= y < self.__width:
self.__matrix[x][y] = value
else:
raise Exception("change_component: indices out of bounds")
def minor(self, x: int, y: int) -> float:
"""
returns the minor along (x, y)
"""
if self.__height != self.__width:
raise Exception("Matrix is not square")
minor = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(minor)):
minor[i] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
def cofactor(self, x: int, y: int) -> float:
"""
returns the cofactor (signed minor) along (x, y)
"""
if self.__height != self.__width:
raise Exception("Matrix is not square")
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(x, y)
else:
raise Exception("Indices out of bounds")
def determinant(self) -> float:
"""
returns the determinant of an nxn matrix using Laplace expansion
"""
if self.__height != self.__width:
raise Exception("Matrix is not square")
if self.__height < 1:
raise Exception("Matrix has no element")
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
cofactor_prods = [
self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
]
return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
"""
returns a square zero-matrix of dimension NxN
"""
ans: list[list[float]] = [[0] * n for _ in range(n)]
return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
"""
returns a random matrix WxH with integer components
between 'a' and 'b'
"""
random.seed(None)
matrix: list[list[float]] = [
[random.randint(a, b) for _ in range(width)] for _ in range(height)
]
return Matrix(matrix, width, height)
| Matrix |
python | dask__distributed | distributed/multi_lock.py | {
"start": 377,
"end": 4844
} | class ____:
"""An extension for the scheduler to manage MultiLocks
This adds the following routes to the scheduler
* multi_lock_acquire
* multi_lock_release
The approach is to maintain `self.locks` that maps a lock (unique name given to
`MultiLock(names=, ...)` at creation) to a list of users (instances of `MultiLock`)
that "requests" the lock. Additionally, `self.requests` maps a user to its requested
locks and `self.requests_left` maps a user to the number of locks still need.
Every time a user `x` gets to the front in `self.locks[name] = [x, ...]` it means
that `x` now holds the lock `name` and when it holds all the requested locks
`acquire()` can return.
Finally, `self.events` contains all the events users are waiting on to finish.
"""
def __init__(self, scheduler):
self.scheduler = scheduler
self.locks = defaultdict(list) # lock -> users
self.requests = {} # user -> locks
self.requests_left = {} # user -> locks still needed
self.events = {}
self.scheduler.handlers.update(
{"multi_lock_acquire": self.acquire, "multi_lock_release": self.release}
)
def _request_locks(self, locks: list[str], id: Hashable, num_locks: int) -> bool:
"""Request locks
Parameters
----------
locks: List[str]
Names of the locks to request.
id: Hashable
Identifier of the `MultiLock` instance requesting the locks.
num_locks: int
Number of locks in `locks` requesting
Return
------
result: bool
Whether `num_locks` requested locks are free immediately or not.
"""
assert id not in self.requests
self.requests[id] = set(locks)
assert len(locks) >= num_locks and num_locks > 0
self.requests_left[id] = num_locks
locks = sorted(locks, key=lambda x: len(self.locks[x]))
for i, lock in enumerate(locks):
self.locks[lock].append(id)
if len(self.locks[lock]) == 1: # The lock was free
self.requests_left[id] -= 1
if self.requests_left[id] == 0: # Got all locks needed
# Since we got all locks need, we can remove the rest of the requests
self.requests[id] -= set(locks[i + 1 :])
return True
return False
def _refain_locks(self, locks, id):
"""Cancel/release previously requested/acquired locks
Parameters
----------
locks: List[str]
Names of the locks to refain.
id: Hashable
Identifier of the `MultiLock` instance refraining the locks.
"""
waiters_ready = set()
for lock in locks:
if self.locks[lock][0] == id:
self.locks[lock].pop(0)
if self.locks[lock]:
new_first = self.locks[lock][0]
self.requests_left[new_first] -= 1
if self.requests_left[new_first] <= 0:
# Notice, `self.requests_left[new_first]` might go below zero
# if more locks are freed than requested.
self.requests_left[new_first] = 0
waiters_ready.add(new_first)
else:
self.locks[lock].remove(id)
assert id not in self.locks[lock]
del self.requests[id]
del self.requests_left[id]
for waiter in waiters_ready:
self.scheduler.loop.add_callback(self.events[waiter].set)
@log_errors
async def acquire(self, locks=None, id=None, timeout=None, num_locks=None):
if not self._request_locks(locks, id, num_locks):
assert id not in self.events
event = asyncio.Event()
self.events[id] = event
future = event.wait()
if timeout is not None:
future = wait_for(future, timeout)
try:
await future
except TimeoutError:
self._refain_locks(locks, id)
return False
finally:
del self.events[id]
# At this point `id` acquired all `locks`
assert self.requests_left[id] == 0
return True
@log_errors
def release(self, id=None):
self._refain_locks(self.requests[id], id)
| MultiLockExtension |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_cond_format01.py | {
"start": 350,
"end": 2357
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("cond_format01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with conditional formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
cell_format = workbook.add_format(
{
"font_color": "#9C0006",
"bg_color": "#FFC7CE",
"font_condense": 1,
"font_extend": 1,
}
)
worksheet.write("A1", 10)
worksheet.write("A2", 20)
worksheet.write("A3", 30)
worksheet.write("A4", 40)
worksheet.conditional_format(
"A1:A1",
{
"type": "cell",
"format": cell_format,
"criteria": "greater than",
"value": 5,
},
)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_color_type(self):
"""Test the creation of a simple XlsxWriter file with conditional formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
cell_format = workbook.add_format(
{
"font_color": Color("#9C0006"),
"bg_color": Color("#FFC7CE"),
"font_condense": 1,
"font_extend": 1,
}
)
worksheet.write("A1", 10)
worksheet.write("A2", 20)
worksheet.write("A3", 30)
worksheet.write("A4", 40)
worksheet.conditional_format(
"A1:A1",
{
"type": "cell",
"format": cell_format,
"criteria": "greater than",
"value": 5,
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | automl__auto-sklearn | autosklearn/pipeline/components/feature_preprocessing/extra_trees_preproc_for_classification.py | {
"start": 560,
"end": 6035
} | class ____(AutoSklearnPreprocessingAlgorithm):
def __init__(
self,
n_estimators,
criterion,
min_samples_leaf,
min_samples_split,
max_features,
bootstrap,
max_leaf_nodes,
max_depth,
min_weight_fraction_leaf,
min_impurity_decrease,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
class_weight=None,
):
self.n_estimators = n_estimators
self.estimator_increment = 10
if criterion not in ("gini", "entropy"):
raise ValueError(
"'criterion' is not in ('gini', 'entropy'): " "%s" % criterion
)
self.criterion = criterion
self.min_samples_leaf = min_samples_leaf
self.min_samples_split = min_samples_split
self.max_features = max_features
self.bootstrap = bootstrap
self.max_leaf_nodes = max_leaf_nodes
self.max_depth = max_depth
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.min_impurity_decrease = min_impurity_decrease
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.class_weight = class_weight
self.preprocessor = None
def fit(self, X, Y, sample_weight=None):
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
self.n_estimators = int(self.n_estimators)
if check_none(self.max_leaf_nodes):
self.max_leaf_nodes = None
else:
self.max_leaf_nodes = int(self.max_leaf_nodes)
if check_none(self.max_depth):
self.max_depth = None
else:
self.max_depth = int(self.max_depth)
self.bootstrap = check_for_bool(self.bootstrap)
self.n_jobs = int(self.n_jobs)
self.min_impurity_decrease = float(self.min_impurity_decrease)
self.max_features = self.max_features
self.min_samples_leaf = int(self.min_samples_leaf)
self.min_samples_split = int(self.min_samples_split)
self.verbose = int(self.verbose)
max_features = int(X.shape[1] ** float(self.max_features))
estimator = ExtraTreesClassifier(
n_estimators=self.n_estimators,
criterion=self.criterion,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
bootstrap=self.bootstrap,
max_features=max_features,
max_leaf_nodes=self.max_leaf_nodes,
min_impurity_decrease=self.min_impurity_decrease,
oob_score=self.oob_score,
n_jobs=self.n_jobs,
verbose=self.verbose,
random_state=self.random_state,
class_weight=self.class_weight,
)
estimator.fit(X, Y, sample_weight=sample_weight)
self.preprocessor = SelectFromModel(
estimator=estimator, threshold="mean", prefit=True
)
return self
def transform(self, X):
if self.preprocessor is None:
raise NotImplementedError
return self.preprocessor.transform(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "ETC",
"name": "Extra Trees Classifier Preprocessing",
"handles_regression": False,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": False,
"is_deterministic": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (INPUT,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
n_estimators = Constant("n_estimators", 100)
criterion = CategoricalHyperparameter(
"criterion", ["gini", "entropy"], default_value="gini"
)
max_features = UniformFloatHyperparameter(
"max_features", 0, 1, default_value=0.5
)
max_depth = UnParametrizedHyperparameter(name="max_depth", value="None")
max_leaf_nodes = UnParametrizedHyperparameter("max_leaf_nodes", "None")
min_samples_split = UniformIntegerHyperparameter(
"min_samples_split", 2, 20, default_value=2
)
min_samples_leaf = UniformIntegerHyperparameter(
"min_samples_leaf", 1, 20, default_value=1
)
min_weight_fraction_leaf = UnParametrizedHyperparameter(
"min_weight_fraction_leaf", 0.0
)
min_impurity_decrease = UnParametrizedHyperparameter(
"min_impurity_decrease", 0.0
)
bootstrap = CategoricalHyperparameter(
"bootstrap", ["True", "False"], default_value="False"
)
cs.add_hyperparameters(
[
n_estimators,
criterion,
max_features,
max_depth,
max_leaf_nodes,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
min_impurity_decrease,
bootstrap,
]
)
return cs
| ExtraTreesPreprocessorClassification |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 111457,
"end": 111510
} | class ____(CPtrType):
is_null_ptr = 1
| CNullPtrType |
python | apache__airflow | airflow-core/tests/unit/models/test_renderedtifields.py | {
"start": 2657,
"end": 2844
} | class ____:
def __init__(self):
self.a = "a" * 5000
def __str__(self):
return self.a
max_length = conf.getint("core", "max_templated_field_length")
| LargeStrObject |
python | wandb__wandb | wandb/automations/integrations.py | {
"start": 218,
"end": 461
} | class ____(SlackIntegrationFields):
team_name: str
"""Slack workspace (not W&B team) where this integration will post messages."""
channel_name: str
"""Slack channel where this integration will post messages."""
| SlackIntegration |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/compute_logs.py | {
"start": 1147,
"end": 1477
} | class ____(graphene.ObjectType):
stdoutDownloadUrl = graphene.Field(graphene.String)
stdoutLocation = graphene.Field(graphene.String)
stderrDownloadUrl = graphene.Field(graphene.String)
stderrLocation = graphene.Field(graphene.String)
class Meta:
name = "CapturedLogsMetadata"
| GrapheneCapturedLogsMetadata |
python | facebook__pyre-check | api/query.py | {
"start": 646,
"end": 752
} | class ____(NamedTuple):
name: str
annotation: Optional[str]
kind: str
final: bool
| Attributes |
python | astropy__astropy | astropy/io/fits/hdu/compressed/header.py | {
"start": 1595,
"end": 23621
} | class ____(Header):
def __init__(self, *args, **kwargs):
warnings.warn(
"The CompImageHeader class is deprecated and will be removed in future",
AstropyDeprecationWarning,
)
super().__init__(*args, **kwargs)
def _is_reserved_table_keyword(keyword):
m = TDEF_RE.match(keyword)
return keyword in ("TFIELDS", "THEAP") or (
m and m.group("label").upper() in TABLE_KEYWORD_NAMES
)
def _is_reserved_compression_keyword(keyword):
m = ZDEF_RE.match(keyword)
return keyword in COMPRESSION_KEYWORDS or (
m and m.group("label").upper() in INDEXED_COMPRESSION_KEYWORDS
)
def _is_reserved_keyword(keyword):
return _is_reserved_table_keyword(keyword) or _is_reserved_compression_keyword(
keyword
)
def _bintable_header_to_image_header(bintable_header):
# Start with a copy of the table header.
image_header = bintable_header.copy()
bscale = image_header.get("BSCALE")
bzero = image_header.get("BZERO")
# Strip out special keywords
image_header.strip()
# Delete cards that are related to the table. And move
# the values of those cards that relate to the image from
# their corresponding table cards. These include
# ZBITPIX -> BITPIX, ZNAXIS -> NAXIS, and ZNAXISn -> NAXISn.
# (Note: Used set here instead of list in case there are any duplicate
# keywords, which there may be in some pathological cases:
# https://github.com/astropy/astropy/issues/2750
for keyword in set(image_header):
if _is_reserved_keyword(keyword) or keyword in ("CHECKSUM", "DATASUM"):
del image_header[keyword]
if bscale:
image_header["BSCALE"] = bscale
if bzero:
image_header["BZERO"] = bzero
hcomments = bintable_header.comments
if "ZSIMPLE" in bintable_header:
image_header.set(
"SIMPLE", bintable_header["ZSIMPLE"], hcomments["ZSIMPLE"], before=0
)
elif "ZTENSION" in bintable_header:
if bintable_header["ZTENSION"] != "IMAGE":
warnings.warn(
"ZTENSION keyword in compressed extension != 'IMAGE'",
AstropyUserWarning,
)
image_header.set("XTENSION", "IMAGE", hcomments["ZTENSION"], before=0)
else:
image_header.set("XTENSION", "IMAGE", before=0)
image_header.set(
"BITPIX", bintable_header["ZBITPIX"], hcomments["ZBITPIX"], before=1
)
image_header.set("NAXIS", bintable_header["ZNAXIS"], hcomments["ZNAXIS"], before=2)
last_naxis = "NAXIS"
for idx in range(image_header["NAXIS"]):
znaxis = "ZNAXIS" + str(idx + 1)
naxis = znaxis[1:]
image_header.set(
naxis, bintable_header[znaxis], hcomments[znaxis], after=last_naxis
)
last_naxis = naxis
# Delete any other spurious NAXISn keywords:
naxis = image_header["NAXIS"]
for keyword in list(image_header["NAXIS?*"]):
try:
n = int(keyword[5:])
except Exception:
continue
if n > naxis:
del image_header[keyword]
# Although PCOUNT and GCOUNT are considered mandatory for IMAGE HDUs,
# ZPCOUNT and ZGCOUNT are optional, probably because for IMAGE HDUs
# their values are always 0 and 1 respectively
if "ZPCOUNT" in bintable_header:
image_header.set(
"PCOUNT",
bintable_header["ZPCOUNT"],
hcomments["ZPCOUNT"],
after=last_naxis,
)
else:
image_header.set("PCOUNT", 0, after=last_naxis)
if "ZGCOUNT" in bintable_header:
image_header.set(
"GCOUNT", bintable_header["ZGCOUNT"], hcomments["ZGCOUNT"], after="PCOUNT"
)
else:
image_header.set("GCOUNT", 1, after="PCOUNT")
if "ZEXTEND" in bintable_header:
image_header.set("EXTEND", bintable_header["ZEXTEND"], hcomments["ZEXTEND"])
if "ZBLOCKED" in bintable_header:
image_header.set("BLOCKED", bintable_header["ZBLOCKED"], hcomments["ZBLOCKED"])
# Move the ZHECKSUM and ZDATASUM cards to the image header
# as CHECKSUM and DATASUM
if "ZHECKSUM" in bintable_header:
image_header.set("CHECKSUM", bintable_header["ZHECKSUM"], hcomments["ZHECKSUM"])
if "ZDATASUM" in bintable_header:
image_header.set("DATASUM", bintable_header["ZDATASUM"], hcomments["ZDATASUM"])
# Remove the EXTNAME card if the value in the table header
# is the default value of COMPRESSED_IMAGE.
if "EXTNAME" in image_header and image_header["EXTNAME"] == "COMPRESSED_IMAGE":
del image_header["EXTNAME"]
# Remove the PCOUNT GCOUNT cards if the uncompressed header is
# from a primary HDU
if "SIMPLE" in image_header:
del image_header["PCOUNT"]
del image_header["GCOUNT"]
# Fill in BLANK keyword if necessary
if image_header["BITPIX"] > 0 and "BLANK" not in image_header:
if "ZBLANK" in bintable_header:
image_header["BLANK"] = bintable_header["ZBLANK"]
else:
# check for column named "ZBLANK"
for i in range(1, bintable_header["TFIELDS"] + 1):
if bintable_header[f"TTYPE{i}"] == "ZBLANK":
# required BLANK keyword is missing
# use most negative value as default
image_header["BLANK"] = -(1 << (image_header["BITPIX"] - 1))
warnings.warn(
f"Setting default value {image_header['BLANK']} for "
"missing BLANK keyword in compressed extension",
AstropyUserWarning,
)
break
# Look to see if there are any blank cards in the table
# header. If there are, there should be the same number
# of blank cards in the image header. Add blank cards to
# the image header to make it so.
table_blanks = bintable_header._countblanks()
image_blanks = image_header._countblanks()
for _ in range(table_blanks - image_blanks):
image_header.append()
return image_header
def _image_header_to_empty_bintable(
    image_header,
    name=None,
    huge_hdu=False,
    compression_type=None,
    tile_shape=None,
    hcomp_scale=None,
    hcomp_smooth=None,
    quantize_level=None,
    quantize_method=None,
    dither_seed=None,
    axes=None,
    generate_dither_seed=None,
):
    """Create an empty bintable HDU describing a tiled-compressed image.

    Translate the keywords of ``image_header`` (a regular IMAGE-extension
    header) into the FITS Tiled Image Convention form (``ZIMAGE``,
    ``ZBITPIX``, ``ZNAXISn``/``ZTILEn``, ``ZCMPTYPE``, ``ZNAMEn``/``ZVALn``,
    ``ZSIMPLE``/``ZTENSION``/``ZPCOUNT``/... remaps) and set up the
    variable-length-array columns that will hold the compressed tiles.
    The returned table has its header and ``columns`` populated but
    contains no data rows' contents yet.

    Parameters
    ----------
    image_header : Header
        Header of the image being compressed, as the user sees it
        (may carry BSCALE/BZERO, COMMENT/HISTORY, etc.).
    name : str, optional
        Value for the EXTNAME card of the table header.
    huge_hdu : bool, optional
        If True, use 64-bit ``Q`` variable-length array descriptors
        instead of 32-bit ``P`` ones.
    compression_type : str, optional
        One of ``COMPRESSION_TYPES``; unknown values fall back to
        ``DEFAULT_COMPRESSION_TYPE`` with a warning.
    tile_shape : tuple, optional
        Requested tile shape; validated against the image dimensions.
    hcomp_scale, hcomp_smooth : optional
        HCOMPRESS-specific parameters (written as ZNAMEn/ZVALn).
    quantize_level : float, optional
        Floating-point quantization level; ``0.0`` means lossless
        (no quantization columns are created).
    quantize_method : int, optional
        One of NO_DITHER / SUBTRACTIVE_DITHER_1 / SUBTRACTIVE_DITHER_2.
    dither_seed : int, optional
        Seed for subtractive dithering; passed through
        ``generate_dither_seed``.
    axes : list of int
        Lengths of the original image axes, in NAXISn order
        (i.e. fastest-varying first) -- used to compute ZNAXISn,
        ZTILEn and the number of table rows.
    generate_dither_seed : callable, optional
        Callback that turns a user-supplied seed into the actual
        ZDITHER0 value.

    Returns
    -------
    bintable : _CompBinTableHDU
        The empty compressed-image binary table.
    """
    bintable = _CompBinTableHDU()

    # NOTE: image_header is the header that a user would see as the image
    # header which they might have set things like BSCALE and BZERO on, or
    # added history or comments to.

    # Update the extension name in the table header
    bintable.header.set(
        "EXTNAME",
        name,
        "name of this binary table extension",
        after="TFIELDS",
    )

    # Set the compression type in the table header.
    if compression_type:
        if compression_type not in COMPRESSION_TYPES:
            warnings.warn(
                "Unknown compression type provided (supported are {}). "
                "Default ({}) compression will be used.".format(
                    ", ".join(map(repr, COMPRESSION_TYPES)),
                    DEFAULT_COMPRESSION_TYPE,
                ),
                AstropyUserWarning,
            )
            compression_type = DEFAULT_COMPRESSION_TYPE

        bintable.header.set(
            "ZCMPTYPE", compression_type, "compression algorithm", after="TFIELDS"
        )
    else:
        # No explicit type given: resolve aliases (e.g. legacy names) to
        # their canonical form instead.
        compression_type = CMTYPE_ALIASES.get(compression_type, compression_type)

    # If the input image header had BSCALE/BZERO cards, then insert
    # them in the table header.
    bzero = image_header.get("BZERO", 0.0)
    bscale = image_header.get("BSCALE", 1.0)
    after_keyword = "EXTNAME"

    if bscale != 1.0:
        bintable.header.set("BSCALE", bscale, after=after_keyword)
        after_keyword = "BSCALE"

    if bzero != 0.0:
        bintable.header.set("BZERO", bzero, after=after_keyword)

    # Reuse the original card comments where available; image_header may be
    # a plain mapping without a .comments attribute, hence AttributeError.
    try:
        bitpix_comment = image_header.comments["BITPIX"]
    except (AttributeError, KeyError):
        bitpix_comment = "data type of original image"

    try:
        naxis_comment = image_header.comments["NAXIS"]
    except (AttributeError, KeyError):
        naxis_comment = "dimension of original image"

    # Set the label for the first column in the table
    bintable.header.set(
        "TTYPE1", "COMPRESSED_DATA", "label for field 1", after="TFIELDS"
    )

    # Set the data format for the first column.  It is dependent
    # on the requested compression type.
    if compression_type == "PLIO_1":
        # PLIO stores 16-bit integers; everything else stores raw bytes.
        tform1 = "1QI" if huge_hdu else "1PI"
    else:
        tform1 = "1QB" if huge_hdu else "1PB"

    bintable.header.set(
        "TFORM1",
        tform1,
        "data format of field: variable length array",
        after="TTYPE1",
    )

    # Create the first column for the table.  This column holds the
    # compressed data.
    col1 = Column(name=bintable.header["TTYPE1"], format=tform1)

    # Create the additional columns required for floating point
    # data and calculate the width of the output table.
    zbitpix = image_header["BITPIX"]

    if zbitpix < 0 and quantize_level != 0.0:
        # floating point image has 'COMPRESSED_DATA',
        # 'GZIP_COMPRESSED_DATA', 'ZSCALE', and 'ZZERO' columns (unless using
        # lossless compression, per CFITSIO)
        ncols = 4

        ttype2 = "GZIP_COMPRESSED_DATA"
        # The required format for the GZIP_COMPRESSED_DATA is actually
        # missing from the standard docs, but CFITSIO suggests it
        # should be 1PB, which is logical.
        tform2 = "1QB" if huge_hdu else "1PB"

        # Set up the second column for the table that will hold any
        # uncompressable data.
        bintable.header.set("TTYPE2", ttype2, "label for field 2", after="TFORM1")
        bintable.header.set(
            "TFORM2",
            tform2,
            "data format of field: variable length array",
            after="TTYPE2",
        )

        col2 = Column(name=ttype2, format=tform2)

        # Set up the third column for the table that will hold
        # the scale values for quantized data.
        bintable.header.set("TTYPE3", "ZSCALE", "label for field 3", after="TFORM2")
        bintable.header.set(
            "TFORM3", "1D", "data format of field: 8-byte DOUBLE", after="TTYPE3"
        )
        col3 = Column(name=bintable.header["TTYPE3"], format=bintable.header["TFORM3"])

        # Set up the fourth column for the table that will hold
        # the zero values for the quantized data.
        bintable.header.set("TTYPE4", "ZZERO", "label for field 4", after="TFORM3")
        bintable.header.set(
            "TFORM4", "1D", "data format of field: 8-byte DOUBLE", after="TTYPE4"
        )
        after = "TFORM4"
        col4 = Column(name=bintable.header["TTYPE4"], format=bintable.header["TFORM4"])

        # Create the ColDefs object for the table
        cols = ColDefs([col1, col2, col3, col4])
    else:
        # default table has just one 'COMPRESSED_DATA' column
        ncols = 1
        after = "TFORM1"

        # Create the ColDefs object for the table
        cols = ColDefs([col1])

    # Update the table header with the width of the table, the
    # number of fields in the table, the indicator for a compressed
    # image HDU, the data type of the image data and the number of
    # dimensions in the image data array.
    bintable.header.set("NAXIS1", cols.dtype.itemsize, "width of table in bytes")
    bintable.header.set(
        "TFIELDS", ncols, "number of fields in each row", after="GCOUNT"
    )
    bintable.header.set(
        "ZIMAGE", True, "extension contains compressed image", after=after
    )
    bintable.header.set("ZBITPIX", zbitpix, bitpix_comment, after="ZIMAGE")
    bintable.header.set("ZNAXIS", image_header["NAXIS"], naxis_comment, after="ZBITPIX")

    # Verify that any input tile size parameter is the appropriate
    # size to match the HDU's data.
    tile_shape = _validate_tile_shape(
        tile_shape=tile_shape,
        compression_type=compression_type,
        image_header=image_header,
    )

    # Set up locations for writing the next cards in the header.
    last_znaxis = "ZNAXIS"

    if image_header["NAXIS"] > 0:
        after1 = "ZNAXIS1"
    else:
        after1 = "ZNAXIS"

    # Calculate the number of rows in the output table and
    # write the ZNAXISn and ZTILEn cards to the table header.
    nrows = 0

    for idx, axis in enumerate(axes):
        naxis = "NAXIS" + str(idx + 1)
        znaxis = "ZNAXIS" + str(idx + 1)
        ztile = "ZTILE" + str(idx + 1)

        # tile_shape is in C (row-major) order while axes is in NAXISn
        # order, hence the reversed index.  One row per tile:
        # ceil(axis / tile) tiles along each axis, multiplied together.
        ts = tile_shape[len(axes) - 1 - idx]

        if not nrows:
            nrows = (axis - 1) // ts + 1
        else:
            nrows *= (axis - 1) // ts + 1

        if naxis in image_header:
            bintable.header.set(
                znaxis, axis, image_header.comments[naxis], after=last_znaxis
            )
        else:
            bintable.header.set(
                znaxis, axis, "length of original image axis", after=last_znaxis
            )

        bintable.header.set(ztile, ts, "size of tiles to be compressed", after=after1)
        last_znaxis = znaxis
        after1 = ztile

    # Set the NAXIS2 header card in the table hdu to the number of
    # rows in the table.
    bintable.header.set("NAXIS2", nrows, "number of rows in table")

    # Set the compression parameters in the table header based on the compression type
    # idx tracks the next free ZNAMEn/ZVALn slot for the quantization cards below.
    after_keyword = "ZCMPTYPE"
    idx = 1

    if compression_type == "RICE_1":
        bintable.header.set(
            "ZNAME1", "BLOCKSIZE", "compression block size", after=after_keyword
        )
        bintable.header.set(
            "ZVAL1", DEFAULT_BLOCK_SIZE, "pixels per block", after="ZNAME1"
        )

        bintable.header.set(
            "ZNAME2", "BYTEPIX", "bytes per pixel (1, 2, 4, or 8)", after="ZVAL1"
        )

        if bintable.header["ZBITPIX"] == 8:
            bytepix = 1
        elif bintable.header["ZBITPIX"] == 16:
            bytepix = 2
        else:
            bytepix = DEFAULT_BYTE_PIX

        bintable.header.set(
            "ZVAL2", bytepix, "bytes per pixel (1, 2, 4, or 8)", after="ZNAME2"
        )
        after_keyword = "ZVAL2"
        idx = 3
    elif compression_type == "HCOMPRESS_1":
        bintable.header.set(
            "ZNAME1", "SCALE", "HCOMPRESS scale factor", after=after_keyword
        )
        bintable.header.set(
            "ZVAL1", hcomp_scale, "HCOMPRESS scale factor", after="ZNAME1"
        )
        bintable.header.set(
            "ZNAME2", "SMOOTH", "HCOMPRESS smooth option", after="ZVAL1"
        )
        bintable.header.set(
            "ZVAL2", hcomp_smooth, "HCOMPRESS smooth option", after="ZNAME2"
        )
        after_keyword = "ZVAL2"
        idx = 3

    if image_header["BITPIX"] < 0:  # floating point image
        bintable.header.set(
            "ZNAME" + str(idx),
            "NOISEBIT",
            "floating point quantization level",
            after=after_keyword,
        )
        bintable.header.set(
            "ZVAL" + str(idx),
            quantize_level,
            "floating point quantization level",
            after="ZNAME" + str(idx),
        )

        # Add the dither method and seed
        if quantize_method:
            if quantize_method not in [
                NO_DITHER,
                SUBTRACTIVE_DITHER_1,
                SUBTRACTIVE_DITHER_2,
            ]:
                # NOTE: rebinds the ``name`` parameter (EXTNAME was already
                # written above, so this is harmless here).
                name = QUANTIZE_METHOD_NAMES[DEFAULT_QUANTIZE_METHOD]
                warnings.warn(
                    "Unknown quantization method provided. "
                    f"Default method ({name}) used."
                )
                quantize_method = DEFAULT_QUANTIZE_METHOD

            if quantize_method == NO_DITHER:
                zquantiz_comment = "No dithering during quantization"
            else:
                zquantiz_comment = "Pixel Quantization Algorithm"

            bintable.header.set(
                "ZQUANTIZ",
                QUANTIZE_METHOD_NAMES[quantize_method],
                zquantiz_comment,
                after="ZVAL" + str(idx),
            )
        else:
            # If the ZQUANTIZ keyword is missing the default is to assume
            # no dithering, rather than whatever DEFAULT_QUANTIZE_METHOD
            # is set to
            quantize_method = bintable.header.get("ZQUANTIZ", NO_DITHER)

            if isinstance(quantize_method, str):
                # Map the string method name back to its integer constant;
                # the for-else falls back to NO_DITHER for unknown names.
                for k, v in QUANTIZE_METHOD_NAMES.items():
                    if v.upper() == quantize_method:
                        quantize_method = k
                        break
                else:
                    quantize_method = NO_DITHER

        if quantize_method == NO_DITHER:
            if "ZDITHER0" in bintable.header:
                # If dithering isn't being used then there's no reason to
                # keep the ZDITHER0 keyword
                del bintable.header["ZDITHER0"]
        else:
            if dither_seed:
                dither_seed = generate_dither_seed(dither_seed)
            elif "ZDITHER0" in bintable.header:
                dither_seed = bintable.header["ZDITHER0"]
            else:
                dither_seed = generate_dither_seed(DEFAULT_DITHER_SEED)

            bintable.header.set(
                "ZDITHER0",
                dither_seed,
                "dithering offset when quantizing floats",
                after="ZQUANTIZ",
            )

    # Move SIMPLE card from the image header to the
    # table header as ZSIMPLE card.
    if "SIMPLE" in image_header:
        bintable.header.set(
            "ZSIMPLE",
            image_header["SIMPLE"],
            image_header.comments["SIMPLE"],
            before="ZBITPIX",
        )

    # Move EXTEND card from the image header to the
    # table header as ZEXTEND card.
    if "EXTEND" in image_header:
        bintable.header.set(
            "ZEXTEND", image_header["EXTEND"], image_header.comments["EXTEND"]
        )

    # Move BLOCKED card from the image header to the
    # table header as ZBLOCKED card.
    if "BLOCKED" in image_header:
        bintable.header.set(
            "ZBLOCKED",
            image_header["BLOCKED"],
            image_header.comments["BLOCKED"],
        )

    # Move XTENSION card from the image header to the
    # table header as ZTENSION card.

    # Since we only handle compressed IMAGEs, ZTENSION should
    # always be IMAGE, even if the caller has passed in a header
    # for some other type of extension.
    if "XTENSION" in image_header:
        bintable.header.set(
            "ZTENSION",
            "IMAGE",
            image_header.comments["XTENSION"],
            before="ZBITPIX",
        )

    # Move PCOUNT and GCOUNT cards from image header to the table
    # header as ZPCOUNT and ZGCOUNT cards.
    if "PCOUNT" in image_header:
        bintable.header.set(
            "ZPCOUNT",
            image_header["PCOUNT"],
            image_header.comments["PCOUNT"],
            after=last_znaxis,
        )

    if "GCOUNT" in image_header:
        bintable.header.set(
            "ZGCOUNT",
            image_header["GCOUNT"],
            image_header.comments["GCOUNT"],
            after="ZPCOUNT",
        )

    # Move CHECKSUM and DATASUM cards from the image header to the
    # table header as XHECKSUM and XDATASUM cards.
    if "CHECKSUM" in image_header:
        bintable.header.set(
            "ZHECKSUM",
            image_header["CHECKSUM"],
            image_header.comments["CHECKSUM"],
        )

    if "DATASUM" in image_header:
        bintable.header.set(
            "ZDATASUM",
            image_header["DATASUM"],
            image_header.comments["DATASUM"],
        )

    bintable.columns = cols

    # Add any keywords that are in the original header that are not already
    # FIXME: don't use keyword_remaps, instead define an actual list to check
    # including regular expressions for NAXIS and other similar keywords

    # We need to add comments and history entries back to the right place, so
    # we first write out regular keywords and keep track of what the previous
    # regular keyword was for each position.  We need to do this because
    # add_comment and add_history automatically write out to the bottom of the
    # header and there is no way to tell them to add them to the current
    # location.

    current_last_keyword = bintable.header.cards[-1].keyword
    # previous_keyword[i] is the last regular keyword written at or before
    # position i of image_header.cards -- used as the 'after=' anchor when
    # re-inserting COMMENT/HISTORY cards below.
    previous_keyword = []

    for card in image_header.cards:
        # We deal with blanks in _writeto, and we handle COMMENT and HISTORY
        # only once we have written out the regular keywords.  deal with after
        if card.keyword == "":
            bintable.header.add_blank()
        elif _is_reserved_keyword(card.keyword):
            warnings.warn(
                f"Keyword {card.keyword!r} is reserved "
                "for use by the FITS Tiled Image "
                "Convention so will be ignored",
                VerifyWarning,
            )
        elif (
            card.keyword not in ("", "COMMENT", "HISTORY")
            and card.keyword not in REMAPPED_KEYWORDS
            and card.keyword not in bintable.header
            and not card.keyword.startswith("NAXIS")
        ):
            bintable.header.append(card)
            current_last_keyword = card.keyword
        previous_keyword.append(current_last_keyword)

    # We loop over cards in reverse order for 'after=' to return the correct
    # order in the final header.
    for icard, card in list(enumerate(image_header.cards))[::-1]:
        if card.keyword == "COMMENT":
            bintable.header.add_comment(card.value, after=previous_keyword[icard])
        elif card.keyword == "HISTORY":
            bintable.header.add_history(card.value, after=previous_keyword[icard])

    # TODO: avoid writing the same comment multiple times

    return bintable
| CompImageHeader |
python | jd__tenacity | tenacity/nap.py | {
"start": 969,
"end": 1382
} | class ____:
"""Sleep strategy that waits on an event to be set."""
def __init__(self, event: "threading.Event") -> None:
self.event = event
def __call__(self, timeout: typing.Optional[float]) -> None:
# NOTE(harlowja): this may *not* actually wait for timeout
# seconds if the event is set (ie this may eject out early).
self.event.wait(timeout=timeout)
| sleep_using_event |
python | getsentry__sentry | src/sentry/sentry_metrics/consumers/indexer/routing_producer.py | {
"start": 620,
"end": 1010
} | class ____(NamedTuple):
"""
Payload suitable for the ``RoutingProducer``. ``MessageRouter`` works
with this payload type. The routing_headers are used to determine the
route for the message. The payload is the message body which should be sent
to the destination topic.
"""
routing_header: MutableMapping[str, Any]
routing_message: KafkaPayload
| RoutingPayload |
python | pytorch__pytorch | test/test_datapipe.py | {
"start": 110042,
"end": 123974
} | class ____(TestCase):
class CustomIterDataPipe(IterDataPipe):
@staticmethod
def add_one(x):
return x + 1
@classmethod
def classify(cls, x):
return 0
def add_v(self, x):
return x + self.v
def __init__(self, fn, source_dp=None):
self.fn = fn
self.source_dp = (
source_dp if source_dp else dp.iter.IterableWrapper([1, 2, 4])
)
self._dp = (
self.source_dp.map(self.add_one)
.map(self.add_v)
.demux(2, self.classify)[0]
)
self.v = 1
def __iter__(self):
yield from self._dp
def test_circular_serialization_with_pickle(self):
# Test for circular reference issue with pickle
dp1 = TestCircularSerialization.CustomIterDataPipe(fn=_fake_fn)
self.assertTrue(list(dp1) == list(pickle.loads(pickle.dumps(dp1))))
child_1 = dp1._dp
dm_1 = child_1.main_datapipe
m2_1 = dm_1.main_datapipe
m1_1 = m2_1.datapipe
src_1 = m1_1.datapipe
res1 = traverse_dps(dp1)
exp_res_1 = {
id(dp1): (
dp1,
{
id(src_1): (src_1, {}),
id(child_1): (
child_1,
{
id(dm_1): (
dm_1,
{
id(m2_1): (
m2_1,
{id(m1_1): (m1_1, {id(src_1): (src_1, {})})},
)
},
)
},
),
},
)
}
self.assertEqual(res1, exp_res_1)
dp2 = TestCircularSerialization.CustomIterDataPipe(fn=_fake_fn, source_dp=dp1)
self.assertTrue(list(dp2) == list(pickle.loads(pickle.dumps(dp2))))
child_2 = dp2._dp
dm_2 = child_2.main_datapipe
m2_2 = dm_2.main_datapipe
m1_2 = m2_2.datapipe
res2 = traverse_dps(dp2)
exp_res_2 = {
id(dp2): (
dp2,
{
id(dp1): (
dp1,
{
id(src_1): (src_1, {}),
id(child_1): (
child_1,
{
id(dm_1): (
dm_1,
{
id(m2_1): (
m2_1,
{
id(m1_1): (
m1_1,
{id(src_1): (src_1, {})},
)
},
)
},
)
},
),
},
),
id(child_2): (
child_2,
{
id(dm_2): (
dm_2,
{
id(m2_2): (
m2_2,
{
id(m1_2): (
m1_2,
{
id(dp1): (
dp1,
{
id(src_1): (src_1, {}),
id(child_1): (
child_1,
{
id(dm_1): (
dm_1,
{
id(m2_1): (
m2_1,
{
id(
m1_1
): (
m1_1,
{
id(
src_1
): (
src_1,
{},
)
},
)
},
)
},
)
},
),
},
),
},
)
},
)
},
)
},
),
},
)
}
self.assertEqual(res2, exp_res_2)
class LambdaIterDataPipe(CustomIterDataPipe):
def __init__(self, fn, source_dp=None):
super().__init__(fn, source_dp)
self.container = [
lambda x: x + 1,
]
self.lambda_fn = lambda x: x + 1
self._dp = (
self.source_dp.map(self.add_one)
.map(self.lambda_fn)
.map(self.add_v)
.demux(2, self.classify)[0]
)
@skipIfNoDill
@skipIf(True, "Dill Tests")
def test_circular_serialization_with_dill(self):
# Test for circular reference issue with dill
dp1 = TestCircularSerialization.LambdaIterDataPipe(lambda x: x + 1)
self.assertTrue(list(dp1) == list(dill.loads(dill.dumps(dp1))))
child_1 = dp1._dp
dm_1 = child_1.main_datapipe
m2_1 = dm_1.main_datapipe
m1_1 = m2_1.datapipe
src_1 = m1_1.datapipe
res1 = traverse_dps(dp1)
exp_res_1 = {
id(dp1): (
dp1,
{
id(src_1): (src_1, {}),
id(child_1): (
child_1,
{
id(dm_1): (
dm_1,
{
id(m2_1): (
m2_1,
{id(m1_1): (m1_1, {id(src_1): (src_1, {})})},
)
},
)
},
),
},
)
}
self.assertEqual(res1, exp_res_1)
dp2 = TestCircularSerialization.LambdaIterDataPipe(fn=_fake_fn, source_dp=dp1)
self.assertTrue(list(dp2) == list(dill.loads(dill.dumps(dp2))))
child_2 = dp2._dp
dm_2 = child_2.main_datapipe
m2_2 = dm_2.main_datapipe
m1_2 = m2_2.datapipe
res2 = traverse_dps(dp2)
exp_res_2 = {
id(dp2): (
dp2,
{
id(dp1): (
dp1,
{
id(src_1): (src_1, {}),
id(child_1): (
child_1,
{
id(dm_1): (
dm_1,
{
id(m2_1): (
m2_1,
{
id(m1_1): (
m1_1,
{id(src_1): (src_1, {})},
)
},
)
},
)
},
),
},
),
id(child_2): (
child_2,
{
id(dm_2): (
dm_2,
{
id(m2_2): (
m2_2,
{
id(m1_2): (
m1_2,
{
id(dp1): (
dp1,
{
id(src_1): (src_1, {}),
id(child_1): (
child_1,
{
id(dm_1): (
dm_1,
{
id(m2_1): (
m2_1,
{
id(
m1_1
): (
m1_1,
{
id(
src_1
): (
src_1,
{},
)
},
)
},
)
},
)
},
),
},
),
},
)
},
)
},
)
},
),
},
)
}
self.assertEqual(res2, exp_res_2)
| TestCircularSerialization |
python | astropy__astropy | astropy/coordinates/representation/spherical.py | {
"start": 601,
"end": 11193
} | class ____(BaseRepresentation):
"""
Representation of points on a unit sphere.
Parameters
----------
lon, lat : `~astropy.units.Quantity` ['angle'] or str
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
dictionary of of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
attr_classes = {"lon": Longitude, "lat": Latitude}
@classproperty
def _dimensional_representation(cls):
return SphericalRepresentation
def __init__(self, lon, lat=None, differentials=None, copy=True):
super().__init__(lon, lat, differentials=differentials, copy=copy)
@classproperty
def _compatible_differentials(cls):
return [
UnitSphericalDifferential,
UnitSphericalCosLatDifferential,
SphericalDifferential,
SphericalCosLatDifferential,
RadialDifferential,
]
# Could let the metaclass define these automatically, but good to have
# a bit clearer docstrings.
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return {
"lon": CartesianRepresentation(-sinlon, coslon, 0.0, copy=COPY_IF_NEEDED),
"lat": CartesianRepresentation(
-sinlat * coslon, -sinlat * sinlon, coslat, copy=COPY_IF_NEEDED
),
}
def scale_factors(self, omit_coslat=False):
sf_lat = np.broadcast_to(1.0 / u.radian, self.shape, subok=True)
sf_lon = sf_lat if omit_coslat else np.cos(self.lat) / u.radian
return {"lon": sf_lon, "lat": sf_lat}
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# erfa s2c: Convert [unit]spherical coordinates to Cartesian.
p = erfa_ufunc.s2c(self.lon, self.lat)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa c2s: P-vector to [unit]spherical coordinates.
return cls(*erfa_ufunc.c2s(p), copy=False)
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO! for differential_class. This cannot (currently) be implemented
# like in the other Representations since `_re_represent_differentials`
# keeps differentials' unit keys, but this can result in a mismatch
# between the UnitSpherical expected key (e.g. "s") and that expected
# in the other class (here "s / m"). For more info, see PR #11467
if isinstance(other_class, type) and not differential_class:
if issubclass(other_class, PhysicsSphericalRepresentation):
return other_class(
phi=self.lon,
theta=90 * u.deg - self.lat,
r=1.0,
copy=COPY_IF_NEEDED,
)
elif issubclass(other_class, SphericalRepresentation):
return other_class(
lon=self.lon,
lat=self.lat,
distance=1.0,
copy=COPY_IF_NEEDED,
)
return super().represent_as(other_class, differential_class)
def transform(self, matrix):
r"""Transform the unit-spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
Returns
-------
`~astropy.coordinates.UnitSphericalRepresentation` or `~astropy.coordinates.SphericalRepresentation`
If ``matrix`` is O(3) -- :math:`M \dot M^T = I` -- like a rotation,
then the result is a `~astropy.coordinates.UnitSphericalRepresentation`.
All other matrices will change the distance, so the dimensional
representation is used instead.
"""
# the transformation matrix does not need to be a rotation matrix,
# so the unit-distance is not guaranteed. For speed, we check if the
# matrix is in O(3) and preserves lengths.
if np.all(is_rotation_or_reflection(matrix)): # remain in unit-rep
xyz = erfa_ufunc.s2c(self.lon, self.lat)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat = erfa_ufunc.c2s(p)
rep = self.__class__(lon=lon, lat=lat)
# handle differentials
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
rep = rep.with_differentials(new_diffs)
else: # switch to dimensional representation
rep = self._dimensional_representation(
lon=self.lon, lat=self.lat, distance=1, differentials=self.differentials
).transform(matrix)
return rep
def _scale_operation(self, op, *args):
return self._dimensional_representation(
lon=self.lon, lat=self.lat, distance=1.0, differentials=self.differentials
)._scale_operation(op, *args)
def __neg__(self):
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super().__neg__()
result = self.__class__(self.lon + 180.0 * u.deg, -self.lat, copy=False)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, operator.neg), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units, which is
always unity for vectors on the unit sphere.
Returns
-------
norm : `~astropy.units.Quantity` ['dimensionless']
Dimensionless ones, with the same shape as the representation.
"""
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled, copy=False)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self._dimensional_representation.from_cartesian(result)
def mean(self, *args, **kwargs):
"""Vector mean.
The representation is converted to cartesian, the means of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("mean")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().mean(*args, **kwargs)
)
def sum(self, *args, **kwargs):
"""Vector sum.
The representation is converted to cartesian, the sums of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("sum")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().sum(*args, **kwargs)
)
def cross(self, other):
"""Cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to `~astropy.coordinates.SphericalRepresentation`.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
The representation to take the cross product with.
Returns
-------
cross_product : `~astropy.coordinates.SphericalRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials("cross")
return self._dimensional_representation.from_cartesian(
self.to_cartesian().cross(other)
)
| UnitSphericalRepresentation |
python | astropy__astropy | astropy/extern/configobj/validate.py | {
"start": 11356,
"end": 11734
} | class ____(ValidateError):
"""An unknown check function was requested"""
def __init__(self, value):
"""
>>> raise VdtUnknownCheckError('yoda')
Traceback (most recent call last):
VdtUnknownCheckError: the check "yoda" is unknown.
"""
ValidateError.__init__(self, 'the check "%s" is unknown.' % (value,))
| VdtUnknownCheckError |
python | PrefectHQ__prefect | tests/client/api/test_flow_runs.py | {
"start": 4971,
"end": 9566
} | class ____:
"""Test the count_flow_runs client method with various filter combinations."""
@pytest.fixture
async def flow_runs(self, flow, session):
flow_2 = await models.flows.create_flow(
session=session,
flow=actions.FlowCreate(name="flow-2", tags=["db"]),
)
flow_runs = []
# Flow 1 runs
flow_runs.append(
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
name="flow-1-1",
tags=["db", "blue"],
state=schemas.states.Completed(),
),
)
)
flow_runs.append(
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
name="flow-1-2",
tags=["db", "red"],
state=schemas.states.Failed(),
),
)
)
flow_runs.append(
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
name="flow-1-3",
tags=["blue"],
state=schemas.states.Running(),
),
)
)
# Flow 2 runs
flow_runs.append(
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow_2.id,
name="flow-2-1",
tags=["db"],
state=schemas.states.Completed(),
),
)
)
flow_runs.append(
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow_2.id,
name="flow-2-2",
state=schemas.states.Scheduled(),
),
)
)
await session.commit()
return flow_runs
# Test parameters: [filter_kwargs, expected_count]
count_test_params = [
# No filters - should return all flow runs
({}, 5),
# Flow filters
({"flow_filter": filters.FlowFilter(name={"any_": ["flow-2"]})}, 2),
({"flow_filter": filters.FlowFilter(tags={"all_": ["db"]})}, 2),
# Flow run filters
({"flow_run_filter": filters.FlowRunFilter(name={"like_": "flow-2"})}, 2),
({"flow_run_filter": filters.FlowRunFilter(tags={"all_": ["db", "red"]})}, 1),
({"flow_run_filter": filters.FlowRunFilter(tags={"is_null_": True})}, 1),
# State filters
(
{
"flow_run_filter": filters.FlowRunFilter(
state={"type": {"any_": ["COMPLETED"]}}
)
},
2,
),
(
{
"flow_run_filter": filters.FlowRunFilter(
state={"type": {"any_": ["FAILED"]}}
)
},
1,
),
# Combined filters
(
{
"flow_filter": filters.FlowFilter(tags={"all_": ["db"]}),
"flow_run_filter": filters.FlowRunFilter(tags={"all_": ["db"]}),
},
1,
),
(
{
"flow_filter": filters.FlowFilter(name={"like_": "my-flow"}),
"flow_run_filter": filters.FlowRunFilter(
state={"type": {"any_": ["COMPLETED"]}}
),
},
1,
),
# Filters that should return nothing
({"flow_filter": filters.FlowFilter(name={"any_": ["nonexistent-flow"]})}, 0),
]
@pytest.mark.parametrize("filter_kwargs,expected_count", count_test_params)
async def test_async_count_flow_runs(
self, flow_runs, filter_kwargs, expected_count
):
async with get_client() as client:
count = await client.count_flow_runs(**filter_kwargs)
assert count == expected_count
@pytest.mark.parametrize("filter_kwargs,expected_count", count_test_params)
def test_sync_count_flow_runs(self, flow_runs, filter_kwargs, expected_count):
with get_client(sync_client=True) as client:
count = client.count_flow_runs(**filter_kwargs)
assert count == expected_count
| TestCountFlowRuns |
python | tensorflow__tensorflow | tensorflow/python/distribute/experimental/dtensor_util.py | {
"start": 1797,
"end": 3962
} | class ____(values_lib.DistributedValues):
"""DistributedValue backed by a DTensor instance.
This class is useful to align the interface between DTensor and tf.distribute.
Most of the tf.distribute API will accept/return DistributedValue, whereas
DTensor low level API will only accept DTensor instance. In order to avoid
the conversion back and forth between DistributedValue and DTensor, we
introduce this class so that it can work with both side.
"""
def __init__(self, dtensor):
if context.executing_eagerly():
if not d_api.is_dtensor(dtensor):
raise ValueError("The DTensorDistributedValue can only be built with "
f"DTensor instance, got {type(dtensor)}")
super().__init__(d_api.unpack(dtensor))
else:
# We can't unpack the dtensor instance for now due to graph context.
# We will treat the dtensor instance as one global instance and let it
# return as a global replica instance.
# TODO(feyu): Support unpack in the graph context.
super().__init__([dtensor,])
self._dtensor = dtensor
def get_dtensor(self):
return self._dtensor
@property
def values(self):
# Note that this method exists so that it match the interface for PerReplica
# The public API in `tf.types.experimental.distributed.PerReplica` doesn't
# define any methods.
return self._values
def _dtensor_distributed_value_to_tensor(
var, dtype=None, name=None, as_ref=False):
del name
dtensor = var.get_dtensor()
if dtype is not None and not dtype.is_compatible_with(dtensor.dtype):
raise ValueError(
"Incompatible type conversion requested to type {!r} for variable "
"of type {!r}".format(dtype.name, dtensor.dtype.name))
if as_ref:
raise NotImplementedError(
"PerReplica doesn't support being used as a reference.")
return dtensor
# Register a conversion function to provide a useful error message when users
# try to use PerReplica values in the wrong contexts
tensor_conversion_registry.register_tensor_conversion_function(
DTensorDistributedValue, _dtensor_distributed_value_to_tensor)
| DTensorDistributedValue |
python | tox-dev__tox | src/tox/config/source/ini.py | {
"start": 620,
"end": 4467
} | class ____(Source):
"""Configuration sourced from a ini file (such as tox.ini)."""
CORE_SECTION = CORE
def __init__(self, path: Path, content: str | None = None) -> None:
super().__init__(path)
self._parser = ConfigParser(interpolation=None)
if content is None:
if not path.exists():
raise ValueError
content = path.read_text(encoding="utf-8")
self._parser.read_string(content, str(path))
self._section_mapping: defaultdict[str, list[str]] = defaultdict(list)
def transform_section(self, section: Section) -> Section: # noqa: PLR6301
return IniSection(section.prefix, section.name)
def sections(self) -> Iterator[IniSection]:
for section in self._parser.sections():
yield IniSection.from_key(section)
def get_loader(self, section: Section, override_map: OverrideMap) -> IniLoader | None:
# look up requested section name in the generative testenv mapping to find the real config source
for key in self._section_mapping.get(section.name) or []:
if section.prefix is None or Section.from_key(key).prefix == section.prefix:
break
else:
# if no matching section/prefix is found, use the requested section key as-is (for custom prefixes)
key = section.key
if self._parser.has_section(key):
return IniLoader(
section=section,
parser=self._parser,
overrides=override_map.get(section.key, []),
core_section=self.CORE_SECTION,
section_key=key,
)
return None
def get_core_section(self) -> Section:
return self.CORE_SECTION
def get_base_sections(self, base: list[str], in_section: Section) -> Iterator[Section]: # noqa: PLR6301
for a_base in base:
section = IniSection.from_key(a_base)
yield section # the base specifier is explicit
if in_section.prefix is not None: # no prefix specified, so this could imply our own prefix
yield IniSection(in_section.prefix, a_base)
def get_tox_env_section(self, item: str) -> tuple[Section, list[str], list[str]]: # noqa: PLR6301
return IniSection.test_env(item), [TEST_ENV_PREFIX], [PKG_ENV_PREFIX]
def envs(self, core_config: ConfigSet) -> Iterator[str]:
seen = set()
for name in self._discover_tox_envs(core_config):
if name not in seen:
seen.add(name)
yield name
def _discover_tox_envs(self, core_config: ConfigSet) -> Iterator[str]:
def register_factors(envs: Iterable[str]) -> None:
known_factors.update(chain.from_iterable(e.split("-") for e in envs))
explicit = list(core_config["env_list"])
yield from explicit
known_factors: set[str] = set()
register_factors(explicit)
# discover all additional defined environments, including generative section headers
for section in self.sections():
if section.is_test_env:
register_factors(section.names)
for name in section.names:
self._section_mapping[name].append(section.key)
yield name
# add all conditional markers that are not part of the explicitly defined sections
for section in self.sections():
yield from self._discover_from_section(section, known_factors)
def _discover_from_section(self, section: IniSection, known_factors: set[str]) -> Iterator[str]:
for value in self._parser[section.key].values():
for env in find_envs(value):
if set(env.split("-")) - known_factors:
yield env
__all__ = [
"IniSource",
]
| IniSource |
python | dask__distributed | distributed/dashboard/components/scheduler.py | {
"start": 9441,
"end": 14836
} | class ____(DashboardComponent, MemoryColor):
"""Total memory usage on the cluster"""
@log_errors
def __init__(self, scheduler, width=600, **kwargs):
DashboardComponent.__init__(self)
MemoryColor.__init__(self)
self.scheduler = scheduler
self.source = ColumnDataSource(
{
"width": [0] * 4,
"x": [0] * 4,
"y": [0] * 4,
"color": ["blue", "blue", "blue", "grey"],
"alpha": [1, 0.7, 0.4, 1],
"proc_memory": [0] * 4,
"managed": [0] * 4,
"unmanaged_old": [0] * 4,
"unmanaged_recent": [0] * 4,
"spilled": [0] * 4,
}
)
self.root = figure(
title="Bytes stored on cluster",
tools="",
width=int(width / 2),
name="cluster_memory",
min_border_bottom=50,
**kwargs,
)
rect = self.root.rect(
source=self.source,
x="x",
y="y",
width="width",
height=0.9,
color="color",
alpha="alpha",
)
rect.nonselection_glyph = None
self.root.axis[0].ticker = BasicTicker(**TICKS_1024)
self.root.xaxis[0].formatter = NumeralTickFormatter(format="0.0 b")
self.root.xaxis.major_label_orientation = XLABEL_ORIENTATION
self.root.xaxis.minor_tick_line_alpha = 0
self.root.x_range = Range1d(start=0)
self.root.yaxis.visible = False
self.root.ygrid.visible = False
self.root.toolbar_location = "above"
self.root.yaxis.visible = False
hover = HoverTool(
point_policy="follow_mouse",
tooltips="""
<div>
<span style="font-size: 12px; font-weight: bold;">Process memory (RSS):</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@proc_memory{0.00 b}</span>
</div>
<div style="margin-left: 1em;">
<span style="font-size: 12px; font-weight: bold;">Managed:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@managed{0.00 b}</span>
</div>
<div style="margin-left: 1em;">
<span style="font-size: 12px; font-weight: bold;">Unmanaged (old):</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@unmanaged_old{0.00 b}</span>
</div>
<div style="margin-left: 1em;">
<span style="font-size: 12px; font-weight: bold;">Unmanaged (recent):</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@unmanaged_recent{0.00 b}</span>
</div>
<div>
<span style="font-size: 12px; font-weight: bold;">Spilled to disk:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@spilled{0.00 b}</span>
</div>
""",
)
help_ = HelpTool(
redirect="https://docs.dask.org/en/stable/dashboard.html#bytes-stored-and-bytes-per-worker",
description="Description of bytes stored plots",
)
self.root.add_tools(hover, help_)
def _cluster_memory_color(self) -> str:
colors = {
self._memory_color(
current=ws.memory.process,
limit=getattr(ws, "memory_limit", 0),
status=ws.status,
)
for ws in self.scheduler.workers.values()
}
assert colors.issubset({"red", "orange", "blue"})
if "red" in colors:
return "red"
elif "orange" in colors:
return "orange"
else:
return "blue"
@without_property_validation
@log_errors
def update(self):
limit = sum(ws.memory_limit for ws in self.scheduler.workers.values())
meminfo = self.scheduler.memory
color = self._cluster_memory_color()
width = [
meminfo.managed,
meminfo.unmanaged_old,
meminfo.unmanaged_recent,
meminfo.spilled,
]
result = {
"width": width,
"x": [sum(width[:i]) + w / 2 for i, w in enumerate(width)],
"color": [color, color, color, "grey"],
"proc_memory": [meminfo.process] * 4,
"managed": [meminfo.managed] * 4,
"unmanaged_old": [meminfo.unmanaged_old] * 4,
"unmanaged_recent": [meminfo.unmanaged_recent] * 4,
"spilled": [meminfo.spilled] * 4,
}
x_end = max(limit, meminfo.process + meminfo.spilled)
self.root.x_range.end = x_end
title = f"Bytes stored: {format_bytes(meminfo.process)}"
if meminfo.spilled:
title += f" + {format_bytes(meminfo.spilled)} spilled to disk"
self.root.title.text = title
update(self.source, result)
| ClusterMemory |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 35150,
"end": 35441
} | class ____(StructModel):
def __init__(self, dmm, fe_type):
members = [('count', types.EphemeralPointer(types.intp)),
('iter', fe_type.source_type)]
super(EnumerateType, self).__init__(dmm, fe_type, members)
@register_default(types.ZipType)
| EnumerateType |
python | numba__numba | numba/core/types/containers.py | {
"start": 24458,
"end": 24908
} | class ____(SimpleIterableType):
"""Dictionary iterable type for .keys()
"""
def __init__(self, parent):
assert isinstance(parent, DictType)
self.parent = parent
self.yield_type = self.parent.key_type
name = "keys[{}]".format(self.parent.name)
self.name = name
iterator_type = DictIteratorType(self)
super(DictKeysIterableType, self).__init__(name, iterator_type)
| DictKeysIterableType |
python | celery__celery | t/smoke/tests/quorum_queues/conftest.py | {
"start": 499,
"end": 1949
} | class ____(RabbitMQTestBroker):
def get_management_url(self) -> str:
"""Opening this link during debugging allows you to see the
RabbitMQ management UI in your browser.
Usage from a test:
>>> celery_setup.broker.get_management_url()
Open from a browser and login with guest:guest.
"""
ports = self.container.attrs["NetworkSettings"]["Ports"]
ip = ports["15672/tcp"][0]["HostIp"]
port = ports["15672/tcp"][0]["HostPort"]
return f"http://{ip}:{port}"
@pytest.fixture
def default_rabbitmq_broker_image() -> str:
return "rabbitmq:management"
@pytest.fixture
def default_rabbitmq_broker_ports() -> dict:
# Expose the management UI port
ports = RABBITMQ_PORTS.copy()
ports.update({"15672/tcp": None})
return ports
@pytest.fixture
def celery_rabbitmq_broker(default_rabbitmq_broker: RabbitMQContainer) -> RabbitMQTestBroker:
broker = RabbitMQManagementBroker(default_rabbitmq_broker)
yield broker
broker.teardown()
@pytest.fixture
def celery_broker_cluster(celery_rabbitmq_broker: RabbitMQTestBroker) -> CeleryBrokerCluster:
cluster = CeleryBrokerCluster(celery_rabbitmq_broker)
yield cluster
cluster.teardown()
###############################################################################
# Worker Configuration
###############################################################################
| RabbitMQManagementBroker |
python | pyca__cryptography | src/cryptography/hazmat/primitives/kdf/kbkdf.py | {
"start": 440,
"end": 737
} | class ____(utils.Enum):
BeforeFixed = "before_fixed"
AfterFixed = "after_fixed"
MiddleFixed = "middle_fixed"
KBKDFHMAC = rust_openssl.kdf.KBKDFHMAC
KeyDerivationFunction.register(KBKDFHMAC)
KBKDFCMAC = rust_openssl.kdf.KBKDFCMAC
KeyDerivationFunction.register(KBKDFCMAC)
| CounterLocation |
python | django__django | tests/utils_tests/test_feedgenerator.py | {
"start": 229,
"end": 6240
} | class ____(SimpleTestCase):
"""
Tests for the low-level syndication feed framework.
"""
def test_get_tag_uri(self):
"""
get_tag_uri() correctly generates TagURIs.
"""
self.assertEqual(
feedgenerator.get_tag_uri(
"http://example.org/foo/bar#headline", datetime.date(2004, 10, 25)
),
"tag:example.org,2004-10-25:/foo/bar/headline",
)
def test_get_tag_uri_with_port(self):
"""
get_tag_uri() correctly generates TagURIs from URLs with port numbers.
"""
self.assertEqual(
feedgenerator.get_tag_uri(
"http://www.example.org:8000/2008/11/14/django#headline",
datetime.datetime(2008, 11, 14, 13, 37, 0),
),
"tag:www.example.org,2008-11-14:/2008/11/14/django/headline",
)
def test_rfc2822_date(self):
"""
rfc2822_date() correctly formats datetime objects.
"""
self.assertEqual(
feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
"Fri, 14 Nov 2008 13:37:00 -0000",
)
def test_rfc2822_date_with_timezone(self):
"""
rfc2822_date() correctly formats datetime objects with tzinfo.
"""
self.assertEqual(
feedgenerator.rfc2822_date(
datetime.datetime(
2008, 11, 14, 13, 37, 0, tzinfo=get_fixed_timezone(60)
)
),
"Fri, 14 Nov 2008 13:37:00 +0100",
)
def test_rfc2822_date_without_time(self):
"""
rfc2822_date() correctly formats date objects.
"""
self.assertEqual(
feedgenerator.rfc2822_date(datetime.date(2008, 11, 14)),
"Fri, 14 Nov 2008 00:00:00 -0000",
)
def test_rfc3339_date(self):
"""
rfc3339_date() correctly formats datetime objects.
"""
self.assertEqual(
feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
"2008-11-14T13:37:00Z",
)
def test_rfc3339_date_with_timezone(self):
"""
rfc3339_date() correctly formats datetime objects with tzinfo.
"""
self.assertEqual(
feedgenerator.rfc3339_date(
datetime.datetime(
2008, 11, 14, 13, 37, 0, tzinfo=get_fixed_timezone(120)
)
),
"2008-11-14T13:37:00+02:00",
)
def test_rfc3339_date_without_time(self):
"""
rfc3339_date() correctly formats date objects.
"""
self.assertEqual(
feedgenerator.rfc3339_date(datetime.date(2008, 11, 14)),
"2008-11-14T00:00:00Z",
)
def test_atom1_mime_type(self):
"""
Atom MIME type has UTF8 Charset parameter set
"""
atom_feed = feedgenerator.Atom1Feed("title", "link", "description")
self.assertEqual(atom_feed.content_type, "application/atom+xml; charset=utf-8")
def test_rss_mime_type(self):
"""
RSS MIME type has UTF8 Charset parameter set
"""
rss_feed = feedgenerator.Rss201rev2Feed("title", "link", "description")
self.assertEqual(rss_feed.content_type, "application/rss+xml; charset=utf-8")
# Two regression tests for #14202
def test_feed_without_feed_url_gets_rendered_without_atom_link(self):
feed = feedgenerator.Rss201rev2Feed("title", "/link/", "descr")
self.assertIsNone(feed.feed["feed_url"])
feed_content = feed.writeString("utf-8")
self.assertNotIn("<atom:link", feed_content)
self.assertNotIn('href="/feed/"', feed_content)
self.assertNotIn('rel="self"', feed_content)
def test_feed_with_feed_url_gets_rendered_with_atom_link(self):
feed = feedgenerator.Rss201rev2Feed(
"title", "/link/", "descr", feed_url="/feed/"
)
self.assertEqual(feed.feed["feed_url"], "/feed/")
feed_content = feed.writeString("utf-8")
self.assertIn("<atom:link", feed_content)
self.assertIn('href="/feed/"', feed_content)
self.assertIn('rel="self"', feed_content)
def test_atom_add_item(self):
# Not providing any optional arguments to Atom1Feed.add_item()
feed = feedgenerator.Atom1Feed("title", "/link/", "descr")
feed.add_item("item_title", "item_link", "item_description")
feed.writeString("utf-8")
def test_deterministic_attribute_order(self):
feed = feedgenerator.Atom1Feed("title", "/link/", "desc")
feed_content = feed.writeString("utf-8")
self.assertIn('href="/link/" rel="alternate"', feed_content)
def test_latest_post_date_returns_utc_time(self):
for use_tz in (True, False):
with self.settings(USE_TZ=use_tz):
rss_feed = feedgenerator.Rss201rev2Feed("title", "link", "description")
self.assertEqual(
rss_feed.latest_post_date().tzinfo,
datetime.UTC,
)
def test_stylesheet_keeps_lazy_urls(self):
m = mock.Mock(return_value="test.css")
stylesheet = feedgenerator.Stylesheet(SimpleLazyObject(m))
m.assert_not_called()
self.assertEqual(
str(stylesheet), 'href="test.css" media="screen" type="text/css"'
)
m.assert_called_once()
def test_stylesheet_attribute_escaping(self):
style = feedgenerator.Stylesheet(
url='http://example.com/style.css?foo="bar"&baz=<>',
mimetype='text/css; charset="utf-8"',
media='screen and (max-width: "600px")',
)
self.assertEqual(
str(style),
'href="http://example.com/style.css?foo=%22bar%22&baz=%3C%3E" '
'media="screen and (max-width: "600px")" '
'type="text/css; charset="utf-8""',
)
| FeedgeneratorTests |
python | huggingface__transformers | tests/quantization/bnb/test_4bit.py | {
"start": 31340,
"end": 32556
} | class ____(unittest.TestCase):
model_name = "hf-internal-testing/tiny-random-LlamaForCausalLM"
input_text = "Hello my name is"
def setUp(self):
# Models and tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
self.model_4bit = AutoModelForCausalLM.from_pretrained(
self.model_name, quantization_config=BitsAndBytesConfig(load_in_4bit=True)
)
@pytest.mark.torch_compile_test
def test_generate_compile(self):
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
# if nothing is set, compile will be disabled for bnb
self.model_4bit.generate(
input_ids=encoded_input["input_ids"].to(self.model_4bit.device),
max_new_tokens=10,
cache_implementation="static",
)
with self.assertRaises(Exception):
# overwrite property
object.__setattr__(self.model_4bit.hf_quantizer, "is_compileable", True)
self.model_4bit.generate(
input_ids=encoded_input["input_ids"].to(self.model_4bit.device),
max_new_tokens=10,
cache_implementation="static",
)
| Bnb4bitCompile |
python | wandb__wandb | tests/unit_tests/test_retry.py | {
"start": 4252,
"end": 5431
} | class ____:
def test_respects_max_retries(self):
backoff = retry.ExponentialBackoff(
initial_sleep=SECOND, max_sleep=SECOND, max_retries=3
)
for _ in range(3):
backoff.next_sleep_or_reraise(MyError())
with pytest.raises(MyError):
backoff.next_sleep_or_reraise(MyError())
def test_respects_timeout(self, mock_time: MockTime):
t0 = mock_time.now()
dt = 300 * SECOND
backoff = retry.ExponentialBackoff(
initial_sleep=SECOND, max_sleep=10 * dt, timeout_at=t0 + dt
)
with pytest.raises(MyError):
for _ in range(9999):
mock_time.sleep(
backoff.next_sleep_or_reraise(MyError()).total_seconds()
)
assert t0 + dt <= mock_time.now() <= t0 + 2 * dt
def test_respects_max_sleep_if_smaller_than_initial_sleep(
self, mock_time: MockTime
):
max_sleep = 10 * SECOND
backoff = retry.ExponentialBackoff(
initial_sleep=2 * max_sleep, max_sleep=max_sleep
)
assert backoff.next_sleep_or_reraise(MyError()) == max_sleep
| TestExponentialBackoff |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_use_orig_params.py | {
"start": 45553,
"end": 47344
} | class ____(FSDPTest):
@skip_if_lt_x_gpu(2)
def test_named_parameters_in_forward(self):
"""
Tests that calling ``named_parameters()`` during forward returns FQNs
and ``Tensor`` s corresponding to the original parameters.
"""
param_shapes = [None, None]
assert_equal_fn = self.assertEqual
class Model(nn.Module):
def __init__(self) -> None:
super().__init__()
self.lin = nn.Linear(5, 5)
def forward(self, x: torch.Tensor) -> torch.Tensor:
nonlocal param_shapes
# Allow for FSDP prefixes
param_names = [
clean_tensor_name(tup[0]) for tup in self.named_parameters()
]
params = [tup[1] for tup in self.named_parameters()]
assert param_shapes[0] is not None and param_shapes[1] is not None, (
"`param_sizes` should be set"
)
assert_equal_fn(
param_names,
[
"lin.weight",
"lin.bias",
],
)
assert_equal_fn(params[0].shape, param_shapes[0])
assert_equal_fn(params[1].shape, param_shapes[1])
return self.lin(x)
model = Model().to(device=device_type)
# Save the *unsharded* original parameter shapes and check the shapes
# match in the forward pass
param_shapes[0] = model.lin.weight.shape
param_shapes[1] = model.lin.bias.shape
fsdp_model = FSDP(model, use_orig_params=True)
inp = torch.randn((2, 5), device=torch.device(device_type))
fsdp_model(inp)
| TestFSDPUseOrigParamsFQNs |
python | dagster-io__dagster | python_modules/libraries/dagster-pandas/dagster_pandas/constraints.py | {
"start": 12305,
"end": 14165
} | class ____(DataFrameConstraint):
"""A dataframe constraint that validates column existence and ordering.
Args:
strict_column_list (List[str]): The exact list of columns that your dataframe must have.
enforce_ordering (Optional[bool]): If true, will enforce that the ordering of column names must match.
Default is False.
"""
def __init__(self, strict_column_list, enforce_ordering=False):
self.enforce_ordering = check.bool_param(enforce_ordering, "enforce_ordering")
self.strict_column_list = check.list_param(
strict_column_list, "strict_column_list", of_type=str
)
description = f"No columns outside of {self.strict_column_list} allowed. "
if enforce_ordering:
description += "Columns must be in that order."
super().__init__(error_description=description, markdown_description=description)
def validate(self, dataframe):
check.inst_param(dataframe, "dataframe", DataFrame)
columns_received = list(dataframe.columns)
if self.enforce_ordering:
if self.strict_column_list != columns_received:
raise DataFrameConstraintViolationException(
constraint_name=self.name,
constraint_description=(
f"Expected the following ordering of columns {self.strict_column_list}. Received:"
f" {columns_received}"
),
)
for column in columns_received:
if column not in self.strict_column_list:
raise DataFrameConstraintViolationException(
constraint_name=self.name,
constraint_description=f"Expected {self.strict_column_list}. Recevied {columns_received}.",
)
@beta
| StrictColumnsConstraint |
python | getsentry__sentry | src/sentry/projects/services/project/service.py | {
"start": 895,
"end": 3703
} | class ____(RpcService):
key = "project"
local_mode = SiloMode.REGION
@classmethod
def get_local_implementation(cls) -> RpcService:
from sentry.projects.services.project.impl import DatabaseBackedProjectService
return DatabaseBackedProjectService()
@regional_rpc_method(resolve=ByRegionName())
@abstractmethod
def get_many_by_organizations(
self,
*,
region_name: str,
organization_ids: list[int],
) -> list[RpcProject]:
pass
@regional_rpc_method(resolve=ByOrganizationIdAttribute("project"))
@abstractmethod
def get_option(self, *, project: RpcProject, key: str) -> RpcProjectOptionValue:
pass
@regional_rpc_method(resolve=ByOrganizationIdAttribute("project"))
@abstractmethod
def update_option(self, *, project: RpcProject, key: str, value: OptionValue) -> bool:
pass
@regional_rpc_method(resolve=ByOrganizationIdAttribute("project"))
@abstractmethod
def delete_option(self, *, project: RpcProject, key: str) -> None:
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def get_by_id(self, *, organization_id: int, id: int) -> RpcProject | None:
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def get_by_slug(self, *, organization_id: int, slug: str) -> RpcProject | None:
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def serialize_many(
self,
*,
organization_id: int,
filter: ProjectFilterArgs,
as_user: RpcUser | None = None,
auth_context: AuthenticationContext | None = None,
) -> list[OpaqueSerializedResponse]:
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def create_project_for_organization(
self,
*,
organization_id: int,
project_name: str,
platform: str,
user_id: int,
add_org_default_team: bool | None = False,
external_id: str | None = None,
) -> RpcProject:
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def get_or_create_project_for_organization(
self,
*,
organization_id: int,
project_name: str,
platform: str,
user_id: int,
add_org_default_team: bool | None = False,
external_id: str | None = None,
) -> RpcProject:
pass
@regional_rpc_method(resolve=ByOrganizationId())
@abstractmethod
def update_project(
self,
*,
organization_id: int,
project_id: int,
attrs: ProjectUpdateArgs,
) -> RpcProject:
pass
project_service = ProjectService.create_delegation()
| ProjectService |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_pattern01.py | {
"start": 315,
"end": 1962
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_pattern01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [86421504, 86423040]
data = [
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.write_column("D1", data[3])
worksheet.write_column("E1", data[4])
worksheet.write_column("F1", data[5])
worksheet.write_column("G1", data[6])
worksheet.write_column("H1", data[7])
chart.add_series({"values": "=Sheet1!$A$1:$A$3"})
chart.add_series({"values": "=Sheet1!$B$1:$B$3"})
chart.add_series({"values": "=Sheet1!$C$1:$C$3"})
chart.add_series({"values": "=Sheet1!$D$1:$D$3"})
chart.add_series({"values": "=Sheet1!$E$1:$E$3"})
chart.add_series({"values": "=Sheet1!$F$1:$F$3"})
chart.add_series({"values": "=Sheet1!$G$1:$G$3"})
chart.add_series({"values": "=Sheet1!$H$1:$H$3"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | huggingface__transformers | src/transformers/models/owlvit/configuration_owlvit.py | {
"start": 5502,
"end": 9547
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of an [`OwlViTVisionModel`]. It is used to instantiate
an OWL-ViT image encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the OWL-ViT
[google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input images.
image_size (`int`, *optional*, defaults to 768):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
Example:
```python
>>> from transformers import OwlViTVisionConfig, OwlViTVisionModel
>>> # Initializing a OwlViTVisionModel with google/owlvit-base-patch32 style configuration
>>> configuration = OwlViTVisionConfig()
>>> # Initializing a OwlViTVisionModel model from the google/owlvit-base-patch32 style configuration
>>> model = OwlViTVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "owlvit_vision_model"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=768,
intermediate_size=3072,
num_hidden_layers=12,
num_attention_heads=12,
num_channels=3,
image_size=768,
patch_size=32,
hidden_act="quick_gelu",
layer_norm_eps=1e-5,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.image_size = image_size
self.patch_size = patch_size
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
| OwlViTVisionConfig |
python | django__django | django/templatetags/tz.py | {
"start": 2542,
"end": 2892
} | class ____(Node):
"""
Template node class used by ``timezone_tag``.
"""
def __init__(self, nodelist, tz):
self.nodelist = nodelist
self.tz = tz
def render(self, context):
with timezone.override(self.tz.resolve(context)):
output = self.nodelist.render(context)
return output
| TimezoneNode |
python | doocs__leetcode | solution/1600-1699/1625.Lexicographically Smallest String After Applying Operations/Solution.py | {
"start": 0,
"end": 535
} | class ____:
def findLexSmallestString(self, s: str, a: int, b: int) -> str:
q = deque([s])
vis = {s}
ans = s
while q:
s = q.popleft()
if ans > s:
ans = s
t1 = ''.join(
[str((int(c) + a) % 10) if i & 1 else c for i, c in enumerate(s)]
)
t2 = s[-b:] + s[:-b]
for t in (t1, t2):
if t not in vis:
vis.add(t)
q.append(t)
return ans
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/composition.py | {
"start": 28438,
"end": 28786
} | class ____(NamedTuple):
"""The metadata about a node invocation saved by the current composition context."""
node_name: str
node_def: NodeDefinition
input_bindings: Mapping[str, InputSource]
tags: Optional[Mapping[str, str]]
hook_defs: Optional[AbstractSet[HookDefinition]]
retry_policy: Optional[RetryPolicy]
| InvokedNode |
python | tensorflow__tensorflow | tensorflow/python/framework/func_graph.py | {
"start": 5331,
"end": 51905
} | class ____(ops.Graph):
"""Graph representing a function body.
Attributes:
name: The name of the function.
inputs: Placeholder tensors representing the inputs to this function. The
tensors are in this FuncGraph. This represents "regular" inputs as well as
captured inputs (i.e. the values of self.captures), with the regular
inputs coming first.
outputs: Tensors that will be returned by this function. The tensors are in
this FuncGraph.
control_outputs: Operations that must be executed before the function
represented by this graph can be said to have been executed.
structured_input_signature: A tuple of (args, kwargs), which are both
possibly-nested python objects that were received by this function. Note
that these structures might contain Python `None`s.
structured_outputs: A possibly-nested python object which will be returned
by this function. The Tensors in this structure are the same as those of
self.outputs. Note that this structure might contain Python `None`s.
variables: Variables that should be watched during function execution.
outer_graph: The graph this function is defined in. May be another FuncGraph
or the global default Graph.
captures: Maps external tensor -> internal tensor (i.e. input placeholder).
The entries are in the order they were captured.
seed: The graph-level random seed.
capture_by_value: If True, the func graph will capture Variables by value
instead of reference.
"""
def __init__(self,
name,
collections=None,
capture_by_value=None,
structured_input_signature=None,
structured_outputs=None):
"""Construct a new FuncGraph.
The graph will inherit its graph key, collections, seed, and distribution
strategy stack from the current context or graph.
Args:
name: the name of the function.
collections: a dictionary of collections this FuncGraph should start with.
If not specified (None), the FuncGraph will read (but not write to) the
outer graph's collections that are not allowlisted, and both read and
write to the outer graph's collections that are allowlisted. The current
allowlisted collections are the global variables, the local variables,
and the trainable variables. Defaults to None.
capture_by_value: An optional boolean. If True, the func graph will
capture Variables by value instead of reference. By default inherit from
outer graphs, and failing that will default to False.
structured_input_signature: Optional. The structured input signature to
use for initializing the FuncGraph. See the docstring for FuncGraph for
more information.
structured_outputs: Optional. The structured outputs to use for
initializing the FuncGraph. See the docstring for FuncGraph for more
information.
"""
super().__init__()
self.name = name
# TODO(panzf): Separate captures from non-captures inputs in self.inputs
self.inputs = []
self.outputs = []
self.control_outputs = []
self.structured_input_signature = structured_input_signature
self.structured_outputs = structured_outputs
self._resource_tensor_inputs = object_identity.ObjectIdentitySet()
self._weak_variables = []
self._watched_variables = object_identity.ObjectIdentityWeakSet()
self.is_control_flow_graph = False
self._function_captures = capture_container.FunctionCaptures()
outer_graph = ops.get_default_graph()
self._weak_outer_graph = weakref.ref(outer_graph)
while outer_graph.building_function:
outer_graph = outer_graph.outer_graph
# If self._weak_outer_graph is deleted, we revert to the outermost Graph
# active when the FuncGraph was traced. This will not be a FuncGraph.
self._fallback_outer_graph = outer_graph
# If not None, records the names of output args of this function. Used to
# preserve the output names in the signature of a serialized+deserialized
# function. Private at the moment mostly because it's often out of date.
self._output_names = None
# Inherit capture-by-value from outer graph.
if capture_by_value is not None:
self.capture_by_value = capture_by_value
elif self.outer_graph is not None and isinstance(self.outer_graph,
FuncGraph):
self.capture_by_value = self.outer_graph.capture_by_value
else:
self.capture_by_value = False
self._building_function = True
graph = self.outer_graph
if context.executing_eagerly():
self.seed = context.global_seed()
# [for tf-data user migration from TF1.0 to 2.0] seed_used keep track of
# any None op_seed for random_op in the function, in which case we end up
# using function seed, which could be unintended behavior for the op.
self._seed_used = False
else:
self.seed = graph.seed
self._seed_used = False
# TODO(allenl): Figure out if we can remove colocation stack
# specialization (currently used in cond_v2), here and in the cache key.
self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access
if collections is None:
for collection_name in graph.get_all_collection_keys():
if collection_name not in ALLOWLIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection(
collection_name)
for collection_name in ALLOWLIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection_ref(
collection_name)
else:
self._collections = collections
# Keep track of whether this FuncGraph is exportable to SavedModel. Use
# `graph.mark_as_unsaveable(reason)` to mark this FuncGraph and any
# dependent functions as unsaveable.
self._saveable = True
self._saving_errors = set()
# Keep track of callbacks to run when this graph exits default scope
self._scope_exit_callbacks = None
def __str__(self):
return "FuncGraph(name=%s, id=%s)" % (self.name, id(self))
def watch_variable(self, v):
"""Marks the variable v as accessed while building this graph."""
# Don't watch `v` if it is one of ResourceVariable input arguments.
if (isinstance(v, resource_variable_ops.ResourceVariable) and
v.handle in self._resource_tensor_inputs):
return
while self is not None and isinstance(self, FuncGraph):
self._watched_variables.add(v)
self = self.outer_graph
  def capture_call_time_value(self,
                              closure,
                              spec,
                              key=None,
                              default_value=None,
                              placeholder=None):
    """Returns a placeholder which at call time has the value closure().

    The `tf.function` supports the notion of captures, that is, it allows Python
    functions to have closure variables, which bind over some value outside the
    function. However, this name binding is "early binding" performed before the
    program is run, i.e.,
    ```
    @tf.function
    def f():
      return x

    x = tf.constant(1)
    f()  # returns 1
    x = tf.constant(2)
    f()  # still returns 1!
    ```
    while in Python, name binding is performed as the program is running.
    ```
    def f():
      return x

    x = 1
    f()  # returns 1
    x = 2
    f()  # returns 2
    ```
    `capture_call_time_value` allows tf.function to mimic late binding as a
    Python function does, by passing in a `closure` callable argument to be
    executed when the tf.function is invoked eagerly.  E.g.
    ```
    @tf.function
    def f():
      return ops.get_default_graph.capture_call_time_value(lambda: x)

    x = tf.constant(1)
    f()  # returns 1
    x = tf.constant(2)
    f()  # returns 2
    ```
    Note that a `capture_call_time_value` function itself does not work well in
    the saving process (since the tf.function in which it's called is not
    invoked eagerly) unless passed a `default_value` argument. At saving time,
    the `default_value` argument is returned instead.

    Args:
      closure: function which takes no arguments, to be evaluated at function
        call time, returning a nest of tensors compatible with `spec`.
      spec: nest of TypeSpec for the value to capture.
      key: optional. If not None, multiple calls to lazy_capture with the same
        key in the same graph will return the same placeholder, and the first
        closure will be used at function call time.
      default_value: optional value to return in environments that cannot safely
        evaluate closure.
      placeholder: optional. If not None, the graph will take the passed-in
        `placeholder` as the internal capture instead of creating a new one.
        This is useful when loading from a SavedModel.

    Returns:
      Nest of placeholders which, at function call time, will be fed with the
      result of calling closure().

    Raises:
      ValueError: at function call time, if the return value of closure() is
       not compatible with `spec`.
    """
    if key is None:
      # A fresh object() gives every anonymous capture a unique identity key.
      key = object()
    if key not in self._function_captures.by_ref_internal:
      trace_ctx = trace_type.InternalTracingContext(True)
      spec = trace_type.from_value(spec, trace_ctx)

      if placeholder is None:
        placeholder_ctx = trace_type.InternalPlaceholderContext(self)
        placeholder = spec.placeholder_value(placeholder_ctx)

      def wrapped_closure():

        # One major case requiring returning a `default_value` is when passing a
        # concrete function to `save`, i.e.
        # serving_fn = serve_fn.get_concrete_function(...)
        # model.save(save_dir, signatures={"serving_default": serving_fn})
        # `serving_fn` has deferred captures added through
        # `capture_call_time_value`. It can't be saved correctly since
        # `wrapped_closure` will end up executing under a default Graph instead
        # of FuncGraph. The user of `capture_call_time_value` also cannot
        # conditionally avoid this call since presence of `save_context` when
        # executing `wrapped_closure` is not known at tracing time of
        # `serving_fn`.
        if save_context.in_save_context() and default_value is not None:
          return default_value
        # TODO(wxinyi): raise an error if in save context but no default value.

        if not context.executing_eagerly():
          # Inside another graph: re-register the deferred capture there so the
          # value propagates through the graph nesting chain.
          graph = ops.get_default_graph()
          assert isinstance(
              graph, FuncGraph
          ), "This API should only be used in TF2 environment."
          with graph.as_default():
            ret_nest = graph.capture_call_time_value(
                closure, spec, key=key, default_value=default_value)
        else:
          ret_nest = closure()

        # Validate/coerce the closure's result against `spec` and flatten it
        # into the tensors that will feed the placeholders.
        ret_nest = spec.cast(ret_nest, trace_type.InternalCastContext)
        return spec.to_tensors(ret_nest)

      wrapped_closure.output_spec = spec
      self._function_captures.add_or_replace(
          key=key,
          external=wrapped_closure,
          internal=placeholder,
          tracetype=spec,
          is_by_ref=True)
    return self._function_captures.by_ref_internal[key]
def control_dependencies(self, control_inputs):
"""Handles control dependencies.
FuncGraph wraps Graph's control_dependencies logic by first filtering out
any external tensors / operations and storing them in the graph's
control_captures member. Any consumers of this function graph must then
decide how to handle the control captures.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the
context. Can also be `None` to clear the control dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return super().control_dependencies(control_inputs)
filtered_control_inputs = []
for c in control_inputs:
# Check for _UnreadVariable
if (isinstance(c, indexed_slices.IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
graph_element = ops._as_graph_element(c) # pylint: disable=protected-access
if graph_element is None:
graph_element = c
if graph_element is not None and getattr(graph_element, "graph",
None) is not self:
self._function_captures.control.add(graph_element)
else:
filtered_control_inputs.append(graph_element)
return super().control_dependencies(filtered_control_inputs)
def as_default(self):
outer_cm = super().as_default()
return _func_graph_as_default_inner_cm(self, outer_cm)
@property
def outer_graph(self):
"""The Graph this FuncGraph is nested in.
Functions may capture Tensors from graphs they are nested in (transitive).
Returns:
A Graph object. Initially set to the current default graph when the
FuncGraph was created. If the previous `outer_graph` was deleted because
the function that owns it was deleted, `outer_graph` is reset to the
outermost default graph active when the FuncGraph was created. This
FuncGraph won't have captured anything from the new `outer_graph` (and
likely not from the previous setting, since that would have created a
strong reference), but it is returned so that FuncGraphs always have a
parent.
"""
current = self._weak_outer_graph()
if current is None:
return self._fallback_outer_graph
return current
  @outer_graph.setter
  def outer_graph(self, new_outer_graph):
    """Sets `outer_graph` to `new_outer_graph`.

    Only a weak reference is held, so the outer graph can be garbage
    collected independently of this FuncGraph.
    """
    self._weak_outer_graph = weakref.ref(new_outer_graph)
@property
def output_types(self):
return [t.dtype for t in self.outputs]
@property
def output_shapes(self):
return [t.shape for t in self.outputs]
@property
def trainable_variables(self):
"""A sequence of trainable variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Returns:
Sequence of trainable variables for this func graph.
"""
return tuple(v for v in self.variables if v.trainable)
@property
def variables(self):
"""A sequence of variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Returns:
Sequence of variables for this func graph.
"""
def deref(weak_v):
v = weak_v()
if v is None:
raise AssertionError(
"Called a function referencing variables which have been deleted. "
"This likely means that function-local variables were created and "
"not referenced elsewhere in the program. This is generally a "
"mistake; consider storing variables in an object attribute on "
"first call.")
return v
return tuple(deref(v) for v in self._weak_variables)
@variables.setter
def variables(self, var_list):
self._weak_variables = [weakref.ref(v) for v in var_list]
  def _capture_by_value(
      self,
      op_type,
      inputs,
      dtypes,  # pylint: disable=redefined-outer-name
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_device=True):
    """Runs the op outside this graph and captures its result by value.

    Used for ops like ReadVariableOp so the *value* at trace time is baked in
    rather than a reference to the resource (see `_create_op_internal`).
    Returns the op of the captured result tensor.
    """
    # When capturing by value, do the read outside
    reverse_captures = dict((id(v), k) for k, v in self.captures)
    # Map already-captured placeholders back to their external tensors so the
    # op runs on the real values, not on this graph's placeholders.
    uncaptured_inputs = [reverse_captures.get(id(t), t) for t in inputs]
    with ops.init_scope():
      if context.executing_eagerly():
        # Eager: execute the op immediately and grab its single output.
        attr_list = ("dtype", int(attrs["dtype"].type))
        value, = execute.execute(
            compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,
            context.context())
      else:
        # Graph mode: build the op in the outer default graph instead.
        op = ops.get_default_graph()._create_op_internal(  # pylint: disable=protected-access
            op_type, uncaptured_inputs, dtypes, input_types, name, attrs,
            op_def, compute_device)
        value = op.outputs[0]
    # Capture the computed value into this graph and return its op.
    captured_value = self.capture(value)
    return captured_value.op
  def _create_op_internal(
      self,
      op_type,
      inputs,
      dtypes=None,  # pylint: disable=redefined-outer-name
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_device=True):
    """Like Graph.create_op, except handles external input tensors.

    This overload adds functionality to create_op to "capture" any external
    input tensors, i.e. tensors from the eager context or outer function graphs
    if this is a nested function. See `capture` for more information.

    Args:
      op_type: The `Operation` type to create. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
      inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
      dtypes: (Optional) A list of `DType` objects that will be the types of the
        tensors that the operation produces.
      input_types: (Optional.) A list of `DType`s that will be the types of the
        tensors that the operation consumes. By default, uses the base `DType`
        of each input in `inputs`. Operations that expect reference-typed inputs
        must specify `input_types` explicitly.
      name: (Optional.) A string name for the operation. If not specified, a
        name is generated based on `op_type`.
      attrs: (Optional.) A dictionary where the key is the attribute name (a
        string) and the value is the respective `attr` attribute of the
        `NodeDef` proto that will represent the operation (an `AttrValue`
        proto).
      op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
        the operation will have.
      compute_device: (Optional.) If True, device functions will be executed to
        compute the device property of the Operation.

    Returns:
      An `Operation` object.
    """
    # Variable reads are captured by value when requested, so the traced graph
    # holds the value instead of a resource reference.
    if self.capture_by_value and op_type in [
        "ReadVariableOp", "ResourceGather"
    ]:
      return self._capture_by_value(op_type, inputs, dtypes, input_types, name,
                                    attrs, op_def, compute_device)

    # This capturing logic interacts poorly with control flow contexts which
    # want to replace inputs of ops far too late in the process. This can lead
    # the context to get confused and try to create an Enter for an Enter. We
    # can detect this here and skip the additional Enter which can confuse loop
    # validation logic.
    if op_type == "Enter" and inputs[0].op.type == "Enter":
      if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
        return inputs[0].op
    # Calling AddValue on the control flow contexts to force creation of the
    # backward accumulators in the original graph before we create placeholders
    # to capture the inputs.
    ctxt = ops.get_default_graph()._control_flow_context  # pylint: disable=protected-access
    # Use a different list to avoid modifying the original inputs list.
    captured_inputs = []
    for inp in inputs:
      # TPU Estimator defines a control flow context with no AddValue method.
      if ctxt is not None and hasattr(ctxt, "AddValue"):
        inp = ctxt.AddValue(inp)
      # External tensors (eager or outer-graph) become captures here.
      inp = self.capture(inp)
      captured_inputs.append(inp)
    return super()._create_op_internal(  # pylint: disable=protected-access
        op_type, captured_inputs, dtypes, input_types, name, attrs, op_def,
        compute_device)
  def capture(self, tensor, name=None, shape=None):
    """Captures `tensor` by value; returns this graph's placeholder for it.

    NOTE(review): the `shape` argument is accepted but never used here —
    presumably retained for API compatibility; confirm before removing.
    """
    return self._function_captures.capture_by_value(self, tensor, name)
  def _validate_in_scope(self, tensor):
    """Raises if `tensor` is out of scope with respect to this graph.

    Walks the `outer_graph` chain starting at `tensor.graph`; if this graph
    is found on that chain, `tensor` is considered inaccessible from the
    graph that requested validation and an error with the tensor's creation
    traceback is raised.

    Args:
      tensor: The tensor being accessed.

    Raises:
      errors.InaccessibleTensorError: if `tensor` cannot be used here.
    """
    inner_graph = tensor.graph
    while inner_graph is not None and isinstance(inner_graph, FuncGraph):
      if inner_graph is self:
        # Best-effort reconstruction of where the tensor was created, for the
        # error message; falls back to "<unknown>" when no traceback exists.
        try:
          tb = tensor.op.traceback
        except AttributeError:
          tensor_traceback = "<unknown>"
        else:
          tensor_traceback_list = []
          for frame in traceback.format_list(tb.get_user_frames()):
            tensor_traceback_list.extend(
                [f"  {line}" for line in frame.split("\n") if line.strip()])
          tensor_traceback = "\n".join(tensor_traceback_list)
        # Keep in sync with tfe_wrapper.cc.
        # TODO(b/200991648): Unify those two paths.
        raise errors.InaccessibleTensorError(
            f"{tensor!r} is out of scope and cannot be used here. Use return "
            "values, explicit Python locals or TensorFlow collections to "
            "access it.\n"
            "Please see https://www.tensorflow.org/guide/function#all_outputs_of_a_tffunction_must_be_return_values "  # pylint: disable=line-too-long
            "for more information.\n\n"
            f"{tensor!r} was defined here:\n{tensor_traceback}\n\n"
            f"The tensor {tensor!r} cannot be accessed from {self}, because "
            f"it was defined in {tensor.graph}, which is out of scope.")
      inner_graph = inner_graph.outer_graph
# TODO(panzf): Rename this method along with usages in cond/while graph.
  def _capture_helper(self, tensor, name):
    """Delegates placeholder creation for `tensor` to the capture tracker."""
    return self._function_captures._create_placeholder_helper(  # pylint: disable=protected-access
        self, tensor, name)
  def _experimental_capture_side_input_by_ref(self, identifier: Hashable,
                                              func: Callable[[], Any]) ->...:
    """Implement capturing side input by reference for tf.function.

    Note that this API will only register the capture in the func_graph where
    it is called. In the case of nested graph, like nested tf.function or
    tf.while, the outer graph is not aware of this capture in the inner graph.
    Thus, the outer tf.function will not retrace when the by-ref capture
    changes. It's the user's responsibility to call this API in the outer
    func_graph as well if proper retracing is needed.

    For example:

    ```
    x = 1

    # Correct usage
    @tf.function
    def f_1():
      graph = tf.compat.v1.get_default_graph()
      # Capture the same x for the outer tf.function
      graph._experimental_capture_side_input_by_ref("x", lambda: x)

      @tf.function
      def g():
        graph = tf.compat.v1.get_default_graph()
        cap_x = graph._experimental_capture_side_input_by_ref("x", lambda: x)
        return cap_x + 1

      return g()

    # Incorrect usage
    @tf.function
    def f_2():

      @tf.function
      def g():
        graph = tf.compat.v1.get_default_graph()
        cap_x = graph._experimental_capture_side_input_by_ref("x", lambda: x)
        return cap_x + 1

      return g()

    assert f_1() == 2
    assert f_2() == 2
    x = 2
    assert f_1() == 3
    assert f_2() == 2  # This is incorrect

    ```

    Args:
      identifier: A hashable object as the key for the capture.
      func: A Python function that takes no arguments and returns the value of
        side input. The function is evaluated at function call time.

    Returns:
      A nested structure with the same structure as the side input. Tensors
        are replaced with placehoders, and non-tensors remain the same.
    """
    # Outside a graph there is nothing to capture: just evaluate now.
    if context.executing_eagerly():
      return func()

    def maybe_convert_to_tensor():
      # Wrap plain Python values in a constant so they can be fed as tensors;
      # symbolic/eager tensor values pass through unchanged.
      value = func()
      if not (isinstance(value, core.Value) or isinstance(value, core.Symbol)):
        value = constant_op.constant(value)
      return value

    placeholder = self._function_captures._capture_by_ref(  # pylint: disable=protected-access
        self, maybe_convert_to_tensor, identifier)
    return placeholder
  @property
  def captures(self):
    """Ordered list of tuples containing external and internal captures."""
    return self._function_captures.by_val_capture_tuples
def add_capture(self, tensor, placeholder):
"""Capture a specific tensor and utilize the provided placeholder.
Args:
tensor: Tensor to captures.
placeholder: Provided placeholder for the tensor.
"""
self._function_captures.add_or_replace(
key=id(tensor),
external=tensor,
internal=placeholder,
is_by_ref=False)
self.inputs.append(placeholder)
def replace_capture(self, tensor, placeholder):
"""Replace already existing capture."""
self._function_captures.add_or_replace(
key=id(tensor),
external=tensor,
internal=placeholder,
is_by_ref=False)
  def replace_capture_with_deferred_capture(self,
                                            tensor,
                                            closure,
                                            spec,
                                            placeholder,
                                            default_value=None):
    """Replaces existing capture `tensor` with a deferred capture `closure`.

    Caution: It is the caller's responsibility to make sure that, after calling
    this function, the TypeSpec of the `inputs` (i.e. internal placeholders) and
    the `_captured_inputs` (i.e. external captures) of a concrete function that
    wraps this function graph are still compatible. Thus user should pairing
    usage of this function with `ConcreteFunction.set_external_captures` to make
    sure the order still matches. For example,
    ```
    # concrete_fn._captured_inputs == [tensor1, tensor2, tensor3]
    # concrete_fn.inputs == [placeholder1, placeholder2, placeholder3]
    # replace external capture `tensor2` with a deferred_capture, i.e., a
    # closure, `closure2`
    concrete_fn.graph.replace_capture_with_deferred_capture(tensor2,
                                                            closure2,
                                                            placeholder2,
                                                            some_spec,
                                                            some_default)
    concrete_fn.set_external_captures([tensor1, closure2, tensor3])
    ```

    Args:
      tensor: Tensor already captured.
      closure: function which takes no arguments, to be evaluated at function
        call time, returning a nest of tensors compatible with `spec`.
      spec: nest of TypeSpec for the value to capture.
      placeholder: the internal placeholder corresponding to the captured
        `tensor`.
      default_value: optional value to use in environments that cannot safely
        evaluate closure.
    """
    # Drop the existing by-value capture, then re-register the same key as a
    # by-reference (deferred) capture, reusing the original placeholder.
    self._function_captures.pop(id(tensor), is_by_ref=False)
    self.capture_call_time_value(
        closure,
        spec,
        key=id(tensor),
        default_value=default_value,
        placeholder=placeholder)
@property
def external_captures(self):
"""External tensors captured by this function."""
return list(self._function_captures.by_val_external.values())
@property
def internal_captures(self):
"""Placeholders in this function corresponding captured tensors."""
return list(self._function_captures.by_val_internal.values())
@property
def deferred_external_captures(self):
"""Ordered nest of tensors whose placeholders will be fed at call time."""
return list(self._function_captures.by_ref_external.values())
@property
def deferred_internal_captures(self):
"""List of nest of placeholders which at call time will be fed."""
return list(self._function_captures.by_ref_internal.values())
  @property
  def variable_captures(self):
    """Variables captured by this FuncGraph.

    NOTE(review): the historical docstring described a "map of python object
    ids of variables to variables", but this simply returns `self.variables`
    (a sequence) — kept as-is for compatibility.
    """
    return self.variables
  @property
  def function_captures(self):
    """The capture-tracking object (`self._function_captures`) for this graph."""
    return self._function_captures
def mark_as_unsaveable(self, error_message):
"""Marks this FuncGraph as unsaveable.
Any attempts to export this FuncGraph will raise an error with the specified
message.
Args:
error_message: List or string containing the error message to be raised
when saving this FuncGraph to SavedModel.
"""
self._saveable = False
if isinstance(error_message, str):
error_message = [error_message]
self._saving_errors.update(error_message)
  @property
  def saveable(self):
    """Returns whether this FuncGraph is saveable (see `mark_as_unsaveable`)."""
    return self._saveable
  @property
  def saving_errors(self):
    """Returns the set of messages explaining why this FuncGraph can't be saved."""
    return self._saving_errors
  def _add_scope_exit_callback(self, fn):
    """Add a function to call when this graph exits the default scope.

    Args:
      fn: A zero-argument callable to run on scope exit.

    Raises:
      TypeError: if `fn` is not callable.
      RuntimeError: if this graph is not currently the default-scope graph
        (the callback list is only initialized inside `as_default`).
    """
    if not callable(fn):
      raise TypeError("fn is not callable: {}".format(fn))
    if self._scope_exit_callbacks is None:
      raise RuntimeError(
          "Attempting to add a scope exit callback, but the default graph is "
          "not the context scope graph. Did you forget to call "
          "'with graph.as_default(): ...'?")
    self._scope_exit_callbacks.append(fn)
@tf_contextlib.contextmanager
def _func_graph_as_default_inner_cm(
    func_graph: FuncGraph, outer_cm: ContextManager[ops.Graph]):
  """Context manager for copying distribute.Strategy scope information.

  Saves several pieces of `func_graph` state, copies the corresponding state
  from the current default graph for the duration of `outer_cm`, and restores
  the saved state on exit (running any scope-exit callbacks first).
  """
  # pylint: disable=protected-access
  # TODO(b/112906995, nareshmodi): distribution strategy depends on
  # inheriting this stack from the default graph even in eager mode. Maybe
  # it should be part of the eager context? This would also allow us to
  # remove a get_default_graph() call from the function cache lookup.
  graph = ops.get_default_graph()
  old_strategy_stack = func_graph._distribution_strategy_stack
  func_graph._distribution_strategy_stack = list(
      graph._distribution_strategy_stack)

  # We ignore device placements from any outer scopes while tracing the
  # function when possible, to avoid hard-coding them in the function
  # graph. "Default" placements come from the PartitionedCallOp's placement,
  # so that the same trace of the Python function may be placed on several
  # different devices and saved functions may be placed on new devices when
  # restored.
  # However, we need to preserve the outer device stack in the following
  # cases in non eager context:
  # 1. device stack is callable
  # 2. When using distribution strategy with legacy graph mode.
  old_device_stack = func_graph._device_function_stack
  if (not context.executing_eagerly() and
      (device_stack_has_callable(graph._device_function_stack) or
       (func_graph._distribution_strategy_stack and
        not ops.executing_eagerly_outside_functions()))):
    # Hard-code devices from device functions in the function body
    func_graph._device_function_stack = graph._device_function_stack.copy()

  old_creator_stack = func_graph._variable_creator_stack
  func_graph._variable_creator_stack = graph._variable_creator_stack
  # Inherit the graph key, since this is used for matching variables in
  # optimizers.
  old_graph_key = func_graph._graph_key
  func_graph._graph_key = graph._graph_key

  old_scope_exit_callbacks = func_graph._scope_exit_callbacks
  func_graph._scope_exit_callbacks = []

  with outer_cm as g:
    try:
      yield g
    finally:
      # Run exit callbacks first; the nested try/finally guarantees every
      # saved field is restored even if a callback raises.
      try:
        for fn in func_graph._scope_exit_callbacks:
          fn()
      finally:
        func_graph._scope_exit_callbacks = old_scope_exit_callbacks
        func_graph._distribution_strategy_stack = old_strategy_stack
        func_graph._device_function_stack = old_device_stack
        func_graph._variable_creator_stack = old_creator_stack
        func_graph._graph_key = old_graph_key
def func_graph_from_py_func(name,
                            python_func,
                            args,
                            kwargs,
                            signature=None,
                            func_graph=None,
                            add_control_dependencies=True,
                            arg_names=None,
                            op_return_value=None,
                            collections=None,
                            capture_by_value=None,
                            create_placeholders=True):
  """Returns a `FuncGraph` generated from `python_func`.

  Args:
    name: an identifier for the function.
    python_func: the Python function to trace.
    args: the positional args with which the Python function should be called;
      ignored if a signature is provided.
    kwargs: the keyword args with which the Python function should be called;
      ignored if a signature is provided.
    signature: a possibly nested sequence of `TensorSpecs` specifying the shapes
      and dtypes of the arguments. When a signature is provided, `args` and
      `kwargs` are ignored, and `python_func` is traced with Tensors conforming
      to `signature`. If `None`, the shapes and dtypes are inferred from the
      inputs.
    func_graph: Optional. An instance of FuncGraph. If provided, we will use
      this graph else a new one is built and returned.
    add_control_dependencies: If True, automatically adds control dependencies
      to ensure program order matches execution order and stateful ops always
      execute.
    arg_names: Optional list of argument names, used to give input placeholders
      recognizable names.
    op_return_value: Optional. A Tensor. If set and `python_func` returns
      Operations, those return values will be replaced with this value. If not
      set, returning an Operation triggers an error.
    collections: a dictionary of collections this FuncGraph should start with.
      If not specified (None), the FuncGraph will read (but not write to) the
      outer graph's collections that are not allowlisted, and both read and
      write to the outer graph's collections that are allowlisted. The current
      allowlisted collections are the global variables, the local variables, and
      the trainable variables. Defaults to None.
    capture_by_value: An optional boolean. If True, the func graph will capture
      Variables by value instead of reference. By default inherit from outer
      graphs, and failing that will default to False.
    create_placeholders: An optional boolean. If True, then func graph will
      create placeholders for the inputs as graph ops. If False, the input args
      and kwargs will be treated as the input placeholders.

  Returns:
    A FuncGraph.

  Raises:
    TypeError: If any of `python_func`'s return values is neither `None`, a
      `Tensor` or a `tf.experimental.ExtensionType`.
  """
  if op_return_value is not None:
    assert isinstance(op_return_value, tensor_lib.Tensor), op_return_value
  if func_graph is None:
    func_graph = FuncGraph(
        name, collections=collections, capture_by_value=capture_by_value)
  assert isinstance(func_graph, FuncGraph)
  if add_control_dependencies:
    deps_control_manager = auto_control_deps.AutomaticControlDependencies()
  else:
    deps_control_manager = ops.NullContextmanager()

  with func_graph.as_default(), deps_control_manager as deps_ctx:
    current_scope = variable_scope.get_variable_scope()
    default_use_resource = current_scope.use_resource
    current_scope.set_use_resource(True)

    if signature is not None:
      args = signature
      kwargs = {}

    if create_placeholders:
      func_args, func_kwargs = _create_placeholders(args, kwargs, arg_names)
    else:
      func_args, func_kwargs = args, kwargs

    # Provisional `inputs`; re-assigned after tracing (see below) once
    # captures are known.
    input_trace_types = trace_type.from_value([func_args, func_kwargs])
    func_graph.inputs = input_trace_types.to_tensors([func_args, func_kwargs])  # pylint: disable=protected-access

    # Reset variables watched while deconstructing inputs.
    func_graph._watched_variables = object_identity.ObjectIdentityWeakSet()  # pylint: disable=protected-access
    for arg in func_graph.inputs:
      if arg.dtype == dtypes.resource:
        func_graph._resource_tensor_inputs.add(arg)  # pylint:disable=protected-access

    signature_context = trace_type.InternalTracingContext()
    # Convert all Tensors into TensorSpecs before saving the structured inputs.
    # If storing pure concrete functions that are not called through polymorphic
    # functions, we don't have access to FunctionSpec, so we need to call the
    # TensorSpecs by their `arg_names` for later binding.
    func_graph.structured_input_signature = (
        convert_structure_to_signature(
            func_args, arg_names, signature_context=signature_context),
        convert_structure_to_signature(
            func_kwargs, signature_context=signature_context))

    # Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
    # Variables to help check whether mutation happens in calling the function
    # Copy the recursive list, tuple and map structure, but not base objects
    func_args_before = nest.pack_sequence_as(
        func_args,
        nest.flatten(func_args, expand_composites=True),
        expand_composites=True)
    func_kwargs_before = nest.pack_sequence_as(
        func_kwargs,
        nest.flatten(func_kwargs, expand_composites=True),
        expand_composites=True)

    def convert(x):
      """Converts a function output to a Tensor."""
      if x is None:
        return None
      if op_return_value is not None and isinstance(x, ops.Operation):
        # TODO(b/79881896): we currently can't capture external control deps, so
        # this won't work if x needs to be captured (i.e. if python_func returns
        # captured Operations).
        with ops.control_dependencies([x]):
          x = array_ops.identity(op_return_value)
      elif not isinstance(x, tensor_array_ops.TensorArray):
        try:
          x = ops.convert_to_tensor_or_composite(x)
        except (ValueError, TypeError):
          raise TypeError(
              "To be compatible with tf.function, Python functions "
              "must return zero or more Tensors or ExtensionTypes or None "
              f"values; in compilation of {str(python_func)}, found return "
              f"value of type {type(x).__name__}, which is not a Tensor or "
              "ExtensionType.")
      if add_control_dependencies:
        x = deps_ctx.mark_as_return(x)
      return x

    _, original_func = tf_decorator.unwrap(python_func)
    # Trace: run the Python function against the placeholder arguments.
    func_outputs = python_func(*func_args, **func_kwargs)

    # invariant: `func_outputs` contains only Tensors, CompositeTensors,
    # TensorArrays and `None`s.
    func_outputs = variable_utils.convert_variables_to_tensors(func_outputs)
    func_outputs = nest.map_structure(
        convert, func_outputs, expand_composites=True)

    # flatten and unflatten func_args and func_kwargs to maintain parity
    # from flattening which sorts by key
    func_args = nest.pack_sequence_as(
        func_args,
        nest.flatten(func_args, expand_composites=True),
        expand_composites=True)
    func_kwargs = nest.pack_sequence_as(
        func_kwargs,
        nest.flatten(func_kwargs, expand_composites=True),
        expand_composites=True)
    check_func_mutation(func_args_before, func_kwargs_before, func_args,
                        func_kwargs, original_func)
    current_scope.set_use_resource(default_use_resource)

    # Rebuild `func_graph.inputs`: argument placeholders first (resource
    # variables contribute their resource placeholder), then captures.
    inputs = []
    for arg in composite_tensor_utils.flatten_with_variables([func_args,
                                                              func_kwargs]):
      if isinstance(arg, resource_variable_ops.BaseResourceVariable):
        # Even if an argument variable was not used in the function, we've
        # already manually captured the resource Tensor when creating argument
        # placeholders.
        capture = func_graph._function_captures.pop(id(arg.handle), False)  # pylint: disable=protected-access
        assert len(capture) >= 2
        resource_placeholder = capture[1]
        if resource_placeholder is None:
          continue
        inputs.append(resource_placeholder)
      elif isinstance(arg, tensor_lib.Tensor):
        inputs.append(arg)
    func_graph.inputs = (
        inputs + func_graph.internal_captures + nest.flatten(
            func_graph.deferred_internal_captures, expand_composites=True))
    func_graph.structured_outputs = func_outputs
    # Returning a closed-over tensor does not trigger convert_to_tensor.
    func_graph.outputs.extend(
        func_graph.capture(x)
        for x in flatten(func_graph.structured_outputs)
        if x is not None)

    func_graph.variables = func_graph._watched_variables  # pylint: disable=protected-access

  if add_control_dependencies:
    func_graph.control_outputs.extend(deps_control_manager.ops_which_must_run)
    func_graph.collective_manager_ids_used = (
        deps_control_manager.collective_manager_ids_used)

  return func_graph
def device_stack_has_callable(device_stack):
  """Returns True if any entry on `device_stack` is a device *function*."""
  for spec in device_stack.peek_objs():
    if callable(spec._device_name_or_function):  # pylint: disable=protected-access
      return True
  return False
def has_mutation(n1, n2):
  """Returns True if n1 and n2 differ (leaves are compared with `is`)."""
  # A structure mismatch counts as a mutation.
  try:
    nest.assert_same_structure(n1, n2, expand_composites=True)
  except ValueError:
    return True
  leaves1 = nest.flatten(n1, expand_composites=True)
  leaves2 = nest.flatten(n2, expand_composites=True)
  return any(a is not b for a, b in zip(leaves1, leaves2))
def check_func_mutation(old_args, old_kwargs, new_args, new_kwargs, func):
  """Raises ValueError if `func` modified its Python input arguments."""
  unchanged = not has_mutation((old_args, old_kwargs), (new_args, new_kwargs))
  if unchanged:
    return

  # Mutation detected; build an error message naming the modified parameters.
  func_name = getattr(func, "__qualname__", getattr(func, "__name__", func))
  signature = tf_inspect.signature(func)
  try:
    before = signature.bind(*old_args, **old_kwargs).arguments
    after = signature.bind(*new_args, **new_kwargs).arguments
  except TypeError as e:
    # This occurs when the function is called with the (deprecated)
    # "flat signature". See ConcreteFunction._call_with_flat_signature. In
    # this case, we can't report which arguments were modified.
    raise ValueError(
        f"{func_name}{signature} should not modify its Python input "
        f"arguments. Check if it modifies any lists or dicts passed as "
        f"arguments. Modifying a copy is allowed.") from e
  assert set(before) == set(after)
  changes = ", ".join(
      arg_name for arg_name in after
      if has_mutation(before[arg_name], after[arg_name]))
  raise ValueError(f"{func_name}{signature} should not modify its Python "
                   f"input arguments. Modifying a copy is allowed. The "
                   f"following parameter(s) were modified: {changes}")
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def flatten(sequence):
  """Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.

  Args:
    sequence: A nested structure of Tensors, CompositeTensors, and TensorArrays.

  Returns:
    A list of tensors.
  """
  leaves = nest.flatten(sequence, expand_composites=True)
  return [
      leaf.flow if isinstance(leaf, tensor_array_ops.TensorArray) else leaf
      for leaf in leaves
  ]
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def pack_sequence_as(structure, flat_sequence):
  """Like `nest.pack_sequence_as` but also builds TensorArrays from flows.

  Args:
    structure: The structure to pack into. May contain Tensors,
      CompositeTensors, or TensorArrays.
    flat_sequence: An iterable containing tensors.

  Returns:
    A nested structure.

  Raises:
    ValueError: if `structure` and `flat_sequence` have different element
      counts.
  """
  flat = list(flat_sequence)
  templates = nest.flatten(structure, expand_composites=True)
  if len(templates) != len(flat):
    raise ValueError("Mismatch in element count")
  # Rebuild each TensorArray from its flow tensor, keeping everything else.
  for i, (template, value) in enumerate(zip(templates, flat)):
    if isinstance(template, tensor_array_ops.TensorArray):
      flat[i] = tensor_array_ops.build_ta_with_new_flow(
          old_ta=template, flow=value)
  return nest.pack_sequence_as(structure, flat, expand_composites=True)
def _create_placeholders(args, kwargs, arg_names=None):
  """Create placeholders given positional args and keyword args."""
  tracing_context = trace_type.InternalTracingContext(
      is_legacy_signature=True)
  args_type = trace_type.from_value(tuple(args), tracing_context)
  kwargs_type = trace_type.from_value(kwargs, tracing_context)
  placeholder_context = trace_type.InternalPlaceholderContext(
      ops.get_default_graph(), tracing_context.get_placeholder_mapping())
  if arg_names is None:
    arg_names = [None] * len(args_type.components)
  # Build positional placeholders in order; update_naming_scope is stateful,
  # so each placeholder is created under its own name scope.
  positional = []
  for name, component in zip(arg_names, args_type.components):
    placeholder_context.update_naming_scope(name)
    positional.append(component.placeholder_value(placeholder_context))
  # Keyword placeholders are created in sorted key order for determinism.
  keyword = {}
  for name, component in sorted(kwargs_type.mapping.items()):
    placeholder_context.update_naming_scope(name)
    keyword[name] = component.placeholder_value(placeholder_context)
  return tuple(positional), keyword
def dismantle_func_graph(func_graph):
  """Removes reference cycles in `func_graph` FuncGraph.

  Helpful for making sure the garbage collector doesn't need to run when
  the FuncGraph goes out of scope, e.g. in tests using defun with
  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).

  Args:
    func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable after
      this function.
  """
  # NOTE(review): clearing captures first presumably breaks cycles between
  # captured tensors and the graph — confirm against FuncGraph internals.
  func_graph._function_captures.clear()  # pylint: disable=protected-access
  ops.dismantle_graph(func_graph)
def override_func_graph_name_scope(func_graph, name_scope):
  """Replaces `func_graph`'s internal name stack with `name_scope`."""
  func_graph._name_stack = name_scope  # pylint: disable=protected-access
| FuncGraph |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 485729,
"end": 485943
} | class ____(VegaLiteSchema):
"""InlineDataset schema wrapper."""
_schema = {"$ref": "#/definitions/InlineDataset"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| InlineDataset |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_shared.py | {
"start": 11860,
"end": 12817
} | class ____(Algorithm[HNSWSearchParams]):
m: Annotated[PositiveInt | None, Field(ge=2, le=100)] = None
ef_construction: Annotated[PositiveInt | None, Field(ge=4, le=1_000)] = None
@model_validator(mode="after")
def sanity_check(self) -> Self:
if (
self.m is not None
and self.ef_construction is not None
and self.ef_construction < 2 * self.m
):
raise ValueError(
"Parameter 'ef_construction' must be at least twice the value of 'm'."
)
return self
@override
def build_settings(self, exclude_none=True):
return {
key: value
for key, value in self.model_dump(
mode="json", exclude_none=exclude_none
).items()
if key in ["m", "ef_construction"]
}
@override
def default_search_params(self) -> HNSWSearchParams:
return HNSWSearchParams()
| HNSW |
python | readthedocs__readthedocs.org | readthedocs/builds/storage.py | {
"start": 5780,
"end": 7828
} | class ____(BuildMediaStorageMixin, FileSystemStorage):
"""Storage subclass that writes build artifacts in PRODUCTION_MEDIA_ARTIFACTS or MEDIA_ROOT."""
def __init__(self, **kwargs):
location = kwargs.pop("location", None)
if not location:
# Mirrors the logic of getting the production media path
if settings.DEFAULT_PRIVACY_LEVEL == "public" or settings.DEBUG:
location = settings.MEDIA_ROOT
else:
location = settings.PRODUCTION_MEDIA_ARTIFACTS
super().__init__(location)
@cached_property
def _rclone(self):
return RCloneLocal(location=self.location)
def get_available_name(self, name, max_length=None):
"""
A hack to overwrite by default with the FileSystemStorage.
After upgrading to Django 2.2, this method can be removed
because subclasses can set OS_OPEN_FLAGS such that FileSystemStorage._save
will properly overwrite files.
See: https://github.com/django/django/pull/8476
"""
name = super().get_available_name(name, max_length=max_length)
if self.exists(name):
self.delete(name)
return name
def listdir(self, path):
"""
Return empty lists for nonexistent directories.
This mimics what cloud storages do.
"""
if not self.exists(path):
return [], []
return super().listdir(path)
def url(self, name, *args, **kwargs): # noqa
"""
Override to accept extra arguments and ignore them all.
This method helps us to bring compatibility between Azure Blob Storage
(which does not use the HTTP method) and Amazon S3 (who requires HTTP
method to build the signed URL).
``FileSystemStorage`` does not support any other argument than ``name``.
https://docs.djangoproject.com/en/2.2/ref/files/storage/#django.core.files.storage.Storage.url
"""
return super().url(name)
| BuildMediaFileSystemStorage |
python | huggingface__transformers | tests/models/resnet/test_modeling_resnet.py | {
"start": 9459,
"end": 10645
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return AutoImageProcessor.from_pretrained("microsoft/resnet-50") if is_vision_available() else None
@slow
def test_inference_image_classification_head(self):
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expectations = Expectations(
{
(None, None): [-11.1069, -9.7877, -8.3777],
("cuda", 8): [-11.1069, -9.7877, -8.3777],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4)
@require_torch
| ResNetModelIntegrationTest |
python | doocs__leetcode | solution/1600-1699/1657.Determine if Two Strings Are Close/Solution.py | {
"start": 0,
"end": 253
} | class ____:
def closeStrings(self, word1: str, word2: str) -> bool:
cnt1, cnt2 = Counter(word1), Counter(word2)
return sorted(cnt1.values()) == sorted(cnt2.values()) and set(
cnt1.keys()
) == set(cnt2.keys())
| Solution |
python | pallets__flask | src/flask/json/tag.py | {
"start": 3501,
"end": 3863
} | class ____(JSONTag):
__slots__ = ()
def check(self, value: t.Any) -> bool:
return isinstance(value, dict)
def to_json(self, value: t.Any) -> t.Any:
# JSON objects may only have string keys, so don't bother tagging the
# key here.
return {k: self.serializer.tag(v) for k, v in value.items()}
tag = to_json
| PassDict |
python | numba__numba | numba/core/typeinfer.py | {
"start": 33027,
"end": 34126
} | class ____(object):
def __init__(self, target, attr, value, loc):
self.target = target
self.attr = attr
self.value = value
self.loc = loc
def __call__(self, typeinfer):
with new_error_context("typing of set attribute {attr!r} at {loc}",
attr=self.attr, loc=self.loc):
typevars = typeinfer.typevars
if not all(typevars[var.name].defined
for var in (self.target, self.value)):
return
targetty = typevars[self.target.name].getone()
valty = typevars[self.value.name].getone()
sig = typeinfer.context.resolve_setattr(targetty, self.attr,
valty)
if sig is None:
raise TypingError("Cannot resolve setattr: (%s).%s = %s" %
(targetty, self.attr, valty),
loc=self.loc)
self.signature = sig
def get_call_signature(self):
return self.signature
| SetAttrConstraint |
python | huggingface__transformers | src/transformers/models/oneformer/modeling_oneformer.py | {
"start": 37157,
"end": 41444
} | class ____(ModelOutput):
r"""
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
transformer decoder at the output of each stage.
transformer_decoder_object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):
Output object queries from the last layer in the transformer decoder.
transformer_decoder_contrastive_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):
Contrastive queries from the transformer decoder.
transformer_decoder_mask_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`):
Mask Predictions from the last layer in the transformer decoder.
transformer_decoder_class_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`):
Class Predictions from the last layer in the transformer decoder.
transformer_decoder_auxiliary_predictions (Tuple of Dict of `str, torch.FloatTensor`, *optional*):
Tuple of class and mask predictions from each layer of the transformer decoder.
text_queries (`torch.FloatTensor`, *optional* of shape `(batch_size, num_queries, hidden_dim)`):
Text queries derived from the input text list used for calculating contrastive loss during training.
task_token (`torch.FloatTensor` of shape `(batch_size, hidden_dim)`):
1D task token to condition the queries.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Self and Cross Attentions weights from transformer decoder.
"""
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
pixel_decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
transformer_decoder_hidden_states: Optional[torch.FloatTensor] = None
transformer_decoder_object_queries: Optional[torch.FloatTensor] = None
transformer_decoder_contrastive_queries: Optional[torch.FloatTensor] = None
transformer_decoder_mask_predictions: Optional[torch.FloatTensor] = None
transformer_decoder_class_predictions: Optional[torch.FloatTensor] = None
transformer_decoder_auxiliary_predictions: Optional[tuple[dict[str, torch.FloatTensor]]] = None
text_queries: Optional[torch.FloatTensor] = None
task_token: Optional[torch.FloatTensor] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Class for outputs of [`OneFormerForUniversalSegmentationOutput`].
This output can be directly passed to [`~OneFormerImageProcessor.post_process_semantic_segmentation`] or
[`~OneFormerImageProcessor.post_process_instance_segmentation`] or
[`~OneFormerImageProcessor.post_process_panoptic_segmentation`] depending on the task. Please, see
[`~OneFormerImageProcessor] for details regarding usage.
"""
)
| OneFormerModelOutput |
python | apache__airflow | airflow-core/tests/unit/models/test_dagrun.py | {
"start": 106387,
"end": 109756
} | class ____:
def test_get_last_ti_with_multiple_tis(self, dag_maker, session):
"""Test get_last_ti returns the last TI (first created) when multiple TIs exist"""
with dag_maker("test_dag", session=session) as dag:
BashOperator(task_id="task1", bash_command="echo 1")
BashOperator(task_id="task2", bash_command="echo 2")
BashOperator(task_id="task3", bash_command="echo 3")
dr = dag_maker.create_dagrun()
tis = dr.get_task_instances(session=session)
assert len(tis) == 3
# Mark some TIs with different states
tis[0].state = TaskInstanceState.SUCCESS
tis[1].state = TaskInstanceState.FAILED
tis[2].state = TaskInstanceState.RUNNING
session.commit()
last_ti = dr.get_last_ti(dag, session=session)
# Should return the last TI in the list (index -1)
assert last_ti is not None
assert last_ti == tis[-1]
assert last_ti.task_id == "task3"
def test_get_last_ti_filters_none_state_in_partial_dag(self, dag_maker, session):
"""Test get_last_ti filters out NONE state TIs when dag is partial"""
with dag_maker("test_dag", session=session) as dag:
BashOperator(task_id="task1", bash_command="echo 1")
BashOperator(task_id="task2", bash_command="echo 2")
dr = dag_maker.create_dagrun()
dag.partial = True
# Create task instances with different states
tis = dr.get_task_instances(session=session)
tis[0].state = State.NONE # Should be filtered out in partial DAG
tis[1].state = TaskInstanceState.RUNNING
session.commit()
last_ti = dr.get_last_ti(dag, session=session)
assert last_ti is not None
assert last_ti.state != State.NONE
assert last_ti.task_id == "task2"
def test_get_last_ti_filters_removed_tasks(self, dag_maker, session):
"""Test get_last_ti filters out REMOVED task instances"""
with dag_maker("test_dag", session=session) as dag:
BashOperator(task_id="task1", bash_command="echo 1")
BashOperator(task_id="task2", bash_command="echo 2")
BashOperator(task_id="task3", bash_command="echo 3")
dr = dag_maker.create_dagrun()
tis = dr.get_task_instances(session=session)
# Mark some TIs as removed
tis[0].state = TaskInstanceState.REMOVED
tis[1].state = TaskInstanceState.REMOVED
tis[2].state = TaskInstanceState.SUCCESS
session.commit()
last_ti = dr.get_last_ti(dag, session=session)
# Should return the TI that is not REMOVED
assert last_ti is not None
assert last_ti.state != TaskInstanceState.REMOVED
assert last_ti.task_id == "task3"
def test_get_last_ti_with_single_ti(self, dag_maker, session):
"""Test get_last_ti works with single task instance"""
with dag_maker("test_dag", session=session) as dag:
BashOperator(task_id="single_task", bash_command="echo 1")
dr = dag_maker.create_dagrun()
tis = dr.get_task_instances(session=session)
assert len(tis) == 1
last_ti = dr.get_last_ti(dag, session=session)
assert last_ti is not None
assert last_ti == tis[0]
assert last_ti.task_id == "single_task"
| TestDagRunGetLastTi |
python | sphinx-doc__sphinx | tests/roots/test-ext-inheritance_diagram/test.py | {
"start": 53,
"end": 91
} | class ____(DocHere):
pass
| DocSubDir1 |
python | huggingface__transformers | src/transformers/models/data2vec/modular_data2vec_text.py | {
"start": 1590,
"end": 1650
} | class ____(RobertaEmbeddings):
pass
| Data2VecTextEmbeddings |
python | apache__airflow | providers/apache/hive/tests/unit/apache/hive/__init__.py | {
"start": 5197,
"end": 5345
} | class ____(BaseMockConnectionCursor):
def __init__(self):
super().__init__()
self.iterable = [(1, 1), (2, 2)]
| MockConnectionCursor |
python | PyCQA__pylint | tests/functional/a/abstract/abstract_class_instantiated_in_class.py | {
"start": 114,
"end": 297
} | class ____(metaclass=abc.ABCMeta):
@abc.abstractmethod
def bala(self):
pass
@classmethod
def portocala(cls):
instance = cls()
return instance
| Ala |
python | FactoryBoy__factory_boy | tests/test_using.py | {
"start": 2374,
"end": 2413
} | class ____(FakeModel):
pass
| TestModel |
python | PrefectHQ__prefect | src/prefect/utilities/annotations.py | {
"start": 2811,
"end": 3074
} | class ____(quote[T]):
def __new__(cls, expr: T) -> Self:
warnings.warn(
"Use of `Quote` is deprecated. Use `quote` instead.",
DeprecationWarning,
stacklevel=2,
)
return super().__new__(cls, expr)
| Quote |
python | PrefectHQ__prefect | src/prefect/workers/base.py | {
"start": 15068,
"end": 63975
} | class ____(abc.ABC, Generic[C, V, R]):
type: str
job_configuration: Type[C] = BaseJobConfiguration # type: ignore
job_configuration_variables: Optional[Type[V]] = None
_documentation_url = ""
_logo_url = ""
_description = ""
def __init__(
self,
work_pool_name: str,
work_queues: list[str] | None = None,
name: str | None = None,
prefetch_seconds: float | None = None,
create_pool_if_not_found: bool = True,
limit: int | None = None,
heartbeat_interval_seconds: int | None = None,
*,
base_job_template: dict[str, Any] | None = None,
):
"""
Base class for all Prefect workers.
Args:
name: The name of the worker. If not provided, a random one
will be generated. If provided, it cannot contain '/' or '%'.
The name is used to identify the worker in the UI; if two
processes have the same name, they will be treated as the same
worker.
work_pool_name: The name of the work pool to poll.
work_queues: A list of work queues to poll. If not provided, all
work queue in the work pool will be polled.
prefetch_seconds: The number of seconds to prefetch flow runs for.
create_pool_if_not_found: Whether to create the work pool
if it is not found. Defaults to `True`, but can be set to `False` to
ensure that work pools are not created accidentally.
limit: The maximum number of flow runs this worker should be running at
a given time.
heartbeat_interval_seconds: The number of seconds between worker heartbeats.
base_job_template: If creating the work pool, provide the base job
template to use. Logs a warning if the pool already exists.
"""
if name and ("/" in name or "%" in name):
raise ValueError("Worker name cannot contain '/' or '%'")
self.name: str = name or f"{self.__class__.__name__} {uuid4()}"
self._started_event: Optional[Event] = None
self.backend_id: Optional[UUID] = None
self._logger = get_worker_logger(self)
self.is_setup = False
self._create_pool_if_not_found = create_pool_if_not_found
self._base_job_template = base_job_template
self._work_pool_name = work_pool_name
self._work_queues: set[str] = set(work_queues) if work_queues else set()
self._prefetch_seconds: float = (
prefetch_seconds or PREFECT_WORKER_PREFETCH_SECONDS.value()
)
self.heartbeat_interval_seconds: int = (
heartbeat_interval_seconds or PREFECT_WORKER_HEARTBEAT_SECONDS.value()
)
self._work_pool: Optional[WorkPool] = None
self._exit_stack: AsyncExitStack = AsyncExitStack()
self._runs_task_group: Optional[anyio.abc.TaskGroup] = None
self._client: Optional[PrefectClient] = None
self._last_polled_time: datetime.datetime = prefect.types._datetime.now("UTC")
self._limit = limit
self._limiter: Optional[anyio.CapacityLimiter] = None
self._submitting_flow_run_ids: set[UUID] = set()
self._cancelling_flow_run_ids: set[UUID] = set()
self._scheduled_task_scopes: set[anyio.CancelScope] = set()
self._worker_metadata_sent = False
@property
def client(self) -> PrefectClient:
if self._client is None:
raise RuntimeError(
"Worker has not been correctly initialized. Please use the worker class as an async context manager."
)
return self._client
@property
def work_pool(self) -> WorkPool:
if self._work_pool is None:
raise RuntimeError(
"Worker has not been correctly initialized. Please use the worker class as an async context manager."
)
return self._work_pool
@property
def limiter(self) -> anyio.CapacityLimiter:
if self._limiter is None:
raise RuntimeError(
"Worker has not been correctly initialized. Please use the worker class as an async context manager."
)
return self._limiter
@classmethod
def get_documentation_url(cls) -> str:
return cls._documentation_url
@classmethod
def get_logo_url(cls) -> str:
return cls._logo_url
@classmethod
def get_description(cls) -> str:
return cls._description
@classmethod
def get_default_base_job_template(cls) -> dict[str, Any]:
if cls.job_configuration_variables is None:
schema = cls.job_configuration.model_json_schema()
# remove "template" key from all dicts in schema['properties'] because it is not a
# relevant field
for key, value in schema["properties"].items():
if isinstance(value, dict):
schema["properties"][key].pop("template", None)
variables_schema = schema
else:
variables_schema = cls.job_configuration_variables.model_json_schema()
variables_schema.pop("title", None)
return {
"job_configuration": cls.job_configuration.json_template(),
"variables": variables_schema,
}
@staticmethod
def get_worker_class_from_type(
type: str,
) -> Optional[Type["BaseWorker[Any, Any, Any]"]]:
"""
Returns the worker class for a given worker type. If the worker type
is not recognized, returns None.
"""
load_prefect_collections()
worker_registry = get_registry_for_type(BaseWorker)
if worker_registry is not None:
return worker_registry.get(type)
@staticmethod
def get_all_available_worker_types() -> list[str]:
"""
Returns all worker types available in the local registry.
"""
load_prefect_collections()
worker_registry = get_registry_for_type(BaseWorker)
if worker_registry is not None:
return list(worker_registry.keys())
return []
def get_name_slug(self) -> str:
return slugify(self.name)
def get_flow_run_logger(self, flow_run: "FlowRun") -> PrefectLogAdapter:
extra = {
"worker_name": self.name,
"work_pool_name": (
self._work_pool_name if self._work_pool else "<unknown>"
),
"work_pool_id": str(getattr(self._work_pool, "id", "unknown")),
}
if self.backend_id:
extra["worker_id"] = str(self.backend_id)
return flow_run_logger(flow_run=flow_run).getChild(
"worker",
extra=extra,
)
async def start(
self,
run_once: bool = False,
with_healthcheck: bool = False,
printer: Callable[..., None] = print,
) -> None:
"""
Starts the worker and runs the main worker loops.
By default, the worker will run loops to poll for scheduled/cancelled flow
runs and sync with the Prefect API server.
If `run_once` is set, the worker will only run each loop once and then return.
If `with_healthcheck` is set, the worker will start a healthcheck server which
can be used to determine if the worker is still polling for flow runs and restart
the worker if necessary.
Args:
run_once: If set, the worker will only run each loop once then return.
with_healthcheck: If set, the worker will start a healthcheck server.
printer: A `print`-like function where logs will be reported.
"""
healthcheck_server = None
healthcheck_thread = None
try:
async with self as worker:
# schedule the scheduled flow run polling loop
async with anyio.create_task_group() as loops_task_group:
loops_task_group.start_soon(
partial(
critical_service_loop,
workload=self.get_and_submit_flow_runs,
interval=PREFECT_WORKER_QUERY_SECONDS.value(),
run_once=run_once,
jitter_range=0.3,
backoff=4, # Up to ~1 minute interval during backoff
)
)
# schedule the sync loop
loops_task_group.start_soon(
partial(
critical_service_loop,
workload=self.sync_with_backend,
interval=self.heartbeat_interval_seconds,
run_once=run_once,
jitter_range=0.3,
backoff=4,
)
)
self._started_event = await self._emit_worker_started_event()
start_client_metrics_server()
if with_healthcheck:
from prefect.workers.server import build_healthcheck_server
# we'll start the ASGI server in a separate thread so that
# uvicorn does not block the main thread
healthcheck_server = build_healthcheck_server(
worker=worker,
query_interval_seconds=PREFECT_WORKER_QUERY_SECONDS.value(),
)
healthcheck_thread = threading.Thread(
name="healthcheck-server-thread",
target=healthcheck_server.run,
daemon=True,
)
healthcheck_thread.start()
printer(f"Worker {worker.name!r} started!")
# If running once, wait for active runs to finish before teardown
if run_once and self._limiter:
# Use the limiter's borrowed token count as the source of truth
while self.limiter.borrowed_tokens > 0:
self._logger.debug(
"Waiting for %s active run(s) to finish before shutdown...",
self.limiter.borrowed_tokens,
)
await anyio.sleep(0.1)
finally:
stop_client_metrics_server()
if healthcheck_server and healthcheck_thread:
self._logger.debug("Stopping healthcheck server...")
healthcheck_server.should_exit = True
healthcheck_thread.join()
self._logger.debug("Healthcheck server stopped.")
printer(f"Worker {worker.name!r} stopped!")
@abc.abstractmethod
async def run(
self,
flow_run: "FlowRun",
configuration: C,
task_status: Optional[anyio.abc.TaskStatus[int]] = None,
) -> R:
"""
Runs a given flow run on the current worker.
"""
raise NotImplementedError(
"Workers must implement a method for running submitted flow runs"
)
async def _initiate_run(
self,
flow_run: "FlowRun",
configuration: C,
) -> None:
"""
This method is called by the worker to initiate a flow run and should return as
soon as possible.
This method is used in `.submit` to allow non-blocking submission of flows. For
workers that wait for completion in their `run` method, this method should be
implemented to return immediately.
If this method is not implemented, `.submit` will fall back to the `.run` method.
"""
raise NotImplementedError(
"This worker has not implemented `_initiate_run`. Please use `run` instead."
)
async def submit(
self,
flow: "Flow[..., FR]",
parameters: dict[str, Any] | None = None,
job_variables: dict[str, Any] | None = None,
) -> "PrefectFlowRunFuture[FR]":
"""
EXPERIMENTAL: The interface for this method is subject to change.
Submits a flow to run via the worker.
Args:
flow: The flow to submit
parameters: The parameters to pass to the flow
Returns:
A flow run object
"""
warnings.warn(
"Ad-hoc flow submission via workers is experimental. The interface "
"and behavior of this feature are subject to change.",
category=FutureWarning,
)
if self._runs_task_group is None:
raise RuntimeError("Worker not properly initialized")
flow_run = await self._runs_task_group.start(
partial(
self._submit_adhoc_run,
flow=flow,
parameters=parameters,
job_variables=job_variables,
),
)
return PrefectFlowRunFuture(flow_run_id=flow_run.id)
async def _submit_adhoc_run(
self,
flow: "Flow[..., FR]",
parameters: dict[str, Any] | None = None,
job_variables: dict[str, Any] | None = None,
task_status: anyio.abc.TaskStatus["FlowRun"] | None = None,
):
"""
Submits a flow for the worker to kick off execution for.
"""
from prefect._experimental.bundles import (
aupload_bundle_to_storage,
convert_step_to_command,
create_bundle_for_flow_run,
)
if (
self.work_pool.storage_configuration.bundle_upload_step is None
or self.work_pool.storage_configuration.bundle_execution_step is None
):
raise RuntimeError(
f"Storage is not configured for work pool {self.work_pool.name!r}. "
"Please configure storage for the work pool by running `prefect "
"work-pool storage configure`."
)
from prefect.results import aresolve_result_storage, get_result_store
current_result_store = get_result_store()
# Check result storage and use the work pool default if needed
if (
current_result_store.result_storage is None
or isinstance(current_result_store.result_storage, LocalFileSystem)
and flow.result_storage is None
):
if (
self.work_pool.storage_configuration.default_result_storage_block_id
is None
):
self._logger.warning(
f"Flow {flow.name!r} has no result storage configured. Please configure "
"result storage for the flow if you want to retrieve the result for the flow run."
)
else:
# Use the work pool's default result storage block for the flow run to ensure the caller can retrieve the result
flow = flow.with_options(
result_storage=await aresolve_result_storage(
self.work_pool.storage_configuration.default_result_storage_block_id
),
persist_result=True,
)
bundle_key = str(uuid.uuid4())
upload_command = convert_step_to_command(
self.work_pool.storage_configuration.bundle_upload_step,
bundle_key,
quiet=True,
)
execute_command = convert_step_to_command(
self.work_pool.storage_configuration.bundle_execution_step, bundle_key
)
job_variables = (job_variables or {}) | {"command": " ".join(execute_command)}
parameters = parameters or {}
# Create a parent task run if this is a child flow run to ensure it shows up as a child flow in the UI
parent_task_run = None
if flow_run_ctx := FlowRunContext.get():
parent_task = Task[Any, Any](
name=flow.name,
fn=flow.fn,
version=flow.version,
)
parent_task_run = await parent_task.create_run(
flow_run_context=flow_run_ctx,
parameters=parameters,
)
flow_run = await self.client.create_flow_run(
flow,
parameters=flow.serialize_parameters(parameters),
state=Pending(),
job_variables=job_variables,
work_pool_name=self.work_pool.name,
tags=TagsContext.get().current_tags,
parent_task_run_id=getattr(parent_task_run, "id", None),
)
if task_status is not None:
# Emit the flow run object to .submit to allow it to return a future as soon as possible
task_status.started(flow_run)
# Avoid an API call to get the flow
api_flow = APIFlow(id=flow_run.flow_id, name=flow.name, labels={})
logger = self.get_flow_run_logger(flow_run)
configuration = await self.job_configuration.from_template_and_values(
base_job_template=self.work_pool.base_job_template,
values=job_variables,
client=self._client,
)
configuration.prepare_for_flow_run(
flow_run=flow_run,
flow=api_flow,
work_pool=self.work_pool,
worker_name=self.name,
)
bundle = create_bundle_for_flow_run(flow=flow, flow_run=flow_run)
await aupload_bundle_to_storage(bundle, bundle_key, upload_command)
logger.debug("Successfully uploaded execution bundle")
try:
# Call the implementation-specific run method with the constructed configuration. This is where the
# rubber meets the road.
try:
await self._initiate_run(flow_run, configuration)
except NotImplementedError:
result = await self.run(flow_run, configuration)
if result.status_code != 0:
await self._propose_crashed_state(
flow_run,
(
"Flow run infrastructure exited with non-zero status code"
f" {result.status_code}."
),
)
except Exception as exc:
# This flow run was being submitted and did not start successfully
logger.exception(
f"Failed to submit flow run '{flow_run.id}' to infrastructure."
)
message = f"Flow run could not be submitted to infrastructure:\n{exc!r}"
await self._propose_crashed_state(flow_run, message, client=self.client)
@classmethod
def __dispatch_key__(cls) -> str | None:
if cls.__name__ == "BaseWorker":
return None # The base class is abstract
return cls.type
async def setup(self) -> None:
"""Prepares the worker to run."""
self._logger.debug("Setting up worker...")
self._runs_task_group = anyio.create_task_group()
self._limiter = (
anyio.CapacityLimiter(self._limit) if self._limit is not None else None
)
if not PREFECT_TEST_MODE and not PREFECT_API_URL.value():
raise ValueError("`PREFECT_API_URL` must be set to start a Worker.")
self._client = get_client()
await self._exit_stack.enter_async_context(self._client)
await self._exit_stack.enter_async_context(self._runs_task_group)
await self.sync_with_backend()
self.is_setup = True
async def teardown(self, *exc_info: Any) -> None:
"""Cleans up resources after the worker is stopped."""
self._logger.debug("Tearing down worker...")
self.is_setup: bool = False
for scope in self._scheduled_task_scopes:
scope.cancel()
# Emit stopped event before closing client
if self._started_event:
try:
await self._emit_worker_stopped_event(self._started_event)
except Exception:
self._logger.exception("Failed to emit worker stopped event")
await self._exit_stack.__aexit__(*exc_info)
self._runs_task_group = None
self._client = None
def is_worker_still_polling(self, query_interval_seconds: float) -> bool:
    """Return whether the worker has polled for runs recently enough.

    Invoked by a webserver healthcheck handler. The worker is considered
    healthy when its last scheduled-flow-run poll (recorded in
    ``get_and_submit_flow_runs``) happened within
    ``query_interval_seconds * 30`` (so a 10s interval allows 5m).

    Args:
        query_interval_seconds: The polling interval used by the loop
            services.

    Returns:
        True if the worker polled within the allowed window.
    """
    threshold_seconds = query_interval_seconds * 30
    # Use total_seconds() rather than `.seconds`: `.seconds` is only the
    # seconds *component* of the timedelta (it drops the `days` field and
    # wraps every 24h), so a worker stalled for over a day could be
    # reported as healthy.
    seconds_since_last_poll = (
        prefect.types._datetime.now("UTC") - self._last_polled_time
    ).total_seconds()
    is_still_polling = seconds_since_last_poll <= threshold_seconds
    if not is_still_polling:
        self._logger.error(
            f"Worker has not polled in the last {seconds_since_last_poll} seconds "
            "and should be restarted"
        )
    return is_still_polling
async def get_and_submit_flow_runs(self) -> list["FlowRun"]:
    """Poll the work pool for scheduled flow runs and submit them.

    Records the poll time (used by the health check) and returns the flow
    runs that were accepted for submission.
    """
    responses = await self._get_scheduled_flow_runs()
    self._last_polled_time = prefect.types._datetime.now("UTC")
    return await self._submit_scheduled_flow_runs(flow_run_response=responses)
async def _update_local_work_pool_info(self) -> None:
    """Refresh the locally cached work pool from the API.

    If the pool does not exist it is optionally created (when
    ``_create_pool_if_not_found`` is set). A missing ``base_job_template``
    is back-filled server side, and a warning is logged when the remote
    pool type does not match this worker's type.
    """
    if TYPE_CHECKING:
        assert self._client is not None
    try:
        work_pool = await self._client.read_work_pool(
            work_pool_name=self._work_pool_name
        )
    except ObjectNotFound:
        if self._create_pool_if_not_found:
            wp = WorkPoolCreate(
                name=self._work_pool_name,
                type=self.type,
            )
            if self._base_job_template is not None:
                wp.base_job_template = self._base_job_template
            work_pool = await self._client.create_work_pool(work_pool=wp)
            self._logger.info(f"Work pool {self._work_pool_name!r} created.")
        else:
            self._logger.warning(f"Work pool {self._work_pool_name!r} not found!")
            # NOTE(review): this branch runs when the pool was *not* found,
            # yet the message below says "already exists" — looks like stale
            # wording; confirm intent.
            if self._base_job_template is not None:
                self._logger.warning(
                    "Ignoring supplied base job template because the work pool"
                    " already exists"
                )
            return
    # if the remote config type changes (or if it's being loaded for the
    # first time), check if it matches the local type and warn if not
    if getattr(self._work_pool, "type", 0) != work_pool.type:
        if work_pool.type != self.__class__.type:
            self._logger.warning(
                "Worker type mismatch! This worker process expects type "
                f"{self.type!r} but received {work_pool.type!r}"
                " from the server. Unexpected behavior may occur."
            )
    # once the work pool is loaded, verify that it has a `base_job_template` and
    # set it if not
    if not work_pool.base_job_template:
        job_template = self.__class__.get_default_base_job_template()
        await self._set_work_pool_template(work_pool, job_template)
        work_pool.base_job_template = job_template
    self._work_pool = work_pool
async def _worker_metadata(self) -> Optional[WorkerMetadata]:
    """Collect metadata about installed Prefect integration packages.

    Returns ``None`` when no known integration distribution is installed.
    """
    installed = load_prefect_collections().keys()
    integration_versions: list[Integration] = []
    for dist in distributions():
        metadata = dist.metadata
        if not metadata:
            continue
        name = metadata.get("Name")
        # PyPI packages often use dashes, but Python package names use
        # underscores because they must be valid identifiers.
        if name and name.replace("-", "_") in installed:
            integration_versions.append(
                Integration(name=metadata["Name"], version=dist.version)
            )
    if integration_versions:
        return WorkerMetadata(integrations=integration_versions)
    return None
async def _send_worker_heartbeat(self) -> Optional[UUID]:
    """
    Sends a heartbeat to the API.

    On Prefect Cloud, the first heartbeat also reports integration
    metadata. Returns the worker ID assigned by the server, or ``None``
    when the client/work pool is missing or no ID was granted.
    """
    if not self._client:
        self._logger.warning("Client has not been initialized; skipping heartbeat.")
        return None
    if not self._work_pool:
        self._logger.debug("Worker has no work pool; skipping heartbeat.")
        return None
    should_get_worker_id = self._should_get_worker_id()
    params: dict[str, Any] = {
        "work_pool_name": self._work_pool_name,
        "worker_name": self.name,
        "heartbeat_interval_seconds": self.heartbeat_interval_seconds,
        "get_worker_id": should_get_worker_id,
    }
    # Send integration metadata once per worker lifetime, Cloud only.
    if (
        self._client.server_type == ServerType.CLOUD
        and not self._worker_metadata_sent
    ):
        worker_metadata = await self._worker_metadata()
        if worker_metadata:
            params["worker_metadata"] = worker_metadata
            self._worker_metadata_sent = True
    worker_id = None
    try:
        worker_id = await self._client.send_worker_heartbeat(**params)
    except httpx.HTTPStatusError as e:
        # A 422 while requesting an ID: retry once without asking for one.
        if e.response.status_code == 422 and should_get_worker_id:
            self._logger.warning(
                "Failed to retrieve worker ID from the Prefect API server."
            )
            params["get_worker_id"] = False
            worker_id = await self._client.send_worker_heartbeat(**params)
        else:
            raise e
    if should_get_worker_id and worker_id is None:
        self._logger.warning(
            "Failed to retrieve worker ID from the Prefect API server."
        )
    return worker_id
async def sync_with_backend(self) -> None:
    """
    Updates the worker's local information about its current work pool and
    queues, and sends a worker heartbeat to the API.

    If the server assigns a worker ID, it is stored and the logger is
    rebuilt so subsequent records carry the backend identity.
    """
    await self._update_local_work_pool_info()
    remote_id = await self._send_worker_heartbeat()
    if remote_id:
        self.backend_id = remote_id
        # Recreate the logger so it is tagged with the backend worker ID.
        self._logger = get_worker_logger(self)
    self._logger.debug(
        "Worker synchronized with the Prefect API server. "
        + (f"Remote ID: {self.backend_id}" if self.backend_id else "")
    )
def _should_get_worker_id(self):
    """Determine whether to request a worker ID from the API server.

    An ID is only requested from Prefect Cloud, and only while this
    worker has not been assigned one yet.
    """
    client = self._client
    return (
        client
        and client.server_type == ServerType.CLOUD
        and self.backend_id is None
    )
async def _get_scheduled_flow_runs(
    self,
) -> list["WorkerFlowRunResponse"]:
    """
    Retrieve scheduled flow runs from the work pool's queues.

    Includes runs scheduled up to ``_prefetch_seconds`` in the future so
    infrastructure spin-up time can be absorbed. Returns an empty list
    when the work pool does not exist yet.
    """
    scheduled_before = prefect.types._datetime.now("UTC") + datetime.timedelta(
        seconds=int(self._prefetch_seconds)
    )
    self._logger.debug(
        f"Querying for flow runs scheduled before {scheduled_before}"
    )
    try:
        scheduled_flow_runs = (
            await self.client.get_scheduled_flow_runs_for_work_pool(
                work_pool_name=self._work_pool_name,
                scheduled_before=scheduled_before,
                work_queue_names=list(self._work_queues),
            )
        )
        self._logger.debug(
            f"Discovered {len(scheduled_flow_runs)} scheduled_flow_runs"
        )
        return scheduled_flow_runs
    except ObjectNotFound:
        # the pool doesn't exist; it will be created on the next
        # heartbeat (or an appropriate warning will be logged)
        return []
async def _submit_scheduled_flow_runs(
    self, flow_run_response: list["WorkerFlowRunResponse"]
) -> list["FlowRun"]:
    """
    Takes a list of WorkerFlowRunResponses and submits the referenced flow runs
    for execution by the worker.

    Runs already mid-submission are skipped; when the concurrency limit
    is reached, the remaining runs are left for a later poll. Returns the
    subset of runs accepted for submission.
    """
    submittable_flow_runs = [entry.flow_run for entry in flow_run_response]
    for flow_run in submittable_flow_runs:
        if flow_run.id in self._submitting_flow_run_ids:
            self._logger.debug(
                f"Skipping {flow_run.id} because it's already being submitted"
            )
            continue
        try:
            if self._limiter:
                self._limiter.acquire_on_behalf_of_nowait(flow_run.id)
        except anyio.WouldBlock:
            # Fixed: use `self._limiter` (the attribute used everywhere else
            # in this method); `self.limiter` is undefined and would raise
            # AttributeError exactly when the limit is hit.
            self._logger.debug(
                f"Flow run limit reached; {self._limiter.borrowed_tokens} flow runs"
                " in progress."
            )
            break
        else:
            run_logger = self.get_flow_run_logger(flow_run)
            run_logger.info(
                f"Worker '{self.name}' submitting flow run '{flow_run.id}'"
            )
            if self.backend_id:
                try:
                    worker_url = url_for(
                        "worker",
                        obj_id=self.backend_id,
                        work_pool_name=self._work_pool_name,
                    )
                    run_logger.info(
                        f"Running on worker id: {self.backend_id}. See worker logs here: {worker_url}"
                    )
                except ValueError as ve:
                    run_logger.warning(f"Failed to generate worker URL: {ve}")
            self._submitting_flow_run_ids.add(flow_run.id)
            if TYPE_CHECKING:
                assert self._runs_task_group is not None
            self._runs_task_group.start_soon(
                self._submit_run,
                flow_run,
            )
    return list(
        filter(
            lambda run: run.id in self._submitting_flow_run_ids,
            submittable_flow_runs,
        )
    )
async def _submit_run(self, flow_run: "FlowRun") -> None:
    """
    Submits a given flow run for execution by the worker.

    Verifies the deployment still exists, proposes a PENDING state, and
    hands the run off to the infrastructure. The concurrency slot is
    released when the run is not submitted.
    """
    run_logger = self.get_flow_run_logger(flow_run)
    if flow_run.deployment_id:
        try:
            await self.client.read_deployment(flow_run.deployment_id)
        except ObjectNotFound:
            self._logger.exception(
                f"Deployment {flow_run.deployment_id} no longer exists. "
                f"Flow run {flow_run.id} will not be submitted for"
                " execution"
            )
            self._submitting_flow_run_ids.remove(flow_run.id)
            await self._mark_flow_run_as_cancelled(
                flow_run,
                state_updates=dict(
                    message=f"Deployment {flow_run.deployment_id} no longer exists, cancelled run."
                ),
            )
            return
    ready_to_submit = await self._propose_pending_state(flow_run)
    self._logger.debug(f"Ready to submit {flow_run.id}: {ready_to_submit}")
    if ready_to_submit:
        if TYPE_CHECKING:
            assert self._runs_task_group is not None
        # Wait for the infrastructure to report readiness (e.g. a PID or
        # job identifier) so it can be recorded on the flow run.
        readiness_result = await self._runs_task_group.start(
            self._submit_run_and_capture_errors, flow_run
        )
        if readiness_result and not isinstance(readiness_result, Exception):
            try:
                await self.client.update_flow_run(
                    flow_run_id=flow_run.id,
                    infrastructure_pid=str(readiness_result),
                )
            except Exception:
                run_logger.exception(
                    "An error occurred while setting the `infrastructure_pid` on "
                    f"flow run {flow_run.id!r}. The flow run will "
                    "not be cancellable."
                )
            run_logger.info(f"Completed submission of flow run '{flow_run.id}'")
        else:
            # If the run is not ready to submit, release the concurrency slot
            self._release_limit_slot(flow_run.id)
    else:
        self._release_limit_slot(flow_run.id)
    self._submitting_flow_run_ids.remove(flow_run.id)
async def _submit_run_and_capture_errors(
    self,
    flow_run: "FlowRun",
    task_status: anyio.abc.TaskStatus[int | Exception] | None = None,
) -> BaseWorkerResult | Exception:
    """Run the flow on infrastructure, reporting readiness and errors.

    Emits submitted/executed events, always releases the concurrency
    slot, and proposes a CRASHED state when submission fails or the
    infrastructure exits non-zero. Returns the infrastructure result, or
    the raised exception on failure.
    """
    run_logger = self.get_flow_run_logger(flow_run)
    try:
        configuration = await self._get_configuration(flow_run)
        submitted_event = self._emit_flow_run_submitted_event(configuration)
        await self._give_worker_labels_to_flow_run(flow_run.id)
        result = await self.run(
            flow_run=flow_run,
            task_status=task_status,
            configuration=configuration,
        )
    except Exception as exc:
        # NOTE(review): `_future` is a private attribute of anyio's task
        # status, used to distinguish "failed before start" from "failed
        # while monitoring" — confirm against the pinned anyio version.
        if task_status and not getattr(task_status, "_future").done():
            # This flow run was being submitted and did not start successfully
            run_logger.exception(
                f"Failed to submit flow run '{flow_run.id}' to infrastructure."
            )
            # Mark the task as started to prevent agent crash
            task_status.started(exc)
            message = f"Flow run could not be submitted to infrastructure:\n{exc!r}"
            await self._propose_crashed_state(flow_run, message)
        else:
            run_logger.exception(
                f"An error occurred while monitoring flow run '{flow_run.id}'. "
                "The flow run will not be marked as failed, but an issue may have "
                "occurred."
            )
        return exc
    finally:
        # Free the slot on every path so the limiter never leaks capacity.
        self._release_limit_slot(flow_run.id)
    if task_status and not getattr(task_status, "_future").done():
        run_logger.error(
            f"Infrastructure returned without reporting flow run '{flow_run.id}' "
            "as started or raising an error. This behavior is not expected and "
            "generally indicates improper implementation of infrastructure. The "
            "flow run will not be marked as failed, but an issue may have occurred."
        )
        # Mark the task as started to prevent agent crash
        task_status.started(
            RuntimeError(
                "Infrastructure returned without reporting flow run as started or raising an error."
            )
        )
    if result.status_code != 0:
        await self._propose_crashed_state(
            flow_run,
            (
                "Flow run infrastructure exited with non-zero status code"
                f" {result.status_code}."
            ),
        )
    if submitted_event:
        self._emit_flow_run_executed_event(result, configuration, submitted_event)
    return result
def _release_limit_slot(self, flow_run_id: UUID) -> None:
    """
    Frees up a slot taken by the given flow run id.

    Double releases are tolerated: some error paths attempt cleanup more
    than once, and crashing the worker over an already-free slot would be
    worse than ignoring it.
    """
    if not self._limiter:
        return
    try:
        self._limiter.release_on_behalf_of(flow_run_id)
    except RuntimeError:
        # The slot was already freed by an earlier cleanup attempt.
        self._logger.debug(
            "Limit slot for flow run '%s' was already released", flow_run_id
        )
    else:
        self._logger.debug("Limit slot released for flow run '%s'", flow_run_id)
def get_status(self) -> dict[str, Any]:
    """
    Report the worker's current status.

    Includes the worker name, a JSON-serializable snapshot of its work
    pool (or ``None`` if no pool is loaded), and its local settings.
    """
    pool = self._work_pool
    return {
        "name": self.name,
        "work_pool": None if pool is None else pool.model_dump(mode="json"),
        "settings": {"prefetch_seconds": self._prefetch_seconds},
    }
async def _get_configuration(
    self,
    flow_run: "FlowRun",
    deployment: Optional["DeploymentResponse"] = None,
) -> C:
    """Build the job configuration for a flow run.

    Merges the deployment's job variables with the flow run's overrides
    (``env`` dicts are merged key-wise rather than replaced wholesale),
    renders them against the work pool's base job template, and prepares
    the configuration for the given flow run.

    Args:
        flow_run: The flow run to build a configuration for.
        deployment: The flow run's deployment, if already fetched;
            otherwise it is read from the API.

    Returns:
        The prepared job configuration.
    """
    if not deployment and flow_run.deployment_id:
        deployment = await self.client.read_deployment(flow_run.deployment_id)
    flow = await self.client.read_flow(flow_run.flow_id)
    deployment_vars = getattr(deployment, "job_variables", {}) or {}
    # Copy before mutating: the `pop("env")` below must not remove the key
    # from the dict attached to the flow run object itself.
    flow_run_vars = dict(flow_run.job_variables or {})
    job_variables = {**deployment_vars}
    # merge environment variables carefully, otherwise full override
    if isinstance(job_variables.get("env"), dict):
        # Build a new dict instead of mutating in place, so the deployment's
        # own `env` mapping (shared via the shallow copy above) is untouched.
        job_variables["env"] = {
            **job_variables["env"],
            **flow_run_vars.pop("env", {}),
        }
    job_variables.update(flow_run_vars)
    configuration = await self.job_configuration.from_template_and_values(
        base_job_template=self.work_pool.base_job_template,
        values=job_variables,
        client=self.client,
    )
    try:
        configuration.prepare_for_flow_run(
            flow_run=flow_run,
            deployment=deployment,
            flow=flow,
            work_pool=self.work_pool,
            worker_name=self.name,
        )
    except TypeError:
        warnings.warn(
            "This worker is missing the `work_pool` and `worker_name` arguments "
            "in its JobConfiguration.prepare_for_flow_run method. Please update "
            "the worker's JobConfiguration class to accept these arguments to "
            "avoid this warning.",
            category=PrefectDeprecationWarning,
        )
        # Handle older subclasses that don't accept work_pool and worker_name
        configuration.prepare_for_flow_run(
            flow_run=flow_run, deployment=deployment, flow=flow
        )
    return configuration
async def _propose_pending_state(self, flow_run: "FlowRun") -> bool:
    """Propose a PENDING state for the flow run before submission.

    Returns:
        True if the server accepted the PENDING transition; False when
        the server aborted it, returned a non-pending state, or the state
        update failed.
    """
    run_logger = self.get_flow_run_logger(flow_run)
    # (Removed a dead store of `flow_run.state` here: it was overwritten on
    # success and never read on any failure path.)
    try:
        state = await propose_state(self.client, Pending(), flow_run_id=flow_run.id)
    except Abort as exc:
        run_logger.info(
            (
                f"Aborted submission of flow run '{flow_run.id}'. "
                f"Server sent an abort signal: {exc}"
            ),
        )
        return False
    except Exception:
        run_logger.exception(
            f"Failed to update state of flow run '{flow_run.id}'",
        )
        return False
    if not state.is_pending():
        run_logger.info(
            (
                f"Aborted submission of flow run '{flow_run.id}': "
                f"Server returned a non-pending state {state.type.value!r}"
            ),
        )
        return False
    return True
async def _propose_failed_state(self, flow_run: "FlowRun", exc: Exception) -> None:
    """Report a submission failure as a FAILED state on the flow run.

    Args:
        flow_run: The flow run whose submission failed.
        exc: The exception that caused the failure.

    Abort signals are ignored (the run already failed) and other errors
    are logged rather than raised, so the worker keeps running.
    """
    run_logger = self.get_flow_run_logger(flow_run)
    try:
        await propose_state(
            self.client,
            await exception_to_failed_state(message="Submission failed.", exc=exc),
            flow_run_id=flow_run.id,
        )
    except Abort:
        # We've already failed, no need to note the abort but we don't want it to
        # raise in the agent process
        pass
    except Exception:
        run_logger.error(
            f"Failed to update state of flow run '{flow_run.id}'",
            exc_info=True,
        )
async def _propose_crashed_state(
    self, flow_run: "FlowRun", message: str, client: PrefectClient | None = None
) -> None:
    """Report the flow run as CRASHED with the given message.

    Args:
        flow_run: The flow run to update.
        message: Explanation recorded on the CRASHED state.
        client: Optional client override; defaults to the worker's client.

    Aborts, deleted flow runs, and other errors are logged and swallowed
    so a failed state update never crashes the worker.
    """
    run_logger = self.get_flow_run_logger(flow_run)
    try:
        state = await propose_state(
            client or self.client,
            Crashed(message=message),
            flow_run_id=flow_run.id,
        )
    except Abort:
        # Flow run already marked as failed
        pass
    except ObjectNotFound:
        # Flow run was deleted - log it but don't crash the worker
        run_logger.debug(
            f"Flow run '{flow_run.id}' was deleted before state could be updated"
        )
    except Exception:
        run_logger.exception(f"Failed to update state of flow run '{flow_run.id}'")
    else:
        if state.is_crashed():
            run_logger.info(
                f"Reported flow run '{flow_run.id}' as crashed: {message}"
            )
async def _mark_flow_run_as_cancelled(
    self, flow_run: "FlowRun", state_updates: dict[str, Any] | None = None
) -> None:
    """Force the flow run into a CANCELLED state.

    Args:
        flow_run: The flow run to cancel.
        state_updates: Optional overrides merged into the cancelled state
            (e.g. a custom message).
    """
    state_updates = state_updates or {}
    state_updates.setdefault("name", "Cancelled")
    if flow_run.state:
        state_updates.setdefault("type", StateType.CANCELLED)
        state = flow_run.state.model_copy(update=state_updates)
    else:
        # Unexpectedly when flow run does not have a state, create a new one
        # does not need to explicitly set the type
        state = Cancelled(**state_updates)
    try:
        await self.client.set_flow_run_state(flow_run.id, state, force=True)
    except ObjectNotFound:
        # Flow run was deleted - log it but don't crash the worker
        run_logger = self.get_flow_run_logger(flow_run)
        run_logger.debug(
            f"Flow run '{flow_run.id}' was deleted before it could be marked as cancelled"
        )
    # Do not remove the flow run from the cancelling set immediately because
    # the API caches responses for the `read_flow_runs` and we do not want to
    # duplicate cancellations.
    await self._schedule_task(
        60 * 10, self._cancelling_flow_run_ids.remove, flow_run.id
    )
async def _set_work_pool_template(
    self, work_pool: "WorkPool", job_template: dict[str, Any]
):
    """Updates the `base_job_template` for the worker's work pool server side."""
    update = WorkPoolUpdate(base_job_template=job_template)
    await self.client.update_work_pool(
        work_pool_name=work_pool.name,
        work_pool=update,
    )
async def _schedule_task(
    self, __in_seconds: int, fn: Callable[..., Any], *args: Any, **kwargs: Any
):
    """
    Schedule a background task to start after some time.

    These tasks will be run immediately when the worker exits instead of waiting.

    The function may be async or sync. Async functions will be awaited.

    Raises:
        RuntimeError: If the worker's task group has not been created yet
            (i.e. the worker was not used as an async context manager).
    """
    if not self._runs_task_group:
        raise RuntimeError(
            "Worker has not been correctly initialized. Please use the worker class as an async context manager."
        )

    async def wrapper(task_status: anyio.abc.TaskStatus[Any]):
        # If we are shutting down, do not sleep; otherwise sleep until the scheduled
        # time or shutdown
        if self.is_setup:
            # Track the cancel scope so `teardown` can cancel the pending
            # sleep and make `fn` run immediately.
            with anyio.CancelScope() as scope:
                self._scheduled_task_scopes.add(scope)
                task_status.started()
                await anyio.sleep(__in_seconds)
            self._scheduled_task_scopes.remove(scope)
        else:
            task_status.started()
        # Runs whether the sleep completed or was cancelled at shutdown.
        result = fn(*args, **kwargs)
        if asyncio.iscoroutine(result):
            await result

    await self._runs_task_group.start(wrapper)
async def _give_worker_labels_to_flow_run(self, flow_run_id: UUID):
    """
    Give this worker's identifying labels to the specified flow run.

    No-op when the worker has no API client yet.
    """
    if not self._client:
        return
    labels: KeyValueLabels = {
        "prefect.worker.name": self.name,
        "prefect.worker.type": self.type,
    }
    if self._work_pool:
        labels["prefect.work-pool.name"] = self._work_pool.name
        labels["prefect.work-pool.id"] = str(self._work_pool.id)
    await self._client.update_flow_run_labels(flow_run_id, labels)
async def __aenter__(self) -> Self:
    """Enter the async context: set up the worker and return it."""
    self._logger.debug("Entering worker context...")
    await self.setup()
    return self
async def __aexit__(self, *exc_info: Any) -> None:
    """Exit the async context, tearing the worker down.

    A single-member exception group raised during teardown is unwrapped
    so callers see the underlying exception directly.
    """
    try:
        self._logger.debug("Exiting worker context...")
        await self.teardown(*exc_info)
    except (ExceptionGroup, BaseExceptionGroup) as exc:
        # For less verbose tracebacks
        exceptions = exc.exceptions
        if len(exceptions) == 1:
            raise exceptions[0] from None
        else:
            raise
def __repr__(self) -> str:
    """Return a concise description of the worker for debugging."""
    return "Worker(pool={!r}, name={!r})".format(self._work_pool_name, self.name)
def _event_resource(self):
    """Build the event resource payload identifying this worker."""
    resource_id = f"prefect.worker.{self.type}.{self.get_name_slug()}"
    return {
        "prefect.resource.id": resource_id,
        "prefect.resource.name": self.name,
        "prefect.version": prefect.__version__,
        "prefect.worker-type": self.type,
    }
def _event_related_resources(
    self,
    configuration: BaseJobConfiguration | None = None,
    include_self: bool = False,
) -> list[RelatedResource]:
    """Collect resources related to this worker for event emission.

    Includes the configuration's related resources, the work pool (when
    loaded), and optionally the worker itself in the ``worker`` role.
    """
    related: list[RelatedResource] = []
    if configuration:
        related.extend(getattr(configuration, "_related_resources")())
    if self._work_pool:
        pool_resource = object_as_related_resource(
            kind="work-pool", role="work-pool", object=self._work_pool
        )
        related.append(pool_resource)
    if include_self:
        worker_resource = self._event_resource()
        worker_resource["prefect.resource.role"] = "worker"
        related.append(RelatedResource.model_validate(worker_resource))
    return related
def _emit_flow_run_submitted_event(
    self, configuration: BaseJobConfiguration
) -> Event | None:
    """Emit an event recording that this worker submitted a flow run."""
    return emit_event(
        event="prefect.worker.submitted-flow-run",
        resource=self._event_resource(),
        related=self._event_related_resources(configuration=configuration),
    )
def _emit_flow_run_executed_event(
    self,
    result: BaseWorkerResult,
    configuration: BaseJobConfiguration,
    submitted_event: Event | None = None,
):
    """Emit an event recording that the flow run's infrastructure finished.

    The flow-run related resource is annotated with the infrastructure
    identifier and exit status; the event follows the submitted event
    when one was emitted.
    """
    related = self._event_related_resources(configuration=configuration)
    for resource in related:
        if resource.role == "flow-run":
            resource["prefect.infrastructure.identifier"] = str(result.identifier)
            resource["prefect.infrastructure.status-code"] = str(result.status_code)
    emit_event(
        event="prefect.worker.executed-flow-run",
        resource=self._event_resource(),
        related=related,
        follows=submitted_event,
    )
async def _emit_worker_started_event(self) -> Event | None:
    """Emit an event recording that this worker started."""
    return emit_event(
        "prefect.worker.started",
        resource=self._event_resource(),
        related=self._event_related_resources(),
    )
async def _emit_worker_stopped_event(self, started_event: Event):
    """Emit an event recording that this worker stopped.

    Args:
        started_event: The start event this stop event follows.
    """
    emit_event(
        "prefect.worker.stopped",
        resource=self._event_resource(),
        related=self._event_related_resources(),
        follows=started_event,
    )
| BaseWorker |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B024.py | {
"start": 387,
"end": 453
} | class ____(ABC): # error
def method(self):
foo()
| Base_1 |
python | ray-project__ray | python/ray/data/datasource/file_datasink.py | {
"start": 9136,
"end": 11042
} | class ____(_FileDatasink):
"""A datasink that writes multiple rows to each file.
Subclasses must implement ``write_block_to_file`` and call the superclass
constructor.
Examples:
.. testcode::
class CSVDatasink(BlockBasedFileDatasink):
def __init__(self, path: str):
super().__init__(path, file_format="csv")
def write_block_to_file(self, block: BlockAccessor, file: "pyarrow.NativeFile"):
from pyarrow import csv
csv.write_csv(block.to_arrow(), file)
""" # noqa: E501
def __init__(
self, path, *, min_rows_per_file: Optional[int] = None, **file_datasink_kwargs
):
super().__init__(path, **file_datasink_kwargs)
self._min_rows_per_file = min_rows_per_file
def write_block_to_file(self, block: BlockAccessor, file: "pyarrow.NativeFile"):
"""Write a block of data to a file.
Args:
block: The block to write.
file: The file to write the block to.
"""
raise NotImplementedError
def write_block(self, block: BlockAccessor, block_index: int, ctx: TaskContext):
filename = self.filename_provider.get_filename_for_block(
block, ctx.kwargs[WRITE_UUID_KWARG_NAME], ctx.task_idx, block_index
)
write_path = posixpath.join(self.path, filename)
def write_block_to_path():
with self.open_output_stream(write_path) as file:
self.write_block_to_file(block, file)
logger.debug(f"Writing {write_path} file.")
call_with_retry(
write_block_to_path,
description=f"write '{write_path}'",
match=self._data_context.retried_io_errors,
)
@property
def min_rows_per_write(self) -> Optional[int]:
return self._min_rows_per_file
| BlockBasedFileDatasink |
python | wandb__wandb | wandb/vendor/pygments/lexers/javascript.py | {
"start": 57559,
"end": 60126
} | class ____(RegexLexer):
"""
For `Juttle`_ source code.
.. _Juttle: https://github.com/juttle/juttle
"""
name = 'Juttle'
aliases = ['juttle', 'juttle']
filenames = ['*.juttle']
mimetypes = ['application/juttle', 'application/x-juttle',
'text/x-juttle', 'text/juttle']
flags = re.DOTALL | re.UNICODE | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r':\d{2}:\d{2}:\d{2}(\.\d*)?:', String.Moment),
(r':(now|beginning|end|forever|yesterday|today|tomorrow|(\d+(\.\d*)?|\.\d+)(ms|[smhdwMy])?):', String.Moment),
(r':\d{4}-\d{2}-\d{2}(T\d{2}:\d{2}:\d{2}(\.\d*)?)?(Z|[+-]\d{2}:\d{2}|[+-]\d{4})?:', String.Moment),
(r':((\d+(\.\d*)?|\.\d+)[ ]+)?(millisecond|second|minute|hour|day|week|month|year)[s]?'
r'(([ ]+and[ ]+(\d+[ ]+)?(millisecond|second|minute|hour|day|week|month|year)[s]?)'
r'|[ ]+(ago|from[ ]+now))*:', String.Moment),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(import|return|continue|if|else)\b', Keyword, 'slashstartsregex'),
(r'(var|const|function|reducer|sub|input)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(batch|emit|filter|head|join|keep|pace|pass|put|read|reduce|remove|'
r'sequence|skip|sort|split|tail|unbatch|uniq|view|write)\b', Keyword.Reserved),
(r'(true|false|null|Infinity)\b', Keyword.Constant),
(r'(Array|Date|Juttle|Math|Number|Object|RegExp|String)\b', Name.Builtin),
(JS_IDENT, Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single)
]
}
| JuttleLexer |
python | facebook__pyre-check | client/commands/tests/subscription_test.py | {
"start": 316,
"end": 3684
} | class ____(testslide.TestCase):
def test_parse_response(self) -> None:
def assert_parsed(response: str, expected: subscription.Response) -> None:
self.assertEqual(
subscription.Response.parse(response),
expected,
)
def assert_not_parsed(response: str) -> None:
with self.assertRaises(incremental.InvalidServerResponse):
subscription.Response.parse(response)
assert_not_parsed("derp")
assert_not_parsed("{}")
assert_not_parsed("[]")
assert_not_parsed('["Error"]')
assert_not_parsed('{"name": "foo", "no_body": []}')
assert_not_parsed('{"body": [], "no_name": "foo"}')
assert_not_parsed('{"name": "foo", "body": ["Malformed"]}')
assert_not_parsed('{"name": "foo", "body": ["TypeErrors", {"errors": 42}]}')
assert_not_parsed('{"name": "foo", "body": ["StatusUpdate", 42]}')
assert_not_parsed('{"name": "foo", "body": ["StatusUpdate", []]}')
assert_parsed(
json.dumps({"name": "foo", "body": ["TypeErrors", []]}),
expected=subscription.Response(body=subscription.TypeErrors()),
)
assert_parsed(
json.dumps({"name": "foo", "body": ["TypeErrors", {}]}),
expected=subscription.Response(body=subscription.TypeErrors()),
)
assert_parsed(
json.dumps(
{
"name": "foo",
"body": [
"TypeErrors",
[
{
"line": 1,
"column": 1,
"stop_line": 2,
"stop_column": 2,
"path": "test.py",
"code": 42,
"name": "Fake name",
"description": "Fake description",
},
],
],
}
),
expected=subscription.Response(
body=subscription.TypeErrors(
[
error.Error(
line=1,
column=1,
stop_line=2,
stop_column=2,
path=Path("test.py"),
code=42,
name="Fake name",
description="Fake description",
),
]
),
),
)
assert_parsed(
json.dumps(
{
"name": "foo",
"body": ["StatusUpdate", ["derp"]],
}
),
expected=subscription.Response(
body=subscription.StatusUpdate(kind="derp"),
),
)
assert_parsed(
json.dumps(
{
"name": "foo",
"body": ["Error", "rip and tear!"],
}
),
expected=subscription.Response(
body=subscription.Error(message="rip and tear!"),
),
)
| SubscriptionTest |
python | openai__openai-python | src/openai/types/realtime/realtime_audio_input_turn_detection_param.py | {
"start": 2294,
"end": 3280
} | class ____(TypedDict, total=False):
type: Required[Literal["semantic_vad"]]
"""Type of turn detection, `semantic_vad` to turn on Semantic VAD."""
create_response: bool
"""
Whether or not to automatically generate a response when a VAD stop event
occurs.
"""
eagerness: Literal["low", "medium", "high", "auto"]
"""Used only for `semantic_vad` mode.
The eagerness of the model to respond. `low` will wait longer for the user to
continue speaking, `high` will respond more quickly. `auto` is the default and
is equivalent to `medium`. `low`, `medium`, and `high` have max timeouts of 8s,
4s, and 2s respectively.
"""
interrupt_response: bool
"""
Whether or not to automatically interrupt any ongoing response with output to
the default conversation (i.e. `conversation` of `auto`) when a VAD start event
occurs.
"""
RealtimeAudioInputTurnDetectionParam: TypeAlias = Union[ServerVad, SemanticVad]
| SemanticVad |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_hyperlink34.py | {
"start": 315,
"end": 1160
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink34.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("A1", self.image_dir + "blue.png")
worksheet.insert_image(
"B3", self.image_dir + "red.jpg", {"url": "https://github.com/jmcnamara"}
)
worksheet.insert_image("D5", self.image_dir + "yellow.jpg")
worksheet.insert_image(
"F9", self.image_dir + "grey.png", {"url": "https://github.com"}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 232704,
"end": 233209
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, pokemon_name: str):
"""Airbyte Source for Pokeapi.
Documentation can be found at https://docs.airbyte.com/integrations/sources/pokeapi
Args:
name (str): The name of the destination.
pokemon_name (str): Pokemon requested from the API.
"""
self.pokemon_name = check.str_param(pokemon_name, "pokemon_name")
super().__init__("Pokeapi", name)
| PokeapiSource |
python | RaRe-Technologies__gensim | gensim/models/word2vec.py | {
"start": 99064,
"end": 101661
} | class ____:
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
"""Like :class:`~gensim.models.word2vec.LineSentence`, but process all files in a directory
in alphabetical order by filename.
The directory must only contain files that can be read by :class:`gensim.models.word2vec.LineSentence`:
.bz2, .gz, and text files. Any file not ending with .bz2 or .gz is assumed to be a text file.
The format of files (either text, or compressed text files) in the path is one sentence = one line,
with words already preprocessed and separated by whitespace.
Warnings
--------
Does **not recurse** into subdirectories.
Parameters
----------
source : str
Path to the directory.
limit : int or None
Read only the first `limit` lines from each file. Read all if limit is None (the default).
"""
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
if os.path.isfile(self.source):
logger.debug('single file given as source, rather than a directory of files')
logger.debug('consider using models.word2vec.LineSentence for a single file')
self.input_files = [self.source] # force code compatibility with list of files
elif os.path.isdir(self.source):
self.source = os.path.join(self.source, '') # ensures os-specific slash at end of path
logger.info('reading directory %s', self.source)
self.input_files = os.listdir(self.source)
self.input_files = [self.source + filename for filename in self.input_files] # make full paths
self.input_files.sort() # makes sure it happens in filename order
else: # not a file or a directory, then we can't do anything with it
raise ValueError('input is neither a file nor a path')
logger.info('files read into PathLineSentences:%s', '\n'.join(self.input_files))
def __iter__(self):
"""iterate through the files"""
for file_name in self.input_files:
logger.info('reading file %s', file_name)
with utils.open(file_name, 'rb') as fin:
for line in itertools.islice(fin, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i:i + self.max_sentence_length]
i += self.max_sentence_length
| PathLineSentences |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py | {
"start": 6646,
"end": 6693
} | class ____(A):
def f(self):
return 2
| B |
python | encode__starlette | starlette/responses.py | {
"start": 9785,
"end": 21431
} | class ____(Response):
chunk_size = 64 * 1024
def __init__(
self,
path: str | os.PathLike[str],
status_code: int = 200,
headers: Mapping[str, str] | None = None,
media_type: str | None = None,
background: BackgroundTask | None = None,
filename: str | None = None,
stat_result: os.stat_result | None = None,
method: str | None = None,
content_disposition_type: str = "attachment",
) -> None:
self.path = path
self.status_code = status_code
self.filename = filename
if method is not None:
warnings.warn(
"The 'method' parameter is not used, and it will be removed.",
DeprecationWarning,
)
if media_type is None:
media_type = guess_type(filename or path)[0] or "text/plain"
self.media_type = media_type
self.background = background
self.init_headers(headers)
self.headers.setdefault("accept-ranges", "bytes")
if self.filename is not None:
content_disposition_filename = quote(self.filename)
if content_disposition_filename != self.filename:
content_disposition = f"{content_disposition_type}; filename*=utf-8''{content_disposition_filename}"
else:
content_disposition = f'{content_disposition_type}; filename="{self.filename}"'
self.headers.setdefault("content-disposition", content_disposition)
self.stat_result = stat_result
if stat_result is not None:
self.set_stat_headers(stat_result)
def set_stat_headers(self, stat_result: os.stat_result) -> None:
content_length = str(stat_result.st_size)
last_modified = formatdate(stat_result.st_mtime, usegmt=True)
etag_base = str(stat_result.st_mtime) + "-" + str(stat_result.st_size)
etag = f'"{hashlib.md5(etag_base.encode(), usedforsecurity=False).hexdigest()}"'
self.headers.setdefault("content-length", content_length)
self.headers.setdefault("last-modified", last_modified)
self.headers.setdefault("etag", etag)
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
send_header_only: bool = scope["method"].upper() == "HEAD"
send_pathsend: bool = "http.response.pathsend" in scope.get("extensions", {})
if self.stat_result is None:
try:
stat_result = await anyio.to_thread.run_sync(os.stat, self.path)
self.set_stat_headers(stat_result)
except FileNotFoundError:
raise RuntimeError(f"File at path {self.path} does not exist.")
else:
mode = stat_result.st_mode
if not stat.S_ISREG(mode):
raise RuntimeError(f"File at path {self.path} is not a file.")
else:
stat_result = self.stat_result
headers = Headers(scope=scope)
http_range = headers.get("range")
http_if_range = headers.get("if-range")
if http_range is None or (http_if_range is not None and not self._should_use_range(http_if_range)):
await self._handle_simple(send, send_header_only, send_pathsend)
else:
try:
ranges = self._parse_range_header(http_range, stat_result.st_size)
except MalformedRangeHeader as exc:
return await PlainTextResponse(exc.content, status_code=400)(scope, receive, send)
except RangeNotSatisfiable as exc:
response = PlainTextResponse(status_code=416, headers={"Content-Range": f"*/{exc.max_size}"})
return await response(scope, receive, send)
if len(ranges) == 1:
start, end = ranges[0]
await self._handle_single_range(send, start, end, stat_result.st_size, send_header_only)
else:
await self._handle_multiple_ranges(send, ranges, stat_result.st_size, send_header_only)
if self.background is not None:
await self.background()
async def _handle_simple(self, send: Send, send_header_only: bool, send_pathsend: bool) -> None:
await send({"type": "http.response.start", "status": self.status_code, "headers": self.raw_headers})
if send_header_only:
await send({"type": "http.response.body", "body": b"", "more_body": False})
elif send_pathsend:
await send({"type": "http.response.pathsend", "path": str(self.path)})
else:
async with await anyio.open_file(self.path, mode="rb") as file:
more_body = True
while more_body:
chunk = await file.read(self.chunk_size)
more_body = len(chunk) == self.chunk_size
await send({"type": "http.response.body", "body": chunk, "more_body": more_body})
async def _handle_single_range(
self, send: Send, start: int, end: int, file_size: int, send_header_only: bool
) -> None:
self.headers["content-range"] = f"bytes {start}-{end - 1}/{file_size}"
self.headers["content-length"] = str(end - start)
await send({"type": "http.response.start", "status": 206, "headers": self.raw_headers})
if send_header_only:
await send({"type": "http.response.body", "body": b"", "more_body": False})
else:
async with await anyio.open_file(self.path, mode="rb") as file:
await file.seek(start)
more_body = True
while more_body:
chunk = await file.read(min(self.chunk_size, end - start))
start += len(chunk)
more_body = len(chunk) == self.chunk_size and start < end
await send({"type": "http.response.body", "body": chunk, "more_body": more_body})
async def _handle_multiple_ranges(
self,
send: Send,
ranges: list[tuple[int, int]],
file_size: int,
send_header_only: bool,
) -> None:
# In firefox and chrome, they use boundary with 95-96 bits entropy (that's roughly 13 bytes).
boundary = token_hex(13)
content_length, header_generator = self.generate_multipart(
ranges, boundary, file_size, self.headers["content-type"]
)
self.headers["content-range"] = f"multipart/byteranges; boundary={boundary}"
self.headers["content-length"] = str(content_length)
await send({"type": "http.response.start", "status": 206, "headers": self.raw_headers})
if send_header_only:
await send({"type": "http.response.body", "body": b"", "more_body": False})
else:
async with await anyio.open_file(self.path, mode="rb") as file:
for start, end in ranges:
await send({"type": "http.response.body", "body": header_generator(start, end), "more_body": True})
await file.seek(start)
while start < end:
chunk = await file.read(min(self.chunk_size, end - start))
start += len(chunk)
await send({"type": "http.response.body", "body": chunk, "more_body": True})
await send({"type": "http.response.body", "body": b"\n", "more_body": True})
await send(
{
"type": "http.response.body",
"body": f"\n--{boundary}--\n".encode("latin-1"),
"more_body": False,
}
)
def _should_use_range(self, http_if_range: str) -> bool:
return http_if_range == self.headers["last-modified"] or http_if_range == self.headers["etag"]
@classmethod
def _parse_range_header(cls, http_range: str, file_size: int) -> list[tuple[int, int]]:
ranges: list[tuple[int, int]] = []
try:
units, range_ = http_range.split("=", 1)
except ValueError:
raise MalformedRangeHeader()
units = units.strip().lower()
if units != "bytes":
raise MalformedRangeHeader("Only support bytes range")
ranges = cls._parse_ranges(range_, file_size)
if len(ranges) == 0:
raise MalformedRangeHeader("Range header: range must be requested")
if any(not (0 <= start < file_size) for start, _ in ranges):
raise RangeNotSatisfiable(file_size)
if any(start > end for start, end in ranges):
raise MalformedRangeHeader("Range header: start must be less than end")
if len(ranges) == 1:
return ranges
# Merge ranges
result: list[tuple[int, int]] = []
for start, end in ranges:
for p in range(len(result)):
p_start, p_end = result[p]
if start > p_end:
continue
elif end < p_start:
result.insert(p, (start, end)) # THIS IS NOT REACHED!
break
else:
result[p] = (min(start, p_start), max(end, p_end))
break
else:
result.append((start, end))
return result
@classmethod
def _parse_ranges(cls, range_: str, file_size: int) -> list[tuple[int, int]]:
ranges: list[tuple[int, int]] = []
for part in range_.split(","):
part = part.strip()
# If the range is empty or a single dash, we ignore it.
if not part or part == "-":
continue
# If the range is not in the format "start-end", we ignore it.
if "-" not in part:
continue
start_str, end_str = part.split("-", 1)
start_str = start_str.strip()
end_str = end_str.strip()
try:
start = int(start_str) if start_str else file_size - int(end_str)
end = int(end_str) + 1 if start_str and end_str and int(end_str) < file_size else file_size
ranges.append((start, end))
except ValueError:
# If the range is not numeric, we ignore it.
continue
return ranges
def generate_multipart(
self,
ranges: Sequence[tuple[int, int]],
boundary: str,
max_size: int,
content_type: str,
) -> tuple[int, Callable[[int, int], bytes]]:
r"""
Multipart response headers generator.
```
--{boundary}\n
Content-Type: {content_type}\n
Content-Range: bytes {start}-{end-1}/{max_size}\n
\n
..........content...........\n
--{boundary}\n
Content-Type: {content_type}\n
Content-Range: bytes {start}-{end-1}/{max_size}\n
\n
..........content...........\n
--{boundary}--\n
```
"""
boundary_len = len(boundary)
static_header_part_len = 44 + boundary_len + len(content_type) + len(str(max_size))
content_length = sum(
(len(str(start)) + len(str(end - 1)) + static_header_part_len) # Headers
+ (end - start) # Content
for start, end in ranges
) + (
5 + boundary_len # --boundary--\n
)
return (
content_length,
lambda start, end: (
f"--{boundary}\nContent-Type: {content_type}\nContent-Range: bytes {start}-{end - 1}/{max_size}\n\n"
).encode("latin-1"),
)
| FileResponse |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_function_base.py | {
"start": 59825,
"end": 61070
} | class ____(TestCase):
def test_simple(self):
# check that unwrap removes jumps greater that 2*pi
assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
# check that unwrap maintains continuity
assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
def test_period(self):
# check that unwrap removes jumps greater that 255
assert_array_equal(unwrap([1, 1 + 256], period=255), [1, 2])
# check that unwrap maintains continuity
assert_(np.all(diff(unwrap(rand(10) * 1000, period=255)) < 255))
# check simple case
simple_seq = np.array([0, 75, 150, 225, 300])
wrap_seq = np.mod(simple_seq, 255)
assert_array_equal(unwrap(wrap_seq, period=255), simple_seq)
# check custom discont value
uneven_seq = np.array([0, 75, 150, 225, 300, 430])
wrap_uneven = np.mod(uneven_seq, 250)
no_discont = unwrap(wrap_uneven, period=250)
assert_array_equal(no_discont, [0, 75, 150, 225, 300, 180])
sm_discont = unwrap(wrap_uneven, period=250, discont=140)
assert_array_equal(sm_discont, [0, 75, 150, 225, 300, 430])
assert sm_discont.dtype == wrap_uneven.dtype
@instantiate_parametrized_tests
| TestUnwrap |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol53.py | {
"start": 880,
"end": 963
} | class ____(Proto_CoRecurs):
def m(self) -> Impl_CoSelf: ...
| Impl_CoOtherExplicit1 |
python | walkccc__LeetCode | solutions/482. License Key Formatting/482.py | {
"start": 0,
"end": 318
} | class ____:
def licenseKeyFormatting(self, s: str, k: int) -> str:
ans = []
length = 0
for i in reversed(range(len(s))):
if s[i] == '-':
continue
if length > 0 and length % k == 0:
ans += '-'
ans += s[i].upper()
length += 1
return ''.join(reversed(ans))
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.