language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | python-jsonschema__jsonschema | jsonschema/tests/_suite.py | {
"start": 5752,
"end": 8374
} | class ____:
version: Version
subject: str
case_description: str
description: str
data: Any
schema: Mapping[str, Any] | bool
valid: bool
_remotes: referencing.jsonschema.SchemaRegistry
comment: str | None = None
def __repr__(self): # pragma: no cover
return f"<Test {self.fully_qualified_name}>"
@property
def fully_qualified_name(self): # pragma: no cover
return " > ".join( # noqa: FLY002
[
self.version.name,
self.subject,
self.case_description,
self.description,
],
)
def to_unittest_method(self, skip=lambda test: None, **kwargs):
if self.valid:
def fn(this):
self.validate(**kwargs)
else:
def fn(this):
with this.assertRaises(jsonschema.ValidationError):
self.validate(**kwargs)
fn.__name__ = "_".join(
[
"test",
_DELIMITERS.sub("_", self.subject),
_DELIMITERS.sub("_", self.case_description),
_DELIMITERS.sub("_", self.description),
],
)
reason = skip(self)
if reason is None or os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0":
return fn
elif os.environ.get("JSON_SCHEMA_EXPECTED_FAILURES", "0") != "0": # pragma: no cover # noqa: E501
return unittest.expectedFailure(fn)
else:
return unittest.skip(reason)(fn)
def validate(self, Validator, **kwargs):
Validator.check_schema(self.schema)
validator = Validator(
schema=self.schema,
registry=self._remotes,
**kwargs,
)
if os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0": # pragma: no cover
breakpoint() # noqa: T100
validator.validate(instance=self.data)
def validate_ignoring_errors(self, Validator): # pragma: no cover
with suppress(jsonschema.ValidationError):
self.validate(Validator=Validator)
def _someone_save_us_the_module_of_the_caller():
"""
The FQON of the module 2nd stack frames up from here.
This is intended to allow us to dynamically return test case classes that
are indistinguishable from being defined in the module that wants them.
Otherwise, trial will mis-print the FQON, and copy pasting it won't re-run
the class that really is running.
Save us all, this is all so so so so so terrible.
"""
return sys._getframe(2).f_globals["__name__"]
| _Test |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 11489,
"end": 12270
} | class ____(NetworkError):
"""
Raised when the ecosystem with the given name was not found.
"""
def __init__(self, ecosystem: str, options: Optional[Collection[str]] = None):
self.ecosystem = ecosystem
self.options = options
message = f"No ecosystem named '{ecosystem}'."
if options:
close_matches = difflib.get_close_matches(ecosystem, options, cutoff=0.6)
if close_matches:
message = f"{message} Did you mean '{', '.join(close_matches)}'?"
else:
# No close matches. Show all the options.
options_str = "\n".join(sorted(options))
message = f"{message} Options:\n{options_str}"
super().__init__(message)
| EcosystemNotFoundError |
python | spyder-ide__spyder | spyder/plugins/editor/panels/classfunctiondropdown.py | {
"start": 662,
"end": 8287
} | class ____(Panel):
"""
Class and Function/Method Dropdowns Widget.
Parameters
----------
editor : :class:`spyder.plugins.editor.widgets.codeeditor.CodeEditor`
The editor to act on.
"""
def __init__(self):
super().__init__()
# Internal data
self._tree = IntervalTree()
self._data = None
self.classes = []
self.funcs = []
# Widgets
self.class_cb = SpyderComboBoxWithIcons(self)
self.method_cb = SpyderComboBoxWithIcons(self)
# Widget setup
self.class_cb.addItem(_('<None>'), 0)
self.method_cb.addItem(_('<None>'), 0)
# The layout
hbox = QHBoxLayout()
hbox.addWidget(self.class_cb)
hbox.addWidget(self.method_cb)
hbox.setSpacing(2 * AppStyle.MarginSize)
hbox.setContentsMargins(*((AppStyle.MarginSize,) * 4))
self.setLayout(hbox)
# Signals
self.class_cb.activated.connect(self.combobox_activated)
self.method_cb.activated.connect(self.combobox_activated)
def on_install(self, editor):
"""Manages install setup of the pane."""
super().on_install(editor)
# Define the editor
self._editor = editor
# Connect signals to the editor
self._editor.sig_cursor_position_changed.connect(
self._handle_cursor_position_change_event
)
def _getVerticalSize(self):
"""Get the default height of a QComboBox."""
return self.class_cb.height() + 2 * AppStyle.MarginSize
@Slot(int, int)
def _handle_cursor_position_change_event(self, linenum, column):
self.update_selected(linenum)
def sizeHint(self):
"""Override Qt method."""
return QSize(0, self._getVerticalSize())
def showEvent(self, event):
"""
Update contents in case there is available data and the widget hasn't
been updated.
"""
if self._data is not None and self.classes == [] and self.funcs == []:
self.update_data(self._data, force=True)
super().showEvent(event)
def combobox_activated(self):
"""Move the cursor to the selected definition."""
sender = self.sender()
item = sender.itemData(sender.currentIndex())
if item:
line = item['location']['range']['start']['line'] + 1
self.editor.go_to_line(line)
if sender == self.class_cb:
self.method_cb.setCurrentIndex(0)
def update_selected(self, linenum):
"""Updates the dropdowns to reflect the current class and function."""
possible_parents = list(sorted(self._tree[linenum]))
for iv in possible_parents:
item = iv.data
kind = item.get('kind')
if kind in [SymbolKind.CLASS]:
# Update class combobox
for idx in range(self.class_cb.count()):
if self.class_cb.itemData(idx) == item:
self.class_cb.setCurrentIndex(idx)
break
else:
self.class_cb.setCurrentIndex(0)
elif kind in [SymbolKind.FUNCTION, SymbolKind.METHOD]:
# Update func combobox
for idx in range(self.method_cb.count()):
if self.method_cb.itemData(idx) == item:
self.method_cb.setCurrentIndex(idx)
break
else:
self.method_cb.setCurrentIndex(0)
else:
continue
if len(possible_parents) == 0:
self.class_cb.setCurrentIndex(0)
self.method_cb.setCurrentIndex(0)
def populate(self, combobox, data, add_parents=False):
"""
Populate the given ``combobox`` with the class or function names.
Parameters
----------
combobox : :class:`spyder.api.widgets.comboboxes.SpyderComboBox`
The combobox to populate
data : list of :class:`dict`
The data to populate with. There should be one list element per
class or function definition in the file.
add_parents : bool
Add parents to name to create a fully qualified name.
Returns
-------
None
"""
combobox.clear()
combobox.addItem(_('<None>'), 0)
model = combobox.model()
item = model.item(0)
item.setFlags(Qt.NoItemFlags)
cb_data = []
for item in data:
fqn = item['name']
# Create a list of fully-qualified names if requested
if add_parents:
begin = item['location']['range']['start']['line']
end = item['location']['range']['end']['line']
possible_parents = sorted(self._tree.overlap(begin, end),
reverse=True)
for iv in possible_parents:
if iv.begin == begin and iv.end == end:
continue
# Check if it is a real parent
p_item = iv.data
p_begin = p_item['location']['range']['start']['line']
p_end = p_item['location']['range']['end']['line']
if p_begin <= begin and p_end >= end:
fqn = p_item['name'] + "." + fqn
cb_data.append((fqn, item))
for fqn, item in cb_data:
# Set the icon (See: editortools.py)
icon = None
name = item['name']
if item['kind'] in [SymbolKind.CLASS]:
icon = ima.icon('class')
else:
if name.startswith('__'):
icon = ima.icon('private2')
elif name.startswith('_'):
icon = ima.icon('private1')
else:
icon = ima.icon('method')
# Add the combobox item
if icon is not None:
combobox.addItem(icon, fqn, item)
else:
combobox.addItem(fqn, item)
line, __ = self._editor.get_cursor_line_column()
self.update_selected(line)
def set_data(self, data):
"""Set data in internal attribute to use it when necessary."""
self._data = data
def update_data(self, data, force=False):
"""Update and process symbol data."""
if not force and data == self._data:
return
self._data = data
self._tree.clear()
self.classes = []
self.funcs = []
for item in data:
line_start = item['location']['range']['start']['line']
line_end = item['location']['range']['end']['line']
kind = item.get('kind')
block = self._editor.document().findBlockByLineNumber(line_start)
line_text = line_text = block.text() if block else ''
# The symbol finder returns classes in import statements as well
# so we filter them out
if line_start != line_end and ' import ' not in line_text:
self._tree[line_start:line_end] = item
if kind in [SymbolKind.CLASS]:
self.classes.append(item)
elif kind in [SymbolKind.FUNCTION, SymbolKind.METHOD]:
self.funcs.append(item)
self.class_cb.clear()
self.method_cb.clear()
self.populate(self.class_cb, self.classes, add_parents=False)
self.populate(self.method_cb, self.funcs, add_parents=True)
| ClassFunctionDropdown |
python | h5py__h5py | h5py/_hl/base.py | {
"start": 13053,
"end": 13826
} | class ____(Mapping):
"""
Wraps a Group, AttributeManager or DimensionManager object to provide
an immutable mapping interface.
We don't inherit directly from MutableMapping because certain
subclasses, for example DimensionManager, are read-only.
"""
def keys(self):
""" Get a view object on member names """
return KeysViewHDF5(self)
def values(self):
""" Get a view object on member objects """
return ValuesViewHDF5(self)
def items(self):
""" Get a view object on member items """
return ItemsViewHDF5(self)
def _ipython_key_completions_(self):
""" Custom tab completions for __getitem__ in IPython >=5.0. """
return sorted(self.keys())
| MappingHDF5 |
python | django__django | tests/m2m_through/models.py | {
"start": 3386,
"end": 3673
} | class ____(models.Model):
name = models.CharField(max_length=5)
subordinates = models.ManyToManyField(
"self",
through="Relationship",
through_fields=("source", "target"),
symmetrical=False,
)
class Meta:
ordering = ("pk",)
| Employee |
python | openai__gym | gym/envs/mujoco/half_cheetah_v4.py | {
"start": 191,
"end": 13251
} | class ____(MujocoEnv, utils.EzPickle):
"""
### Description
This environment is based on the work by P. Wawrzyński in
["A Cat-Like Robot Real-Time Learning to Run"](http://staff.elka.pw.edu.pl/~pwawrzyn/pub-s/0812_LSCLRR.pdf).
The HalfCheetah is a 2-dimensional robot consisting of 9 links and 8
joints connecting them (including two paws). The goal is to apply a torque
on the joints to make the cheetah run forward (right) as fast as possible,
with a positive reward allocated based on the distance moved forward and a
negative reward allocated for moving backward. The torso and head of the
cheetah are fixed, and the torque can only be applied on the other 6 joints
over the front and back thighs (connecting to the torso), shins
(connecting to the thighs) and feet (connecting to the shins).
### Action Space
The action space is a `Box(-1, 1, (6,), float32)`. An action represents the torques applied between *links*.
| Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit |
| --- | --------------------------------------- | ----------- | ----------- | -------------------------------- | ----- | ------------ |
| 0 | Torque applied on the back thigh rotor | -1 | 1 | bthigh | hinge | torque (N m) |
| 1 | Torque applied on the back shin rotor | -1 | 1 | bshin | hinge | torque (N m) |
| 2 | Torque applied on the back foot rotor | -1 | 1 | bfoot | hinge | torque (N m) |
| 3 | Torque applied on the front thigh rotor | -1 | 1 | fthigh | hinge | torque (N m) |
| 4 | Torque applied on the front shin rotor | -1 | 1 | fshin | hinge | torque (N m) |
| 5 | Torque applied on the front foot rotor | -1 | 1 | ffoot | hinge | torque (N m) |
### Observation Space
Observations consist of positional values of different body parts of the
cheetah, followed by the velocities of those individual parts (their derivatives) with all the positions ordered before all the velocities.
By default, observations do not include the x-coordinate of the cheetah's center of mass. It may
be included by passing `exclude_current_positions_from_observation=False` during construction.
In that case, the observation space will have 18 dimensions where the first dimension
represents the x-coordinate of the cheetah's center of mass.
Regardless of whether `exclude_current_positions_from_observation` was set to true or false, the x-coordinate
will be returned in `info` with key `"x_position"`.
However, by default, the observation is a `ndarray` with shape `(17,)` where the elements correspond to the following:
| Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit |
| --- | ------------------------------------ | ---- | --- | -------------------------------- | ----- | ------------------------ |
| 0 | z-coordinate of the front tip | -Inf | Inf | rootz | slide | position (m) |
| 1 | angle of the front tip | -Inf | Inf | rooty | hinge | angle (rad) |
| 2 | angle of the second rotor | -Inf | Inf | bthigh | hinge | angle (rad) |
| 3 | angle of the second rotor | -Inf | Inf | bshin | hinge | angle (rad) |
| 4 | velocity of the tip along the x-axis | -Inf | Inf | bfoot | hinge | angle (rad) |
| 5 | velocity of the tip along the y-axis | -Inf | Inf | fthigh | hinge | angle (rad) |
| 6 | angular velocity of front tip | -Inf | Inf | fshin | hinge | angle (rad) |
| 7 | angular velocity of second rotor | -Inf | Inf | ffoot | hinge | angle (rad) |
| 8 | x-coordinate of the front tip | -Inf | Inf | rootx | slide | velocity (m/s) |
| 9 | y-coordinate of the front tip | -Inf | Inf | rootz | slide | velocity (m/s) |
| 10 | angle of the front tip | -Inf | Inf | rooty | hinge | angular velocity (rad/s) |
| 11 | angle of the second rotor | -Inf | Inf | bthigh | hinge | angular velocity (rad/s) |
| 12 | angle of the second rotor | -Inf | Inf | bshin | hinge | angular velocity (rad/s) |
| 13 | velocity of the tip along the x-axis | -Inf | Inf | bfoot | hinge | angular velocity (rad/s) |
| 14 | velocity of the tip along the y-axis | -Inf | Inf | fthigh | hinge | angular velocity (rad/s) |
| 15 | angular velocity of front tip | -Inf | Inf | fshin | hinge | angular velocity (rad/s) |
| 16 | angular velocity of second rotor | -Inf | Inf | ffoot | hinge | angular velocity (rad/s) |
### Rewards
The reward consists of two parts:
- *forward_reward*: A reward of moving forward which is measured
as *`forward_reward_weight` * (x-coordinate before action - x-coordinate after action)/dt*. *dt* is
the time between actions and is dependent on the frame_skip parameter
(fixed to 5), where the frametime is 0.01 - making the
default *dt = 5 * 0.01 = 0.05*. This reward would be positive if the cheetah
runs forward (right).
- *ctrl_cost*: A cost for penalising the cheetah if it takes
actions that are too large. It is measured as *`ctrl_cost_weight` *
sum(action<sup>2</sup>)* where *`ctrl_cost_weight`* is a parameter set for the
control and has a default value of 0.1
The total reward returned is ***reward*** *=* *forward_reward - ctrl_cost* and `info` will also contain the individual reward terms
### Starting State
All observations start in state (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,) with a noise added to the
initial state for stochasticity. As seen before, the first 8 values in the
state are positional and the last 9 values are velocity. A uniform noise in
the range of [-`reset_noise_scale`, `reset_noise_scale`] is added to the positional values while a standard
normal noise with a mean of 0 and standard deviation of `reset_noise_scale` is added to the
initial velocity values of all zeros.
### Episode End
The episode truncates when the episode length is greater than 1000.
### Arguments
No additional arguments are currently supported in v2 and lower.
```
env = gym.make('HalfCheetah-v2')
```
v3 and v4 take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc.
```
env = gym.make('HalfCheetah-v4', ctrl_cost_weight=0.1, ....)
```
| Parameter | Type | Default | Description |
| -------------------------------------------- | --------- | -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `xml_file` | **str** | `"half_cheetah.xml"` | Path to a MuJoCo model |
| `forward_reward_weight` | **float** | `1.0` | Weight for _forward_reward_ term (see section on reward) |
| `ctrl_cost_weight` | **float** | `0.1` | Weight for _ctrl_cost_ weight (see section on reward) |
| `reset_noise_scale` | **float** | `0.1` | Scale of random perturbations of initial position and velocity (see section on Starting State) |
| `exclude_current_positions_from_observation` | **bool** | `True` | Whether or not to omit the x-coordinate from observations. Excluding the position can serve as an inductive bias to induce position-agnostic behavior in policies |
### Version History
* v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3
* v3: support for gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. rgb rendering comes from tracking camera (so agent does not run away from screen)
* v2: All continuous control environments now use mujoco_py >= 1.50
* v1: max_time_steps raised to 1000 for robot based tasks. Added reward_threshold to environments.
* v0: Initial versions release (1.0.0)
"""
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
],
"render_fps": 20,
}
def __init__(
self,
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
**kwargs
):
utils.EzPickle.__init__(
self,
forward_reward_weight,
ctrl_cost_weight,
reset_noise_scale,
exclude_current_positions_from_observation,
**kwargs
)
self._forward_reward_weight = forward_reward_weight
self._ctrl_cost_weight = ctrl_cost_weight
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation
)
if exclude_current_positions_from_observation:
observation_space = Box(
low=-np.inf, high=np.inf, shape=(17,), dtype=np.float64
)
else:
observation_space = Box(
low=-np.inf, high=np.inf, shape=(18,), dtype=np.float64
)
MujocoEnv.__init__(
self, "half_cheetah.xml", 5, observation_space=observation_space, **kwargs
)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
x_position_before = self.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.data.qpos[0]
x_velocity = (x_position_after - x_position_before) / self.dt
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
reward = forward_reward - ctrl_cost
terminated = False
info = {
"x_position": x_position_after,
"x_velocity": x_velocity,
"reward_run": forward_reward,
"reward_ctrl": -ctrl_cost,
}
if self.render_mode == "human":
self.render()
return observation, reward, terminated, False, info
def _get_obs(self):
position = self.data.qpos.flat.copy()
velocity = self.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq
)
qvel = (
self.init_qvel
+ self._reset_noise_scale * self.np_random.standard_normal(self.model.nv)
)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
assert self.viewer is not None
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
| HalfCheetahEnv |
python | pytorch__pytorch | benchmarks/tensorexpr/softmax.py | {
"start": 48,
"end": 1370
} | class ____(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.dtype = dtype
self.inputs = [
self.randn(
[M, N], device=device, dtype=dtype, requires_grad=self.requires_grad
)
]
def forward(self, inputs):
x = self.add(inputs, 0.001)
y = self.softmax(x, dim=-1, dtype=self.dtype)
return y
def reference(self):
return scipy.special.softmax(self.numpy(self.inputs), axis=-1)
def config(self):
return [self.M, self.N]
@staticmethod
def module():
return "softmax"
def memory_workload(self):
if self.mode == "fwd":
sol_count = 1 + 1
algorithmic_count = 3 + 1
else:
sol_count = (1 + 1) + (1 + 1)
algorithmic_count = (3 + 1) + (3 + 1)
buffer_size = self.M * self.N
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
@staticmethod
def default_configs():
return [
[480, 20],
[1 << 15, 32],
[128, 1 << 16],
]
benchmark.register_benchmark_class(SoftmaxBench)
| SoftmaxBench |
python | pypa__packaging | src/packaging/version.py | {
"start": 4763,
"end": 12622
} | class ____(_BaseVersion):
"""This class abstracts handling of a project's versions.
A :class:`Version` instance is comparison aware and can be compared and
sorted using the standard Python interfaces.
>>> v1 = Version("1.0a5")
>>> v2 = Version("1.0")
>>> v1
<Version('1.0a5')>
>>> v2
<Version('1.0')>
>>> v1 < v2
True
>>> v1 == v2
False
>>> v1 > v2
False
>>> v1 >= v2
False
>>> v1 <= v2
True
"""
__slots__ = ("_dev", "_epoch", "_key_cache", "_local", "_post", "_pre", "_release")
_epoch: int
_release: tuple[int, ...]
_dev: tuple[str, int] | None
_pre: tuple[str, int] | None
_post: tuple[str, int] | None
_local: LocalType | None
_key_cache: CmpKey | None
_regex = re.compile(r"\s*" + VERSION_PATTERN + r"\s*", re.VERBOSE | re.IGNORECASE)
def __init__(self, version: str) -> None:
"""Initialize a Version object.
:param version:
The string representation of a version which will be parsed and normalized
before use.
:raises InvalidVersion:
If the ``version`` does not conform to PEP 440 in any way then this
exception will be raised.
"""
# Validate the version and parse it into pieces
match = self._regex.fullmatch(version)
if not match:
raise InvalidVersion(f"Invalid version: {version!r}")
self._epoch = int(match.group("epoch")) if match.group("epoch") else 0
self._release = tuple(map(int, match.group("release").split(".")))
self._pre = _parse_letter_version(match.group("pre_l"), match.group("pre_n"))
self._post = _parse_letter_version(
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
)
self._dev = _parse_letter_version(match.group("dev_l"), match.group("dev_n"))
self._local = _parse_local_version(match.group("local"))
# Key which will be used for sorting
self._key_cache = None
@property
def _key(self) -> CmpKey:
if self._key_cache is None:
self._key_cache = _cmpkey(
self._epoch,
self._release,
self._pre,
self._post,
self._dev,
self._local,
)
return self._key_cache
def __repr__(self) -> str:
"""A representation of the Version that shows all internal state.
>>> Version('1.0.0')
<Version('1.0.0')>
"""
return f"<Version('{self}')>"
def __str__(self) -> str:
"""A string representation of the version that can be round-tripped.
>>> str(Version("1.0a5"))
'1.0a5'
"""
parts = [self.base_version]
# Pre-release
if self.pre is not None:
parts.append("".join(map(str, self.pre)))
# Post-release
if self.post is not None:
parts.append(f".post{self.post}")
# Development release
if self.dev is not None:
parts.append(f".dev{self.dev}")
# Local version segment
if self.local is not None:
parts.append(f"+{self.local}")
return "".join(parts)
@property
def epoch(self) -> int:
"""The epoch of the version.
>>> Version("2.0.0").epoch
0
>>> Version("1!2.0.0").epoch
1
"""
return self._epoch
@property
def release(self) -> tuple[int, ...]:
"""The components of the "release" segment of the version.
>>> Version("1.2.3").release
(1, 2, 3)
>>> Version("2.0.0").release
(2, 0, 0)
>>> Version("1!2.0.0.post0").release
(2, 0, 0)
Includes trailing zeroes but not the epoch or any pre-release / development /
post-release suffixes.
"""
return self._release
@property
def pre(self) -> tuple[str, int] | None:
"""The pre-release segment of the version.
>>> print(Version("1.2.3").pre)
None
>>> Version("1.2.3a1").pre
('a', 1)
>>> Version("1.2.3b1").pre
('b', 1)
>>> Version("1.2.3rc1").pre
('rc', 1)
"""
return self._pre
@property
def post(self) -> int | None:
"""The post-release number of the version.
>>> print(Version("1.2.3").post)
None
>>> Version("1.2.3.post1").post
1
"""
return self._post[1] if self._post else None
@property
def dev(self) -> int | None:
"""The development number of the version.
>>> print(Version("1.2.3").dev)
None
>>> Version("1.2.3.dev1").dev
1
"""
return self._dev[1] if self._dev else None
@property
def local(self) -> str | None:
"""The local version segment of the version.
>>> print(Version("1.2.3").local)
None
>>> Version("1.2.3+abc").local
'abc'
"""
if self._local:
return ".".join(str(x) for x in self._local)
else:
return None
@property
def public(self) -> str:
"""The public portion of the version.
>>> Version("1.2.3").public
'1.2.3'
>>> Version("1.2.3+abc").public
'1.2.3'
>>> Version("1!1.2.3dev1+abc").public
'1!1.2.3.dev1'
"""
return str(self).split("+", 1)[0]
@property
def base_version(self) -> str:
"""The "base version" of the version.
>>> Version("1.2.3").base_version
'1.2.3'
>>> Version("1.2.3+abc").base_version
'1.2.3'
>>> Version("1!1.2.3dev1+abc").base_version
'1!1.2.3'
The "base version" is the public version of the project without any pre or post
release markers.
"""
release_segment = ".".join(map(str, self.release))
return f"{self.epoch}!{release_segment}" if self.epoch else release_segment
@property
def is_prerelease(self) -> bool:
"""Whether this version is a pre-release.
>>> Version("1.2.3").is_prerelease
False
>>> Version("1.2.3a1").is_prerelease
True
>>> Version("1.2.3b1").is_prerelease
True
>>> Version("1.2.3rc1").is_prerelease
True
>>> Version("1.2.3dev1").is_prerelease
True
"""
return self.dev is not None or self.pre is not None
@property
def is_postrelease(self) -> bool:
"""Whether this version is a post-release.
>>> Version("1.2.3").is_postrelease
False
>>> Version("1.2.3.post1").is_postrelease
True
"""
return self.post is not None
@property
def is_devrelease(self) -> bool:
"""Whether this version is a development release.
>>> Version("1.2.3").is_devrelease
False
>>> Version("1.2.3.dev1").is_devrelease
True
"""
return self.dev is not None
@property
def major(self) -> int:
"""The first item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").major
1
"""
return self.release[0] if len(self.release) >= 1 else 0
@property
def minor(self) -> int:
"""The second item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").minor
2
>>> Version("1").minor
0
"""
return self.release[1] if len(self.release) >= 2 else 0
@property
def micro(self) -> int:
"""The third item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").micro
3
>>> Version("1").micro
0
"""
return self.release[2] if len(self.release) >= 3 else 0
| Version |
python | openai__gym | gym/envs/box2d/bipedal_walker.py | {
"start": 2418,
"end": 27451
} | class ____(gym.Env, EzPickle):
"""
### Description
This is a simple 4-joint walker robot environment.
There are two versions:
- Normal, with slightly uneven terrain.
- Hardcore, with ladders, stumps, pitfalls.
To solve the normal version, you need to get 300 points in 1600 time steps.
To solve the hardcore version, you need 300 points in 2000 time steps.
A heuristic is provided for testing. It's also useful to get demonstrations
to learn from. To run the heuristic:
```
python gym/envs/box2d/bipedal_walker.py
```
### Action Space
Actions are motor speed values in the [-1, 1] range for each of the
4 joints at both hips and knees.
### Observation Space
State consists of hull angle speed, angular velocity, horizontal speed,
vertical speed, position of joints and joints angular speed, legs contact
with ground, and 10 lidar rangefinder measurements. There are no coordinates
in the state vector.
### Rewards
Reward is given for moving forward, totaling 300+ points up to the far end.
If the robot falls, it gets -100. Applying motor torque costs a small
amount of points. A more optimal agent will get a better score.
### Starting State
The walker starts standing at the left end of the terrain with the hull
horizontal, and both legs in the same position with a slight knee angle.
### Episode Termination
The episode will terminate if the hull gets in contact with the ground or
if the walker exceeds the right end of the terrain length.
### Arguments
To use to the _hardcore_ environment, you need to specify the
`hardcore=True` argument like below:
```python
import gym
env = gym.make("BipedalWalker-v3", hardcore=True)
```
### Version History
- v3: returns closest lidar trace instead of furthest;
faster video recording
- v2: Count energy spent
- v1: Legs now report contact with ground; motors have higher torque and
speed; ground has higher friction; lidar rendered less nervously.
- v0: Initial version
<!-- ### References -->
### Credits
Created by Oleg Klimov
"""
metadata = {
"render_modes": ["human", "rgb_array"],
"render_fps": FPS,
}
def __init__(self, render_mode: Optional[str] = None, hardcore: bool = False):
EzPickle.__init__(self, render_mode, hardcore)
self.isopen = True
self.world = Box2D.b2World()
self.terrain: List[Box2D.b2Body] = []
self.hull: Optional[Box2D.b2Body] = None
self.prev_shaping = None
self.hardcore = hardcore
self.fd_polygon = fixtureDef(
shape=polygonShape(vertices=[(0, 0), (1, 0), (1, -1), (0, -1)]),
friction=FRICTION,
)
self.fd_edge = fixtureDef(
shape=edgeShape(vertices=[(0, 0), (1, 1)]),
friction=FRICTION,
categoryBits=0x0001,
)
# we use 5.0 to represent the joints moving at maximum
# 5 x the rated speed due to impulses from ground contact etc.
low = np.array(
[
-math.pi,
-5.0,
-5.0,
-5.0,
-math.pi,
-5.0,
-math.pi,
-5.0,
-0.0,
-math.pi,
-5.0,
-math.pi,
-5.0,
-0.0,
]
+ [-1.0] * 10
).astype(np.float32)
high = np.array(
[
math.pi,
5.0,
5.0,
5.0,
math.pi,
5.0,
math.pi,
5.0,
5.0,
math.pi,
5.0,
math.pi,
5.0,
5.0,
]
+ [1.0] * 10
).astype(np.float32)
self.action_space = spaces.Box(
np.array([-1, -1, -1, -1]).astype(np.float32),
np.array([1, 1, 1, 1]).astype(np.float32),
)
self.observation_space = spaces.Box(low, high)
# state = [
# self.hull.angle, # Normal angles up to 0.5 here, but sure more is possible.
# 2.0 * self.hull.angularVelocity / FPS,
# 0.3 * vel.x * (VIEWPORT_W / SCALE) / FPS, # Normalized to get -1..1 range
# 0.3 * vel.y * (VIEWPORT_H / SCALE) / FPS,
# self.joints[
# 0
# ].angle, # This will give 1.1 on high up, but it's still OK (and there should be spikes on hiting the ground, that's normal too)
# self.joints[0].speed / SPEED_HIP,
# self.joints[1].angle + 1.0,
# self.joints[1].speed / SPEED_KNEE,
# 1.0 if self.legs[1].ground_contact else 0.0,
# self.joints[2].angle,
# self.joints[2].speed / SPEED_HIP,
# self.joints[3].angle + 1.0,
# self.joints[3].speed / SPEED_KNEE,
# 1.0 if self.legs[3].ground_contact else 0.0,
# ]
# state += [l.fraction for l in self.lidar]
self.render_mode = render_mode
self.screen: Optional[pygame.Surface] = None
self.clock = None
def _destroy(self):
if not self.terrain:
return
self.world.contactListener = None
for t in self.terrain:
self.world.DestroyBody(t)
self.terrain = []
self.world.DestroyBody(self.hull)
self.hull = None
for leg in self.legs:
self.world.DestroyBody(leg)
self.legs = []
self.joints = []
    def _generate_terrain(self, hardcore):
        """Generate the terrain height profile and its static Box2D bodies.

        Walks ``TERRAIN_LENGTH`` grid steps left to right driving a small
        state machine.  GRASS smoothly varies the height; when ``hardcore``
        is true the machine also visits STUMP, STAIRS and PIT, each of which
        creates static obstacle bodies on the first step after it is entered
        (signalled by the ``oneshot`` flag).  Fills ``self.terrain`` (Box2D
        bodies), ``self.terrain_x`` / ``self.terrain_y`` (height profile) and
        ``self.terrain_poly`` (fill polygons used by render).
        """
        GRASS, STUMP, STAIRS, PIT, _STATES_ = range(5)
        state = GRASS
        velocity = 0.0
        y = TERRAIN_HEIGHT
        # counter: grid steps remaining before the next state transition.
        counter = TERRAIN_STARTPAD
        # oneshot marks the first step after entering a state — obstacle
        # geometry is created exactly once, on that step.
        oneshot = False
        self.terrain = []
        self.terrain_x = []
        self.terrain_y = []
        stair_steps, stair_width, stair_height = 0, 0, 0
        original_y = 0
        for i in range(TERRAIN_LENGTH):
            x = i * TERRAIN_STEP
            self.terrain_x.append(x)
            if state == GRASS and not oneshot:
                # Smoothed random walk, pulled back toward TERRAIN_HEIGHT.
                velocity = 0.8 * velocity + 0.01 * np.sign(TERRAIN_HEIGHT - y)
                if i > TERRAIN_STARTPAD:
                    velocity += self.np_random.uniform(-1, 1) / SCALE  # noise only after the flat start pad
                y += velocity
            elif state == PIT and oneshot:
                # Near rim block of a pit; the pit is `counter` steps wide.
                counter = self.np_random.integers(3, 5)
                poly = [
                    (x, y),
                    (x + TERRAIN_STEP, y),
                    (x + TERRAIN_STEP, y - 4 * TERRAIN_STEP),
                    (x, y - 4 * TERRAIN_STEP),
                ]
                self.fd_polygon.shape.vertices = poly
                t = self.world.CreateStaticBody(fixtures=self.fd_polygon)
                t.color1, t.color2 = (255, 255, 255), (153, 153, 153)
                self.terrain.append(t)
                # Far rim: same block shifted right by the pit width.
                self.fd_polygon.shape.vertices = [
                    (p[0] + TERRAIN_STEP * counter, p[1]) for p in poly
                ]
                t = self.world.CreateStaticBody(fixtures=self.fd_polygon)
                t.color1, t.color2 = (255, 255, 255), (153, 153, 153)
                self.terrain.append(t)
                counter += 2
                original_y = y
            elif state == PIT and not oneshot:
                # While inside the pit the profile sits 4 steps below the rim.
                y = original_y
                if counter > 1:
                    y -= 4 * TERRAIN_STEP
            elif state == STUMP and oneshot:
                # Square stump obstacle with a random side of 1 or 2 steps.
                counter = self.np_random.integers(1, 3)
                poly = [
                    (x, y),
                    (x + counter * TERRAIN_STEP, y),
                    (x + counter * TERRAIN_STEP, y + counter * TERRAIN_STEP),
                    (x, y + counter * TERRAIN_STEP),
                ]
                self.fd_polygon.shape.vertices = poly
                t = self.world.CreateStaticBody(fixtures=self.fd_polygon)
                t.color1, t.color2 = (255, 255, 255), (153, 153, 153)
                self.terrain.append(t)
            elif state == STAIRS and oneshot:
                # Flight of 3-4 stairs, ascending or descending with equal
                # probability; each tread is stair_width steps wide.
                stair_height = +1 if self.np_random.random() > 0.5 else -1
                stair_width = self.np_random.integers(4, 5)
                stair_steps = self.np_random.integers(3, 5)
                original_y = y
                for s in range(stair_steps):
                    poly = [
                        (
                            x + (s * stair_width) * TERRAIN_STEP,
                            y + (s * stair_height) * TERRAIN_STEP,
                        ),
                        (
                            x + ((1 + s) * stair_width) * TERRAIN_STEP,
                            y + (s * stair_height) * TERRAIN_STEP,
                        ),
                        (
                            x + ((1 + s) * stair_width) * TERRAIN_STEP,
                            y + (-1 + s * stair_height) * TERRAIN_STEP,
                        ),
                        (
                            x + (s * stair_width) * TERRAIN_STEP,
                            y + (-1 + s * stair_height) * TERRAIN_STEP,
                        ),
                    ]
                    self.fd_polygon.shape.vertices = poly
                    t = self.world.CreateStaticBody(fixtures=self.fd_polygon)
                    t.color1, t.color2 = (255, 255, 255), (153, 153, 153)
                    self.terrain.append(t)
                counter = stair_steps * stair_width
            elif state == STAIRS and not oneshot:
                # Follow the profile along the current stair tread.
                s = stair_steps * stair_width - counter - stair_height
                n = s / stair_width
                y = original_y + (n * stair_height) * TERRAIN_STEP
            oneshot = False
            self.terrain_y.append(y)
            counter -= 1
            if counter == 0:
                # State transition: schedule a grass stretch of random length;
                # in hardcore mode a random obstacle state follows GRASS.
                counter = self.np_random.integers(TERRAIN_GRASS / 2, TERRAIN_GRASS)
                if state == GRASS and hardcore:
                    state = self.np_random.integers(1, _STATES_)
                    oneshot = True
                else:
                    state = GRASS
                    oneshot = True
        # Build walkable edge bodies plus the fill polygons used for drawing.
        self.terrain_poly = []
        for i in range(TERRAIN_LENGTH - 1):
            poly = [
                (self.terrain_x[i], self.terrain_y[i]),
                (self.terrain_x[i + 1], self.terrain_y[i + 1]),
            ]
            self.fd_edge.shape.vertices = poly
            t = self.world.CreateStaticBody(fixtures=self.fd_edge)
            # Alternate two greens so individual segments are visible.
            color = (76, 255 if i % 2 == 0 else 204, 76)
            t.color1 = color
            t.color2 = color
            self.terrain.append(t)
            color = (102, 153, 76)
            # Close the polygon down to y=0 so render can fill below the edge.
            poly += [(poly[1][0], 0), (poly[0][0], 0)]
            self.terrain_poly.append((poly, color))
        self.terrain.reverse()
def _generate_clouds(self):
# Sorry for the clouds, couldn't resist
self.cloud_poly = []
for i in range(TERRAIN_LENGTH // 20):
x = self.np_random.uniform(0, TERRAIN_LENGTH) * TERRAIN_STEP
y = VIEWPORT_H / SCALE * 3 / 4
poly = [
(
x
+ 15 * TERRAIN_STEP * math.sin(3.14 * 2 * a / 5)
+ self.np_random.uniform(0, 5 * TERRAIN_STEP),
y
+ 5 * TERRAIN_STEP * math.cos(3.14 * 2 * a / 5)
+ self.np_random.uniform(0, 5 * TERRAIN_STEP),
)
for a in range(5)
]
x1 = min(p[0] for p in poly)
x2 = max(p[0] for p in poly)
self.cloud_poly.append((poly, x1, x2))
    def reset(
        self,
        *,
        seed: Optional[int] = None,
        options: Optional[dict] = None,
    ):
        """Start a new episode: rebuild terrain, hull and legs.

        Returns the initial observation (obtained by applying one zero
        action via ``step``) and an empty info dict.
        """
        super().reset(seed=seed)
        self._destroy()
        # Keep our own reference to the listener so it is not garbage
        # collected while the Box2D world still points at it.
        self.world.contactListener_bug_workaround = ContactDetector(self)
        self.world.contactListener = self.world.contactListener_bug_workaround
        self.game_over = False
        self.prev_shaping = None
        self.scroll = 0.0
        self.lidar_render = 0
        self.generate_terrain(self.hardcore)
        self._generate_clouds()
        # Spawn the hull above the middle of the flat starting pad.
        init_x = TERRAIN_STEP * TERRAIN_STARTPAD / 2
        init_y = TERRAIN_HEIGHT + 2 * LEG_H
        self.hull = self.world.CreateDynamicBody(
            position=(init_x, init_y), fixtures=HULL_FD
        )
        self.hull.color1 = (127, 51, 229)
        self.hull.color2 = (76, 76, 127)
        # Random initial nudge so episodes do not all start identically.
        self.hull.ApplyForceToCenter(
            (self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), 0), True
        )
        self.legs: List[Box2D.b2Body] = []
        self.joints: List[Box2D.b2RevoluteJoint] = []
        # Two leg pairs (i = -1 and +1): an upper leg hinged to the hull
        # plus a lower leg hinged to the upper one.
        for i in [-1, +1]:
            leg = self.world.CreateDynamicBody(
                position=(init_x, init_y - LEG_H / 2 - LEG_DOWN),
                angle=(i * 0.05),
                fixtures=LEG_FD,
            )
            leg.color1 = (153 - i * 25, 76 - i * 25, 127 - i * 25)
            leg.color2 = (102 - i * 25, 51 - i * 25, 76 - i * 25)
            # Hip joint (hull <-> upper leg).
            rjd = revoluteJointDef(
                bodyA=self.hull,
                bodyB=leg,
                localAnchorA=(0, LEG_DOWN),
                localAnchorB=(0, LEG_H / 2),
                enableMotor=True,
                enableLimit=True,
                maxMotorTorque=MOTORS_TORQUE,
                motorSpeed=i,
                lowerAngle=-0.8,
                upperAngle=1.1,
            )
            self.legs.append(leg)
            self.joints.append(self.world.CreateJoint(rjd))
            lower = self.world.CreateDynamicBody(
                position=(init_x, init_y - LEG_H * 3 / 2 - LEG_DOWN),
                angle=(i * 0.05),
                fixtures=LOWER_FD,
            )
            lower.color1 = (153 - i * 25, 76 - i * 25, 127 - i * 25)
            lower.color2 = (102 - i * 25, 51 - i * 25, 76 - i * 25)
            # Knee joint (upper leg <-> lower leg).
            rjd = revoluteJointDef(
                bodyA=leg,
                bodyB=lower,
                localAnchorA=(0, -LEG_H / 2),
                localAnchorB=(0, LEG_H / 2),
                enableMotor=True,
                enableLimit=True,
                maxMotorTorque=MOTORS_TORQUE,
                motorSpeed=1,
                lowerAngle=-1.6,
                upperAngle=-0.1,
            )
            # Only the lower segments (self.legs[1] and self.legs[3]) carry
            # ground_contact; step reads it, presumably ContactDetector sets
            # it — confirm against the listener implementation.
            lower.ground_contact = False
            self.legs.append(lower)
            self.joints.append(self.world.CreateJoint(rjd))
        self.drawlist = self.terrain + self.legs + [self.hull]
        class LidarCallback(Box2D.b2.rayCastCallback):
            # Ray-cast callback recording the nearest hit on fixtures whose
            # filter category has bit 0 set (terrain); everything else is
            # ignored by returning -1.
            def ReportFixture(self, fixture, point, normal, fraction):
                if (fixture.filterData.categoryBits & 1) == 0:
                    return -1
                self.p2 = point
                self.fraction = fraction
                return fraction
        self.lidar = [LidarCallback() for _ in range(10)]
        if self.render_mode == "human":
            self.render()
        # One zero-action physics step produces the initial observation.
        return self.step(np.array([0, 0, 0, 0]))[0], {}
def step(self, action: np.ndarray):
assert self.hull is not None
# self.hull.ApplyForceToCenter((0, 20), True) -- Uncomment this to receive a bit of stability help
control_speed = False # Should be easier as well
if control_speed:
self.joints[0].motorSpeed = float(SPEED_HIP * np.clip(action[0], -1, 1))
self.joints[1].motorSpeed = float(SPEED_KNEE * np.clip(action[1], -1, 1))
self.joints[2].motorSpeed = float(SPEED_HIP * np.clip(action[2], -1, 1))
self.joints[3].motorSpeed = float(SPEED_KNEE * np.clip(action[3], -1, 1))
else:
self.joints[0].motorSpeed = float(SPEED_HIP * np.sign(action[0]))
self.joints[0].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[0]), 0, 1)
)
self.joints[1].motorSpeed = float(SPEED_KNEE * np.sign(action[1]))
self.joints[1].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[1]), 0, 1)
)
self.joints[2].motorSpeed = float(SPEED_HIP * np.sign(action[2]))
self.joints[2].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[2]), 0, 1)
)
self.joints[3].motorSpeed = float(SPEED_KNEE * np.sign(action[3]))
self.joints[3].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[3]), 0, 1)
)
self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)
pos = self.hull.position
vel = self.hull.linearVelocity
for i in range(10):
self.lidar[i].fraction = 1.0
self.lidar[i].p1 = pos
self.lidar[i].p2 = (
pos[0] + math.sin(1.5 * i / 10.0) * LIDAR_RANGE,
pos[1] - math.cos(1.5 * i / 10.0) * LIDAR_RANGE,
)
self.world.RayCast(self.lidar[i], self.lidar[i].p1, self.lidar[i].p2)
state = [
self.hull.angle, # Normal angles up to 0.5 here, but sure more is possible.
2.0 * self.hull.angularVelocity / FPS,
0.3 * vel.x * (VIEWPORT_W / SCALE) / FPS, # Normalized to get -1..1 range
0.3 * vel.y * (VIEWPORT_H / SCALE) / FPS,
self.joints[0].angle,
# This will give 1.1 on high up, but it's still OK (and there should be spikes on hiting the ground, that's normal too)
self.joints[0].speed / SPEED_HIP,
self.joints[1].angle + 1.0,
self.joints[1].speed / SPEED_KNEE,
1.0 if self.legs[1].ground_contact else 0.0,
self.joints[2].angle,
self.joints[2].speed / SPEED_HIP,
self.joints[3].angle + 1.0,
self.joints[3].speed / SPEED_KNEE,
1.0 if self.legs[3].ground_contact else 0.0,
]
state += [l.fraction for l in self.lidar]
assert len(state) == 24
self.scroll = pos.x - VIEWPORT_W / SCALE / 5
shaping = (
130 * pos[0] / SCALE
) # moving forward is a way to receive reward (normalized to get 300 on completion)
shaping -= 5.0 * abs(
state[0]
) # keep head straight, other than that and falling, any behavior is unpunished
reward = 0
if self.prev_shaping is not None:
reward = shaping - self.prev_shaping
self.prev_shaping = shaping
for a in action:
reward -= 0.00035 * MOTORS_TORQUE * np.clip(np.abs(a), 0, 1)
# normalized to about -50.0 using heuristic, more optimal agent should spend less
terminated = False
if self.game_over or pos[0] < 0:
reward = -100
terminated = True
if pos[0] > (TERRAIN_LENGTH - TERRAIN_GRASS) * TERRAIN_STEP:
terminated = True
if self.render_mode == "human":
self.render()
return np.array(state, dtype=np.float32), reward, terminated, False, {}
    def render(self):
        """Draw the current scene.

        In ``"human"`` mode the frame is shown in a pygame window; in
        ``"rgb_array"`` mode a (H, W, 3) array of the visible viewport is
        returned instead.  Warns and returns None when no render mode was
        chosen at construction.
        """
        if self.render_mode is None:
            gym.logger.warn(
                "You are calling render method without specifying any render mode. "
                "You can specify the render_mode at initialization, "
                f'e.g. gym("{self.spec.id}", render_mode="rgb_array")'
            )
            return
        try:
            import pygame
            from pygame import gfxdraw
        except ImportError:
            raise DependencyNotInstalled(
                "pygame is not installed, run `pip install gym[box2d]`"
            )
        # Lazily create the window (human mode only) and the frame clock.
        if self.screen is None and self.render_mode == "human":
            pygame.init()
            pygame.display.init()
            self.screen = pygame.display.set_mode((VIEWPORT_W, VIEWPORT_H))
        if self.clock is None:
            self.clock = pygame.time.Clock()
        # Off-screen surface wide enough to cover the scrolled viewport.
        self.surf = pygame.Surface(
            (VIEWPORT_W + max(0.0, self.scroll) * SCALE, VIEWPORT_H)
        )
        # NOTE(review): the scaled surface returned here is discarded, so this
        # call appears to have no effect — confirm whether it can be removed.
        pygame.transform.scale(self.surf, (SCALE, SCALE))
        # Sky backdrop over the visible region.
        pygame.draw.polygon(
            self.surf,
            color=(215, 215, 255),
            points=[
                (self.scroll * SCALE, 0),
                (self.scroll * SCALE + VIEWPORT_W, 0),
                (self.scroll * SCALE + VIEWPORT_W, VIEWPORT_H),
                (self.scroll * SCALE, VIEWPORT_H),
            ],
        )
        # Clouds scroll at half speed (parallax); off-screen ones are culled.
        for poly, x1, x2 in self.cloud_poly:
            if x2 < self.scroll / 2:
                continue
            if x1 > self.scroll / 2 + VIEWPORT_W / SCALE:
                continue
            pygame.draw.polygon(
                self.surf,
                color=(255, 255, 255),
                points=[
                    (p[0] * SCALE + self.scroll * SCALE / 2, p[1] * SCALE) for p in poly
                ],
            )
            gfxdraw.aapolygon(
                self.surf,
                [(p[0] * SCALE + self.scroll * SCALE / 2, p[1] * SCALE) for p in poly],
                (255, 255, 255),
            )
        # Filled terrain polygons, restricted to those overlapping the view.
        for poly, color in self.terrain_poly:
            if poly[1][0] < self.scroll:
                continue
            if poly[0][0] > self.scroll + VIEWPORT_W / SCALE:
                continue
            scaled_poly = []
            for coord in poly:
                scaled_poly.append([coord[0] * SCALE, coord[1] * SCALE])
            pygame.draw.polygon(self.surf, color=color, points=scaled_poly)
            gfxdraw.aapolygon(self.surf, scaled_poly, color)
        # Animate one lidar beam per frame, sweeping forward then backward.
        self.lidar_render = (self.lidar_render + 1) % 100
        i = self.lidar_render
        if i < 2 * len(self.lidar):
            single_lidar = (
                self.lidar[i]
                if i < len(self.lidar)
                else self.lidar[len(self.lidar) - i - 1]
            )
            # p1/p2 only exist once step() has cast rays at least once.
            if hasattr(single_lidar, "p1") and hasattr(single_lidar, "p2"):
                pygame.draw.line(
                    self.surf,
                    color=(255, 0, 0),
                    start_pos=(single_lidar.p1[0] * SCALE, single_lidar.p1[1] * SCALE),
                    end_pos=(single_lidar.p2[0] * SCALE, single_lidar.p2[1] * SCALE),
                    width=1,
                )
        # Draw the bodies collected in reset(): terrain edges, legs, hull.
        for obj in self.drawlist:
            for f in obj.fixtures:
                trans = f.body.transform
                if type(f.shape) is circleShape:
                    pygame.draw.circle(
                        self.surf,
                        color=obj.color1,
                        center=trans * f.shape.pos * SCALE,
                        radius=f.shape.radius * SCALE,
                    )
                    pygame.draw.circle(
                        self.surf,
                        color=obj.color2,
                        center=trans * f.shape.pos * SCALE,
                        radius=f.shape.radius * SCALE,
                    )
                else:
                    path = [trans * v * SCALE for v in f.shape.vertices]
                    if len(path) > 2:
                        # Filled body in color1 with a 1px outline in color2.
                        pygame.draw.polygon(self.surf, color=obj.color1, points=path)
                        gfxdraw.aapolygon(self.surf, path, obj.color1)
                        path.append(path[0])
                        pygame.draw.polygon(
                            self.surf, color=obj.color2, points=path, width=1
                        )
                        gfxdraw.aapolygon(self.surf, path, obj.color2)
                    else:
                        # Two-vertex shapes (edges) are drawn as a line.
                        pygame.draw.aaline(
                            self.surf,
                            start_pos=path[0],
                            end_pos=path[1],
                            color=obj.color1,
                        )
        # Flag near the start of the track.
        flagy1 = TERRAIN_HEIGHT * SCALE
        flagy2 = flagy1 + 50
        x = TERRAIN_STEP * 3 * SCALE
        pygame.draw.aaline(
            self.surf, color=(0, 0, 0), start_pos=(x, flagy1), end_pos=(x, flagy2)
        )
        f = [
            (x, flagy2),
            (x, flagy2 - 10),
            (x + 25, flagy2 - 5),
        ]
        pygame.draw.polygon(self.surf, color=(230, 51, 0), points=f)
        pygame.draw.lines(
            self.surf, color=(0, 0, 0), points=f + [f[0]], width=1, closed=False
        )
        # Box2D's y axis points up, pygame's points down — flip vertically.
        self.surf = pygame.transform.flip(self.surf, False, True)
        if self.render_mode == "human":
            assert self.screen is not None
            self.screen.blit(self.surf, (-self.scroll * SCALE, 0))
            pygame.event.pump()
            self.clock.tick(self.metadata["render_fps"])
            pygame.display.flip()
        elif self.render_mode == "rgb_array":
            # Crop to the rightmost VIEWPORT_W columns of the scrolled surface.
            return np.transpose(
                np.array(pygame.surfarray.pixels3d(self.surf)), axes=(1, 0, 2)
            )[:, -VIEWPORT_W:]
def close(self):
if self.screen is not None:
import pygame
pygame.display.quit()
pygame.quit()
self.isopen = False
| BipedalWalker |
python | getsentry__sentry | src/sentry/analytics/events/eventuser_snuba_query.py | {
"start": 78,
"end": 306
} | class ____(analytics.Event):
project_ids: list[int]
query: str
query_try: int
count_rows_returned: int
count_rows_filtered: int
query_time_ms: int
analytics.register(EventUserSnubaQuery)
| EventUserSnubaQuery |
python | tiangolo__fastapi | fastapi/dependencies/models.py | {
"start": 638,
"end": 3004
} | class ____:
path_params: List[ModelField] = field(default_factory=list)
query_params: List[ModelField] = field(default_factory=list)
header_params: List[ModelField] = field(default_factory=list)
cookie_params: List[ModelField] = field(default_factory=list)
body_params: List[ModelField] = field(default_factory=list)
dependencies: List["Dependant"] = field(default_factory=list)
security_requirements: List[SecurityRequirement] = field(default_factory=list)
name: Optional[str] = None
call: Optional[Callable[..., Any]] = None
request_param_name: Optional[str] = None
websocket_param_name: Optional[str] = None
http_connection_param_name: Optional[str] = None
response_param_name: Optional[str] = None
background_tasks_param_name: Optional[str] = None
security_scopes_param_name: Optional[str] = None
security_scopes: Optional[List[str]] = None
use_cache: bool = True
path: Optional[str] = None
scope: Union[Literal["function", "request"], None] = None
@cached_property
def cache_key(self) -> DependencyCacheKey:
return (
self.call,
tuple(sorted(set(self.security_scopes or []))),
self.computed_scope or "",
)
@cached_property
def is_gen_callable(self) -> bool:
if inspect.isgeneratorfunction(self.call):
return True
dunder_call = getattr(self.call, "__call__", None) # noqa: B004
return inspect.isgeneratorfunction(dunder_call)
@cached_property
def is_async_gen_callable(self) -> bool:
if inspect.isasyncgenfunction(self.call):
return True
dunder_call = getattr(self.call, "__call__", None) # noqa: B004
return inspect.isasyncgenfunction(dunder_call)
@cached_property
def is_coroutine_callable(self) -> bool:
if inspect.isroutine(self.call):
return iscoroutinefunction(self.call)
if inspect.isclass(self.call):
return False
dunder_call = getattr(self.call, "__call__", None) # noqa: B004
return iscoroutinefunction(dunder_call)
@cached_property
def computed_scope(self) -> Union[str, None]:
if self.scope:
return self.scope
if self.is_gen_callable or self.is_async_gen_callable:
return "request"
return None
| Dependant |
python | huggingface__transformers | src/transformers/models/eomt/modular_eomt.py | {
"start": 17852,
"end": 25509
} | class ____(Mask2FormerForUniversalSegmentation):
def __init__(self, config: EomtConfig):
PreTrainedModel.__init__(self, config)
self.config = config
self.num_hidden_layers = config.num_hidden_layers
self.embeddings = EomtEmbeddings(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.query = nn.Embedding(config.num_queries, config.hidden_size)
self.layers = nn.ModuleList([EomtLayer(config) for _ in range(config.num_hidden_layers)])
self.upscale_block = EomtScaleBlock(config)
self.mask_head = EomtMaskHead(config)
self.class_predictor = nn.Linear(config.hidden_size, config.num_labels + 1)
self.grid_size = (config.image_size // config.patch_size, config.image_size // config.patch_size)
self.weight_dict: dict[str, float] = {
"loss_cross_entropy": config.class_weight,
"loss_mask": config.mask_weight,
"loss_dice": config.dice_weight,
}
self.criterion = EomtLoss(config=config, weight_dict=self.weight_dict)
self.register_buffer("attn_mask_probs", torch.ones(config.num_blocks))
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def get_auxiliary_logits(self):
raise AttributeError("Note needed for Eomt Model.")
def predict(self, logits: torch.Tensor):
query_tokens = logits[:, : self.config.num_queries, :]
class_logits = self.class_predictor(query_tokens)
prefix_tokens = logits[:, self.config.num_queries + self.embeddings.num_prefix_tokens :, :]
prefix_tokens = prefix_tokens.transpose(1, 2)
prefix_tokens = prefix_tokens.reshape(prefix_tokens.shape[0], -1, *self.grid_size)
query_tokens = self.mask_head(query_tokens)
prefix_tokens = self.upscale_block(prefix_tokens)
mask_logits = torch.einsum("bqc, bchw -> bqhw", query_tokens, prefix_tokens)
return mask_logits, class_logits
@staticmethod
def _disable_attention_mask(attn_mask, prob, num_query_tokens, encoder_start_tokens, device):
if prob < 1:
# Generate random queries to disable based on the probs
random_queries = torch.rand(attn_mask.shape[0], num_query_tokens, device=device) > prob
# Disable attention to the query tokens, considering the prefix tokens
attn_mask[:, :num_query_tokens, encoder_start_tokens:][random_queries] = 1
return attn_mask
@check_model_inputs()
@auto_docstring
def forward(
self,
pixel_values: Tensor,
mask_labels: Optional[list[Tensor]] = None,
class_labels: Optional[list[Tensor]] = None,
patch_offsets: Optional[list[Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> EomtForUniversalSegmentationOutput:
r"""
mask_labels (`list[torch.Tensor]`, *optional*):
list of mask labels of shape `(num_labels, height, width)` to be fed to a model
class_labels (`list[torch.LongTensor]`, *optional*):
list of target class labels of shape `(num_labels, height, width)` to be fed to a model. They identify the
labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`.
patch_offsets (`list[torch.Tensor]`, *optional*):
list of tuples indicating the image index and start and end positions of patches for semantic segmentation.
"""
masks_queries_logits_per_layer, class_queries_logits_per_layer = (), ()
attention_mask = None
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.embeddings(pixel_values)
for idx, layer_module in enumerate(self.layers):
if idx == self.num_hidden_layers - self.config.num_blocks:
query = self.query.weight[None, :, :].expand(hidden_states.shape[0], -1, -1).to(hidden_states.device)
hidden_states = torch.cat((query, hidden_states), dim=1)
if idx >= self.num_hidden_layers - self.config.num_blocks and (
self.training or self.attn_mask_probs[idx - self.num_hidden_layers + self.config.num_blocks] > 0
):
norm_hidden_states = self.layernorm(hidden_states)
masks_queries_logits, class_queries_logits = self.predict(norm_hidden_states)
masks_queries_logits_per_layer += (masks_queries_logits,)
class_queries_logits_per_layer += (class_queries_logits,)
attention_mask = torch.ones(
hidden_states.shape[0],
hidden_states.shape[1],
hidden_states.shape[1],
device=hidden_states.device,
dtype=torch.bool,
)
interpolated_logits = F.interpolate(masks_queries_logits, size=self.grid_size, mode="bilinear")
interpolated_logits = interpolated_logits.view(
interpolated_logits.size(0), interpolated_logits.size(1), -1
)
num_query_tokens = self.config.num_queries
encoder_start_tokens = num_query_tokens + self.embeddings.num_prefix_tokens
# Set attention mask for queries to focus on encoder tokens based on interpolated logits
attention_mask[:, :num_query_tokens, encoder_start_tokens:] = interpolated_logits > 0
# Disable attention mask for random query tokens.
attention_mask = self._disable_attention_mask(
attention_mask,
prob=self.attn_mask_probs[idx - self.num_hidden_layers + self.config.num_blocks],
num_query_tokens=num_query_tokens,
encoder_start_tokens=encoder_start_tokens,
device=attention_mask.device,
)
# Expand attention mask to 4d mask.
attention_mask = attention_mask[:, None, ...].expand(-1, self.config.num_attention_heads, -1, -1)
attention_mask = attention_mask.float().masked_fill(~attention_mask, -1e9)
hidden_states = layer_module(hidden_states, attention_mask)
sequence_output = self.layernorm(hidden_states)
masks_queries_logits, class_queries_logits = self.predict(sequence_output)
masks_queries_logits_per_layer += (masks_queries_logits,)
class_queries_logits_per_layer += (class_queries_logits,)
loss = None
if mask_labels is not None and class_labels is not None:
loss = 0.0
for masks_queries_logits, class_queries_logits in zip(
masks_queries_logits_per_layer, class_queries_logits_per_layer
):
loss_dict = self.get_loss_dict(
masks_queries_logits=masks_queries_logits,
class_queries_logits=class_queries_logits,
mask_labels=mask_labels,
class_labels=class_labels,
auxiliary_predictions=None,
)
loss += self.get_loss(loss_dict)
return EomtForUniversalSegmentationOutput(
loss=loss,
masks_queries_logits=masks_queries_logits,
class_queries_logits=class_queries_logits,
last_hidden_state=sequence_output,
patch_offsets=patch_offsets,
)
__all__ = ["EomtConfig", "EomtPreTrainedModel", "EomtForUniversalSegmentation"]
| EomtForUniversalSegmentation |
python | getsentry__sentry | tests/sentry/event_manager/grouping/test_seer_grouping.py | {
"start": 1469,
"end": 7396
} | class ____(TestCase):
"""Test whether Seer is called during ingest and if so, how the results are used"""
def test_obeys_seer_similarity_flags(self) -> None:
existing_event = save_new_event({"message": "Dogs are great!"}, self.project)
assert existing_event.group_id
seer_result_data = SeerSimilarIssueData(
parent_hash=existing_event.get_primary_hash(),
parent_group_id=existing_event.group_id,
stacktrace_distance=0.01,
should_group=True,
)
get_seer_similar_issues_return_values: list[Any] = []
with (
patch(
"sentry.grouping.ingest.seer.should_call_seer_for_grouping",
wraps=should_call_seer_for_grouping,
) as should_call_seer_spy,
patch(
"sentry.grouping.ingest.seer.get_seer_similar_issues",
wraps=capture_results(
get_seer_similar_issues, get_seer_similar_issues_return_values
),
) as get_seer_similar_issues_spy,
patch(
"sentry.grouping.ingest.seer.get_similarity_data_from_seer",
return_value=[seer_result_data],
),
patch(
"sentry.grouping.ingest.seer._event_content_is_seer_eligible",
return_value=True,
),
):
# Project option not set
self.project.update_option("sentry:similarity_backfill_completed", None)
new_event = save_new_event({"message": "Adopt don't shop"}, self.project)
# We checked whether to make the call, but didn't go through with it
assert should_call_seer_spy.call_count == 1
assert get_seer_similar_issues_spy.call_count == 0
# Parent group not used (even though `should_group` is True)
assert new_event.group_id != existing_event.group_id
should_call_seer_spy.reset_mock()
get_seer_similar_issues_spy.reset_mock()
# Project option set
self.project.update_option("sentry:similarity_backfill_completed", int(time()))
new_event = save_new_event(
{
"exception": {
"values": [{"type": "DogsAreNeverAnError", "value": "Dogs are great!"}],
},
},
self.project,
)
# In real life just filtering on group id wouldn't be enough to guarantee us a
# single, specific GroupHash record, but since the database resets before each test,
# here it's okay
expected_grouphash = GroupHash.objects.filter(group_id=existing_event.group_id).first()
# We checked whether to make the call, and then made it
assert should_call_seer_spy.call_count == 1
assert get_seer_similar_issues_spy.call_count == 1
# Stacktrace distance returned
assert get_seer_similar_issues_return_values[0][0] == 0.01
# Parent grouphash returned and parent group used
assert get_seer_similar_issues_return_values[0][1] == expected_grouphash
assert new_event.group_id == existing_event.group_id
@patch("sentry.grouping.ingest.seer.should_call_seer_for_grouping", return_value=True)
@patch("sentry.grouping.ingest.seer.get_seer_similar_issues", return_value=EMPTY_SEER_RESULTS)
def test_calls_seer_if_no_group_found(self, mock_get_seer_similar_issues: MagicMock, _) -> None:
save_new_event({"message": "Dogs are great!"}, self.project)
assert mock_get_seer_similar_issues.call_count == 1
@patch("sentry.grouping.ingest.seer.should_call_seer_for_grouping", return_value=True)
@patch("sentry.grouping.ingest.seer.get_seer_similar_issues", return_value=EMPTY_SEER_RESULTS)
def test_bypasses_seer_if_group_found(self, mock_get_seer_similar_issues: MagicMock, _) -> None:
existing_event = save_new_event({"message": "Dogs are great!"}, self.project)
assert mock_get_seer_similar_issues.call_count == 1
new_event = save_new_event({"message": "Dogs are great!"}, self.project)
assert existing_event.group_id == new_event.group_id
assert mock_get_seer_similar_issues.call_count == 1 # didn't get called again
@patch("sentry.grouping.ingest.seer.should_call_seer_for_grouping", return_value=True)
def test_assigns_event_to_neighbor_group_if_found(self, _: MagicMock) -> None:
existing_event = save_new_event({"message": "Dogs are great!"}, self.project)
assert existing_event.group_id is not None
seer_result_data = SeerSimilarIssueData(
parent_hash=existing_event.get_primary_hash(),
parent_group_id=existing_event.group_id,
stacktrace_distance=0.01,
should_group=True,
)
with patch(
"sentry.grouping.ingest.seer.get_similarity_data_from_seer",
return_value=[seer_result_data],
) as mock_get_similarity_data:
new_event = save_new_event(get_event_data(), self.project)
assert mock_get_similarity_data.call_count == 1
assert existing_event.group_id == new_event.group_id
@patch("sentry.grouping.ingest.seer.should_call_seer_for_grouping", return_value=True)
def test_creates_new_group_if_no_neighbor_found(self, _: MagicMock) -> None:
existing_event = save_new_event({"message": "Dogs are great!"}, self.project)
with patch(
"sentry.grouping.ingest.seer.get_similarity_data_from_seer", return_value=[]
) as mock_get_similarity_data:
new_event = save_new_event(get_event_data(), self.project)
assert mock_get_similarity_data.call_count == 1
assert existing_event.group_id != new_event.group_id
| SeerEventManagerGroupingTest |
python | pypa__pip | src/pip/_internal/utils/logging.py | {
"start": 7377,
"end": 12108
} | class ____(Filter):
"""
A logging Filter that excludes records from a logger (or its children).
"""
def filter(self, record: logging.LogRecord) -> bool:
# The base Filter class allows only records from a logger (or its
# children).
return not super().filter(record)
def setup_logging(verbosity: int, no_color: bool, user_log_file: str | None) -> int:
"""Configures and sets up all of the logging
Returns the requested logging level, as its integer value.
"""
# Determine the level to be logging at.
if verbosity >= 2:
level_number = logging.DEBUG
elif verbosity == 1:
level_number = VERBOSE
elif verbosity == -1:
level_number = logging.WARNING
elif verbosity == -2:
level_number = logging.ERROR
elif verbosity <= -3:
level_number = logging.CRITICAL
else:
level_number = logging.INFO
level = logging.getLevelName(level_number)
# The "root" logger should match the "console" level *unless* we also need
# to log to a user log file.
include_user_log = user_log_file is not None
if include_user_log:
additional_log_file = user_log_file
root_level = "DEBUG"
else:
additional_log_file = "/dev/null"
root_level = level
# Disable any logging besides WARNING unless we have DEBUG level logging
# enabled for vendored libraries.
vendored_log_level = "WARNING" if level in ["INFO", "ERROR"] else "DEBUG"
# Shorthands for clarity
handler_classes = {
"stream": "pip._internal.utils.logging.RichPipStreamHandler",
"file": "pip._internal.utils.logging.BetterRotatingFileHandler",
}
handlers = ["console", "console_errors", "console_subprocess"] + (
["user_log"] if include_user_log else []
)
global _stdout_console, stderr_console
_stdout_console = PipConsole(file=sys.stdout, no_color=no_color, soft_wrap=True)
_stderr_console = PipConsole(file=sys.stderr, no_color=no_color, soft_wrap=True)
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"filters": {
"exclude_warnings": {
"()": "pip._internal.utils.logging.MaxLevelFilter",
"level": logging.WARNING,
},
"restrict_to_subprocess": {
"()": "logging.Filter",
"name": subprocess_logger.name,
},
"exclude_subprocess": {
"()": "pip._internal.utils.logging.ExcludeLoggerFilter",
"name": subprocess_logger.name,
},
},
"formatters": {
"indent": {
"()": IndentingFormatter,
"format": "%(message)s",
},
"indent_with_timestamp": {
"()": IndentingFormatter,
"format": "%(message)s",
"add_timestamp": True,
},
},
"handlers": {
"console": {
"level": level,
"class": handler_classes["stream"],
"console": _stdout_console,
"filters": ["exclude_subprocess", "exclude_warnings"],
"formatter": "indent",
},
"console_errors": {
"level": "WARNING",
"class": handler_classes["stream"],
"console": _stderr_console,
"filters": ["exclude_subprocess"],
"formatter": "indent",
},
# A handler responsible for logging to the console messages
# from the "subprocessor" logger.
"console_subprocess": {
"level": level,
"class": handler_classes["stream"],
"console": _stderr_console,
"filters": ["restrict_to_subprocess"],
"formatter": "indent",
},
"user_log": {
"level": "DEBUG",
"class": handler_classes["file"],
"filename": additional_log_file,
"encoding": "utf-8",
"delay": True,
"formatter": "indent_with_timestamp",
},
},
"root": {
"level": root_level,
"handlers": handlers,
},
"loggers": {"pip._vendor": {"level": vendored_log_level}},
}
)
return level_number
| ExcludeLoggerFilter |
python | tensorflow__tensorflow | tensorflow/python/keras/saving/utils_v1/export_output.py | {
"start": 13287,
"end": 13656
} | class ____(_SupervisedOutput):
"""Represents the output of a supervised training process.
This class generates the appropriate signature def for exporting
training output by type-checking and wrapping loss, predictions, and metrics
values.
"""
def _get_signature_def_fn(self):
return unexported_signature_utils.supervised_train_signature_def
| TrainOutput |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/evaluator.py | {
"start": 943,
"end": 1108
} | class ____(operators.ColumnOperators):
def operate(self, *arg, **kw):
return None
def reverse_operate(self, *arg, **kw):
return None
| _NoObject |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chartsheet04.py | {
"start": 315,
"end": 1428
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chartsheet04.xlsx")
def test_create_file(self):
"""Test the worksheet properties of an XlsxWriter chartsheet file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chartsheet = workbook.add_chartsheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [43913216, 43914752]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chartsheet.protect()
chartsheet.set_chart(chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | torchgen/model.py | {
"start": 103577,
"end": 106289
} | class ____:
name: BaseOperatorName
overload_name: str
@staticmethod
def parse(op_name: str) -> OperatorName:
if "." in op_name:
name, overload_name = op_name.split(".", 1)
else:
name = op_name
overload_name = ""
r = OperatorName(name=BaseOperatorName.parse(name), overload_name=overload_name)
assert str(r) == op_name, f"{str(r)} != {op_name}"
return r
def __str__(self) -> str:
if self.overload_name:
return f"{self.name}.{self.overload_name}"
else:
return f"{self.name}"
# NB: This must be synchronized with the naming scheme in
# aten/src/ATen/templates/Operators.h
# Given a function schema "aten::op.overload(...)",
# If there is no overload name, this returns f"{op}"
# If there is an overload name, this returns f"{op}_{overload}"
def unambiguous_name(self) -> str:
if self.overload_name:
return f"{self.name}_{self.overload_name}"
else:
return f"{self.name}"
def remove_inplace(self) -> OperatorName:
return OperatorName(
name=BaseOperatorName(
base=self.name.base,
inplace=False,
dunder_method=self.name.dunder_method,
),
overload_name=self.overload_name,
)
def with_overload(self, overload: str) -> OperatorName:
return OperatorName(
name=BaseOperatorName(
base=self.name.base,
inplace=False,
dunder_method=self.name.dunder_method,
),
overload_name=overload,
)
def gets_generated_out_inplace_wrapper(
f: NativeFunction, g: NativeFunctionsGroup, b: BackendIndex
) -> bool:
return (
f.func.kind() is not SchemaKind.functional
and not b.has_kernel(f)
and b.has_kernel(g.functional)
)
# NativeFunction objects that are views (f.is_view_op returns True)
# are added into a `NativeFunctionsViewGroup`, which we can use to
# easily access the generated (optional) view_copy NativeFunction.
# It's convenient to group them together, so we pair them up in NativeFunctionsViewGroup.
# See Note [Codegen'd {view}_copy Operators]
#
# One property of this representation is that in order for a view-like op to be part of
# a NativeFunctionsViewGroup, the "aliasing" version of that view op must exist.
# There's one case where that doesn't happen: we have a non-aliasing `narrow_copy.out` op,
# but don't have corresponding aliasing `narrow.out` op.
# This means that `narrow_copy.out` won't appear as a NativeFunctionsViewGroup.
@dataclass(frozen=True)
| OperatorName |
python | pytorch__pytorch | torch/_functorch/fx_minifier.py | {
"start": 450,
"end": 564
} | class ____:
size: list[int]
stride: list[int]
dtype: torch.dtype
device: torch.device
| LoadTensorMeta |
python | matplotlib__matplotlib | lib/matplotlib/cbook.py | {
"start": 20652,
"end": 25597
} | class ____:
"""
Stack of elements with a movable cursor.
Mimics home/back/forward in a web browser.
"""
def __init__(self):
self._pos = -1
self._elements = []
def clear(self):
"""Empty the stack."""
self._pos = -1
self._elements = []
def __call__(self):
"""Return the current element, or None."""
return self._elements[self._pos] if self._elements else None
def __len__(self):
return len(self._elements)
def __getitem__(self, ind):
return self._elements[ind]
def forward(self):
"""Move the position forward and return the current element."""
self._pos = min(self._pos + 1, len(self._elements) - 1)
return self()
def back(self):
"""Move the position back and return the current element."""
self._pos = max(self._pos - 1, 0)
return self()
def push(self, o):
"""
Push *o* to the stack after the current position, and return *o*.
Discard all later elements.
"""
self._elements[self._pos + 1:] = [o]
self._pos = len(self._elements) - 1
return o
def home(self):
"""
Push the first element onto the top of the stack.
The first element is returned.
"""
return self.push(self._elements[0]) if self._elements else None
def safe_masked_invalid(x, copy=False):
x = np.array(x, subok=True, copy=copy)
if not x.dtype.isnative:
# If we have already made a copy, do the byteswap in place, else make a
# copy with the byte order swapped.
# Swap to native order.
x = x.byteswap(inplace=copy).view(x.dtype.newbyteorder('N'))
try:
xm = np.ma.masked_where(~(np.isfinite(x)), x, copy=False)
except TypeError:
if len(x.dtype.descr) == 1:
# Arrays with dtype 'object' get returned here.
# For example the 'c' kwarg of scatter, which supports multiple types.
# `plt.scatter([3, 4], [2, 5], c=[(1, 0, 0), 'y'])`
return x
else:
# In case of a dtype with multiple fields
# for example image data using a MultiNorm
try:
mask = np.empty(x.shape, dtype=np.dtype('bool, '*len(x.dtype.descr)))
for dd, dm in zip(x.dtype.descr, mask.dtype.descr):
mask[dm[0]] = ~np.isfinite(x[dd[0]])
xm = np.ma.array(x, mask=mask, copy=False)
except TypeError:
return x
return xm
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
"""
Print loops of cyclic references in the given *objects*.
It is often useful to pass in ``gc.garbage`` to find the cycles that are
preventing some objects from being garbage collected.
Parameters
----------
objects
A list of objects to find cycles in.
outstream
The stream for output.
show_progress : bool
If True, print the number of objects reached as they are found.
"""
import gc
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % type(step))
if isinstance(step, dict):
for key, val in step.items():
if val is next:
outstream.write(f"[{key!r}]")
break
if key is next:
outstream.write(f"[key] = {val!r}")
break
elif isinstance(step, list):
outstream.write("[%d]" % step.index(next))
elif isinstance(step, tuple):
outstream.write("( tuple )")
else:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, types.FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + [obj])
for obj in objects:
outstream.write(f"Examining: {obj!r}\n")
recurse(obj, obj, {}, [])
| _Stack |
python | django__django | django/contrib/postgres/aggregates/statistics.py | {
"start": 1339,
"end": 1397
} | class ____(StatAggregate):
function = "REGR_SXX"
| RegrSXX |
python | astropy__astropy | astropy/modeling/tests/test_parameters.py | {
"start": 26638,
"end": 28226
} | class ____:
def setup_class(self):
self.x1 = np.arange(1, 10, 0.1)
self.y, self.x = np.mgrid[:10, :7]
self.x11 = np.array([self.x1, self.x1]).T
self.gmodel = models.Gaussian1D(
[12, 10], [3.5, 5.2], stddev=[0.4, 0.7], n_models=2
)
def test_change_par(self):
"""
Test that a change to one parameter as a set propagates to param_sets.
"""
self.gmodel.amplitude = [1, 10]
np.testing.assert_almost_equal(
self.gmodel.param_sets,
np.array(
[
[1.0, 10],
[3.5, 5.2],
[0.4, 0.7],
]
),
)
np.all(self.gmodel.parameters == [1.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_par2(self):
"""
Test that a change to one single parameter in a set propagates to
param_sets.
"""
self.gmodel.amplitude[0] = 11
np.testing.assert_almost_equal(
self.gmodel.param_sets,
np.array(
[
[11.0, 10],
[3.5, 5.2],
[0.4, 0.7],
]
),
)
np.all(self.gmodel.parameters == [11.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_parameters(self):
self.gmodel.parameters = [13, 10, 9, 5.2, 0.4, 0.7]
np.testing.assert_almost_equal(self.gmodel.amplitude.value, [13.0, 10.0])
np.testing.assert_almost_equal(self.gmodel.mean.value, [9.0, 5.2])
| TestMultipleParameterSets |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/np_utils.py | {
"start": 7582,
"end": 7641
} | class ____:
def __init__(self, v):
self.value = v
| Link |
python | ansible__ansible | lib/ansible/_internal/_templating/_lazy_containers.py | {
"start": 15778,
"end": 22616
} | class ____(_AnsibleTaggedList, _AnsibleLazyTemplateMixin):
__slots__ = _AnsibleLazyTemplateMixin._SLOTS
def __init__(self, contents: t.Iterable | _LazyValueSource, /) -> None:
if isinstance(contents, _AnsibleLazyTemplateList):
super().__init__(list.__iter__(contents))
elif isinstance(contents, _LazyValueSource):
super().__init__(contents.source)
else:
raise UnsupportedConstructionMethodError()
_AnsibleLazyTemplateMixin.__init__(self, contents)
def __getitem__(self, key: t.SupportsIndex | slice, /) -> t.Any:
if type(key) is slice: # pylint: disable=unidiomatic-typecheck
return _AnsibleLazyTemplateList(_LazyValueSource(source=super().__getitem__(key), templar=self._templar, lazy_options=self._lazy_options))
return self._proxy_or_render_lazy_value(key, super().__getitem__(key))
def __iter__(self):
for key, value in enumerate(super().__iter__()):
yield self._proxy_or_render_lazy_value(key, value)
def pop(self, idx: t.SupportsIndex = -1, /) -> t.Any:
if not self:
raise IndexError('pop from empty list')
try:
value = self[idx]
except IndexError:
raise IndexError('pop index out of range')
value = self._proxy_or_render_lazy_value(_NoKeySentinel, value)
del self[idx]
return value
def __str__(self):
return str(self.copy()._native_copy()) # inefficient, but avoids mutating the current instance (to make debugging practical)
def __repr__(self):
return repr(self.copy()._native_copy()) # inefficient, but avoids mutating the current instance (to make debugging practical)
@staticmethod
def _item_source(value: list) -> list | _LazyValueSource:
if isinstance(value, _AnsibleLazyTemplateList):
return _LazyValueSource(source=list.__iter__(value), templar=value._templar, lazy_options=value._lazy_options)
return value
def _yield_non_lazy_list_items(self):
"""
Delegate to the base collection iterator to yield the raw contents.
As of Python 3.13, generator functions are significantly faster than inline generator expressions.
"""
for v in list.__iter__(self):
yield v.value if type(v) is _LazyValue else v # pylint: disable=unidiomatic-typecheck
def _non_lazy_copy(self) -> list:
return AnsibleTagHelper.tag_copy(self, self._yield_non_lazy_list_items(), value_type=list)
@staticmethod
def _lazy_values(values: list, lazy_options: LazyOptions) -> _LazyValueSource:
return _LazyValueSource(source=(_LazyValue(v) for v in values), templar=TemplateContext.current().templar, lazy_options=lazy_options)
@staticmethod
def _proxy_or_render_other(other: t.Any | None) -> None:
"""Call `_proxy_or_render_lazy_values` if `other` is a lazy list. Used internally by comparison methods."""
if type(other) is _AnsibleLazyTemplateList: # pylint: disable=unidiomatic-typecheck
other._proxy_or_render_lazy_values()
def _proxy_or_render_lazy_values(self) -> None:
"""Ensure all `_LazyValue` wrapped values have been processed."""
for _unused in self:
pass
def __eq__(self, other):
self._proxy_or_render_lazy_values()
self._proxy_or_render_other(other)
return super().__eq__(other)
def __ne__(self, other):
self._proxy_or_render_lazy_values()
self._proxy_or_render_other(other)
return super().__ne__(other)
def __gt__(self, other):
self._proxy_or_render_lazy_values()
self._proxy_or_render_other(other)
return super().__gt__(other)
def __ge__(self, other):
self._proxy_or_render_lazy_values()
self._proxy_or_render_other(other)
return super().__ge__(other)
def __lt__(self, other):
self._proxy_or_render_lazy_values()
self._proxy_or_render_other(other)
return super().__lt__(other)
def __le__(self, other):
self._proxy_or_render_lazy_values()
self._proxy_or_render_other(other)
return super().__le__(other)
def __contains__(self, item):
self._proxy_or_render_lazy_values()
return super().__contains__(item)
def __reversed__(self):
for idx in range(self.__len__() - 1, -1, -1):
yield self[idx]
def __add__(self, other):
if self._is_not_lazy_combine_candidate(other):
# When other is lazy with a different templar/options, it cannot be lazily combined with self and a plain list must be returned.
# If other is a list, de-lazify both, otherwise just let the operation fail.
if isinstance(other, _AnsibleLazyTemplateList):
self._proxy_or_render_lazy_values()
other._proxy_or_render_lazy_values()
return super().__add__(other)
# For all other cases, the new list inherits our templar and all values stay lazy.
# We use list.__add__ to avoid implementing all its error behavior.
return _AnsibleLazyTemplateList(_LazyValueSource(source=super().__add__(other), templar=self._templar, lazy_options=self._lazy_options))
def __radd__(self, other):
if not (other_add := getattr(other, '__add__', None)):
raise TypeError(f'unsupported operand type(s) for +: {type(other).__name__!r} and {type(self).__name__!r}') from None
return _AnsibleLazyTemplateList(_LazyValueSource(source=other_add(self), templar=self._templar, lazy_options=self._lazy_options))
def __mul__(self, other):
return _AnsibleLazyTemplateList(_LazyValueSource(source=super().__mul__(other), templar=self._templar, lazy_options=self._lazy_options))
def __rmul__(self, other):
return _AnsibleLazyTemplateList(_LazyValueSource(source=super().__rmul__(other), templar=self._templar, lazy_options=self._lazy_options))
def index(self, *args, **kwargs) -> int:
self._proxy_or_render_lazy_values()
return super().index(*args, **kwargs)
def remove(self, *args, **kwargs) -> None:
self._proxy_or_render_lazy_values()
super().remove(*args, **kwargs)
def sort(self, *args, **kwargs) -> None:
self._proxy_or_render_lazy_values()
super().sort(*args, **kwargs)
def __deepcopy__(self, memo):
return _AnsibleLazyTemplateList(
_LazyValueSource(
source=(copy.deepcopy(v) for v in super().__iter__()),
templar=copy.deepcopy(self._templar),
lazy_options=copy.deepcopy(self._lazy_options),
)
)
@t.final # consumers of lazy collections rely heavily on the concrete types being final
| _AnsibleLazyTemplateList |
python | pytorch__pytorch | torch/_export/db/examples/nested_function.py | {
"start": 41,
"end": 491
} | class ____(torch.nn.Module):
"""
Nested functions are traced through. Side effects on global captures
are not supported though.
"""
def forward(self, a, b):
x = a + b
z = a - b
def closure(y):
nonlocal x
x += 1
return x * y + z
return closure(x)
example_args = (torch.randn(3, 2), torch.randn(2))
tags = {"python.closure"}
model = NestedFunction()
| NestedFunction |
python | google__pytype | pytype/pyi/types.py | {
"start": 2008,
"end": 2093
} | class ____: # pylint: disable=redefined-builtin
pass
@dataclasses.dataclass
| Ellipsis |
python | huggingface__transformers | src/transformers/models/segformer/image_processing_segformer.py | {
"start": 2091,
"end": 22482
} | class ____(BaseImageProcessor):
r"""
Constructs a Segformer image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 512, "width": 512}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
`preprocess` method.
"""
model_input_names = ["pixel_values"]
valid_kwargs = SegformerImageProcessorKwargs
@filter_out_non_signature_kwargs(extra=INIT_SERVICE_KWARGS)
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_reduce_labels: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 512, "width": 512}
size = get_size_dict(size)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
self.do_reduce_labels = do_reduce_labels
# Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BILINEAR,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
output_size = (size["height"], size["width"])
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
# Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.reduce_label
def reduce_label(self, label: ImageInput) -> np.ndarray:
label = to_numpy_array(label)
# Avoid using underflow conversion
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
return label
def _preprocess(
self,
image: ImageInput,
do_reduce_labels: bool,
do_resize: bool,
do_rescale: bool,
do_normalize: bool,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
rescale_factor: Optional[float] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
if do_reduce_labels:
image = self.reduce_label(image)
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
return image
def _preprocess_image(
self,
image: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""Preprocesses a single image."""
# All transformations expect numpy arrays.
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
image = self._preprocess(
image=image,
do_reduce_labels=False,
do_resize=do_resize,
size=size,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
input_data_format=input_data_format,
)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def _preprocess_mask(
self,
segmentation_map: ImageInput,
do_reduce_labels: Optional[bool] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""Preprocesses a single mask."""
segmentation_map = to_numpy_array(segmentation_map)
# Add channel dimension if missing - needed for certain transformations
if segmentation_map.ndim == 2:
added_channel_dim = True
segmentation_map = segmentation_map[None, ...]
input_data_format = ChannelDimension.FIRST
else:
added_channel_dim = False
if input_data_format is None:
input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
# reduce zero label if needed
segmentation_map = self._preprocess(
image=segmentation_map,
do_reduce_labels=do_reduce_labels,
do_resize=do_resize,
resample=PILImageResampling.NEAREST,
size=size,
do_rescale=False,
do_normalize=False,
input_data_format=input_data_format,
)
# Remove extra channel dimension if added for processing
if added_channel_dim:
segmentation_map = segmentation_map.squeeze(0)
segmentation_map = segmentation_map.astype(np.int64)
return segmentation_map
def __call__(self, images, segmentation_maps=None, **kwargs):
"""
Preprocesses a batch of images and optionally segmentation maps.
Overrides the `__call__` method of the `Preprocessor` class so that both images and segmentation maps can be
passed in as positional arguments.
"""
return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_reduce_labels: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
segmentation_maps (`ImageInput`, *optional*):
Segmentation map to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after `resize` is applied.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
resample = resample if resample is not None else self.resample
size = size if size is not None else self.size
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_flat_list_of_images(images)
if segmentation_maps is not None:
segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
images = [
self._preprocess_image(
image=img,
do_resize=do_resize,
resample=resample,
size=size,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
data_format=data_format,
input_data_format=input_data_format,
)
for img in images
]
data = {"pixel_values": images}
if segmentation_maps is not None:
segmentation_maps = [
self._preprocess_mask(
segmentation_map=segmentation_map,
do_reduce_labels=do_reduce_labels,
do_resize=do_resize,
size=size,
input_data_format=input_data_format,
)
for segmentation_map in segmentation_maps
]
data["labels"] = segmentation_maps
return BatchFeature(data=data, tensor_type=return_tensors)
# Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->Segformer
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]] = None):
"""
Converts the output of [`SegformerForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`SegformerForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
"""
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
__all__ = ["SegformerImageProcessor"]
| SegformerImageProcessor |
python | openai__openai-python | src/openai/types/beta/thread.py | {
"start": 633,
"end": 936
} | class ____(BaseModel):
vector_store_ids: Optional[List[str]] = None
"""
The
[vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
attached to this thread. There can be a maximum of 1 vector store attached to
the thread.
"""
| ToolResourcesFileSearch |
python | mlflow__mlflow | mlflow/utils/search_utils.py | {
"start": 40943,
"end": 47357
} | class ____(SearchUtils):
VALID_SEARCH_ATTRIBUTE_KEYS = {"name", "creation_time", "last_update_time"}
VALID_ORDER_BY_ATTRIBUTE_KEYS = {"name", "experiment_id", "creation_time", "last_update_time"}
NUMERIC_ATTRIBUTES = {"creation_time", "last_update_time"}
@classmethod
def _invalid_statement_token_search_experiments(cls, token):
if (
isinstance(token, Comparison)
or token.is_whitespace
or token.match(ttype=TokenType.Keyword, values=["AND"])
):
return False
return True
@classmethod
def _process_statement(cls, statement):
tokens = _join_in_comparison_tokens(statement.tokens)
invalids = list(filter(cls._invalid_statement_token_search_experiments, tokens))
if len(invalids) > 0:
invalid_clauses = ", ".join(map(str, invalids))
raise MlflowException.invalid_parameter_value(
f"Invalid clause(s) in filter string: {invalid_clauses}"
)
return [cls._get_comparison(t) for t in tokens if isinstance(t, Comparison)]
@classmethod
def _get_identifier(cls, identifier, valid_attributes):
tokens = identifier.split(".", maxsplit=1)
if len(tokens) == 1:
key = tokens[0]
identifier = cls._ATTRIBUTE_IDENTIFIER
else:
entity_type, key = tokens
valid_entity_types = ("attribute", "tag", "tags")
if entity_type not in valid_entity_types:
raise MlflowException.invalid_parameter_value(
f"Invalid entity type '{entity_type}'. "
f"Valid entity types are {valid_entity_types}"
)
identifier = cls._valid_entity_type(entity_type)
key = cls._trim_backticks(cls._strip_quotes(key))
if identifier == cls._ATTRIBUTE_IDENTIFIER and key not in valid_attributes:
raise MlflowException.invalid_parameter_value(
f"Invalid attribute key '{key}' specified. Valid keys are '{valid_attributes}'"
)
return {"type": identifier, "key": key}
@classmethod
def _get_comparison(cls, comparison):
stripped_comparison = [token for token in comparison.tokens if not token.is_whitespace]
cls._validate_comparison(stripped_comparison)
left, comparator, right = stripped_comparison
comp = cls._get_identifier(left.value, cls.VALID_SEARCH_ATTRIBUTE_KEYS)
comp["comparator"] = comparator.value
comp["value"] = cls._get_value(comp.get("type"), comp.get("key"), right)
return comp
@classmethod
def parse_order_by_for_search_experiments(cls, order_by):
token_value, is_ascending = cls._parse_order_by_string(order_by)
identifier = cls._get_identifier(token_value.strip(), cls.VALID_ORDER_BY_ATTRIBUTE_KEYS)
return identifier["type"], identifier["key"], is_ascending
@classmethod
def is_attribute(cls, key_type, comparator):
if key_type == cls._ATTRIBUTE_IDENTIFIER:
if comparator not in cls.VALID_STRING_ATTRIBUTE_COMPARATORS:
raise MlflowException(
f"Invalid comparator '{comparator}' not one of "
f"'{cls.VALID_STRING_ATTRIBUTE_COMPARATORS}'"
)
return True
return False
@classmethod
def _does_experiment_match_clause(cls, experiment, sed):
key_type = sed.get("type")
key = sed.get("key")
value = sed.get("value")
comparator = sed.get("comparator").upper()
if cls.is_string_attribute(key_type, key, comparator):
lhs = getattr(experiment, key)
elif cls.is_numeric_attribute(key_type, key, comparator):
lhs = getattr(experiment, key)
value = float(value)
elif cls.is_tag(key_type, comparator):
if key not in experiment.tags:
return False
lhs = experiment.tags.get(key, None)
if lhs is None:
return experiment
else:
raise MlflowException(
f"Invalid search expression type '{key_type}'", error_code=INVALID_PARAMETER_VALUE
)
return SearchUtils.get_comparison_func(comparator)(lhs, value)
@classmethod
def filter(cls, experiments, filter_string):
if not filter_string:
return experiments
parsed = cls.parse_search_filter(filter_string)
def experiment_matches(experiment):
return all(cls._does_experiment_match_clause(experiment, s) for s in parsed)
return list(filter(experiment_matches, experiments))
@classmethod
def _get_sort_key(cls, order_by_list):
order_by = []
parsed_order_by = map(cls.parse_order_by_for_search_experiments, order_by_list)
for type_, key, ascending in parsed_order_by:
if type_ == "attribute":
order_by.append((key, ascending))
else:
raise MlflowException.invalid_parameter_value(f"Invalid order_by entity: {type_}")
# Add a tie-breaker
if not any(key == "experiment_id" for key, _ in order_by):
order_by.append(("experiment_id", False))
# https://stackoverflow.com/a/56842689
class _Sorter:
def __init__(self, obj, ascending):
self.obj = obj
self.ascending = ascending
# Only need < and == are needed for use as a key parameter in the sorted function
def __eq__(self, other):
return other.obj == self.obj
def __lt__(self, other):
if self.obj is None:
return False
elif other.obj is None:
return True
elif self.ascending:
return self.obj < other.obj
else:
return other.obj < self.obj
def _apply_sorter(experiment, key, ascending):
attr = getattr(experiment, key)
return _Sorter(attr, ascending)
return lambda experiment: tuple(_apply_sorter(experiment, k, asc) for (k, asc) in order_by)
@classmethod
def sort(cls, experiments, order_by_list):
return sorted(experiments, key=cls._get_sort_key(order_by_list))
# https://stackoverflow.com/a/56842689
| SearchExperimentsUtils |
python | openai__gym | gym/wrappers/time_limit.py | {
"start": 103,
"end": 2526
} | class ____(gym.Wrapper):
"""This wrapper will issue a `truncated` signal if a maximum number of timesteps is exceeded.
If a truncation is not defined inside the environment itself, this is the only place that the truncation signal is issued.
Critically, this is different from the `terminated` signal that originates from the underlying environment as part of the MDP.
Example:
>>> from gym.envs.classic_control import CartPoleEnv
>>> from gym.wrappers import TimeLimit
>>> env = CartPoleEnv()
>>> env = TimeLimit(env, max_episode_steps=1000)
"""
def __init__(
self,
env: gym.Env,
max_episode_steps: Optional[int] = None,
):
"""Initializes the :class:`TimeLimit` wrapper with an environment and the number of steps after which truncation will occur.
Args:
env: The environment to apply the wrapper
max_episode_steps: An optional max episode steps (if ``Ǹone``, ``env.spec.max_episode_steps`` is used)
"""
super().__init__(env)
if max_episode_steps is None and self.env.spec is not None:
max_episode_steps = env.spec.max_episode_steps
if self.env.spec is not None:
self.env.spec.max_episode_steps = max_episode_steps
self._max_episode_steps = max_episode_steps
self._elapsed_steps = None
def step(self, action):
"""Steps through the environment and if the number of steps elapsed exceeds ``max_episode_steps`` then truncate.
Args:
action: The environment step action
Returns:
The environment step ``(observation, reward, terminated, truncated, info)`` with `truncated=True`
if the number of steps elapsed >= max episode steps
"""
observation, reward, terminated, truncated, info = self.env.step(action)
self._elapsed_steps += 1
if self._elapsed_steps >= self._max_episode_steps:
truncated = True
return observation, reward, terminated, truncated, info
def reset(self, **kwargs):
"""Resets the environment with :param:`**kwargs` and sets the number of steps elapsed to zero.
Args:
**kwargs: The kwargs to reset the environment with
Returns:
The reset environment
"""
self._elapsed_steps = 0
return self.env.reset(**kwargs)
| TimeLimit |
python | langchain-ai__langchain | libs/partners/huggingface/langchain_huggingface/embeddings/huggingface.py | {
"start": 353,
"end": 6439
} | class ____(BaseModel, Embeddings):
"""HuggingFace sentence_transformers embedding models.
To use, you should have the `sentence_transformers` python package installed.
Example:
```python
from langchain_huggingface import HuggingFaceEmbeddings
model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {"device": "cpu"}
encode_kwargs = {"normalize_embeddings": False}
hf = HuggingFaceEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs,
)
```
"""
model_name: str = Field(
default="sentence-transformers/all-mpnet-base-v2", alias="model"
)
"""Model name to use."""
cache_folder: str | None = None
"""Path to store models.
Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
model_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Keyword arguments to pass to the Sentence Transformer model, such as `device`,
`prompts`, `default_prompt_name`, `revision`, `trust_remote_code`, or `token`.
See also the Sentence Transformer documentation: https://sbert.net/docs/package_reference/SentenceTransformer.html#sentence_transformers.SentenceTransformer"""
encode_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Keyword arguments to pass when calling the `encode` method for the documents of
the Sentence Transformer model, such as `prompt_name`, `prompt`, `batch_size`,
`precision`, `normalize_embeddings`, and more.
See also the Sentence Transformer documentation: https://sbert.net/docs/package_reference/SentenceTransformer.html#sentence_transformers.SentenceTransformer.encode"""
query_encode_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Keyword arguments to pass when calling the `encode` method for the query of
the Sentence Transformer model, such as `prompt_name`, `prompt`, `batch_size`,
`precision`, `normalize_embeddings`, and more.
See also the Sentence Transformer documentation: https://sbert.net/docs/package_reference/SentenceTransformer.html#sentence_transformers.SentenceTransformer.encode"""
multi_process: bool = False
"""Run encode() on multiple GPUs."""
show_progress: bool = False
"""Whether to show a progress bar."""
def __init__(self, **kwargs: Any):
"""Initialize the sentence_transformer."""
super().__init__(**kwargs)
try:
import sentence_transformers # type: ignore[import]
except ImportError as exc:
msg = (
"Could not import sentence_transformers python package. "
"Please install it with `pip install sentence-transformers`."
)
raise ImportError(msg) from exc
if self.model_kwargs.get("backend", "torch") == "ipex":
if not is_optimum_intel_available() or not is_ipex_available():
msg = f"Backend: ipex {IMPORT_ERROR.format('optimum[ipex]')}"
raise ImportError(msg)
if is_optimum_intel_version("<", _MIN_OPTIMUM_VERSION):
msg = (
f"Backend: ipex requires optimum-intel>="
f"{_MIN_OPTIMUM_VERSION}. You can install it with pip: "
"`pip install --upgrade --upgrade-strategy eager "
"`optimum[ipex]`."
)
raise ImportError(msg)
from optimum.intel import IPEXSentenceTransformer # type: ignore[import]
model_cls = IPEXSentenceTransformer
else:
model_cls = sentence_transformers.SentenceTransformer
self._client = model_cls(
self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
)
model_config = ConfigDict(
extra="forbid",
protected_namespaces=(),
populate_by_name=True,
)
def _embed(
self, texts: list[str], encode_kwargs: dict[str, Any]
) -> list[list[float]]:
"""Embed a text using the HuggingFace transformer model.
Args:
texts: The list of texts to embed.
encode_kwargs: Keyword arguments to pass when calling the
`encode` method for the documents of the SentenceTransformer
encode method.
Returns:
List of embeddings, one for each text.
"""
import sentence_transformers # type: ignore[import]
texts = [x.replace("\n", " ") for x in texts]
if self.multi_process:
pool = self._client.start_multi_process_pool()
embeddings = self._client.encode_multi_process(texts, pool)
sentence_transformers.SentenceTransformer.stop_multi_process_pool(pool)
else:
embeddings = self._client.encode(
texts,
show_progress_bar=self.show_progress,
**encode_kwargs,
)
if isinstance(embeddings, list):
msg = (
"Expected embeddings to be a Tensor or a numpy array, "
"got a list instead."
)
raise TypeError(msg)
return embeddings.tolist() # type: ignore[return-type]
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Compute doc embeddings using a HuggingFace transformer model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self._embed(texts, self.encode_kwargs)
def embed_query(self, text: str) -> list[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embed_kwargs = (
self.query_encode_kwargs
if len(self.query_encode_kwargs) > 0
else self.encode_kwargs
)
return self._embed([text], embed_kwargs)[0]
| HuggingFaceEmbeddings |
python | ray-project__ray | python/ray/serve/_private/common.py | {
"start": 29765,
"end": 30363
} | class ____:
bundles: List[Dict[str, float]]
strategy: str
target_node_id: str
name: str
runtime_env: Optional[str] = None
# This error is used to raise when a by-value DeploymentResponse is converted to an
# ObjectRef.
OBJ_REF_NOT_SUPPORTED_ERROR = RuntimeError(
"Converting by-value DeploymentResponses to ObjectRefs is not supported. "
"Use handle.options(_by_reference=True) to enable it."
)
RUNNING_REQUESTS_KEY = "running_requests"
ONGOING_REQUESTS_KEY = "ongoing_requests"
QUEUED_REQUESTS_KEY = "queued_requests"
@dataclass(order=True)
| CreatePlacementGroupRequest |
python | falconry__falcon | tests/test_utils.py | {
"start": 48021,
"end": 52225
} | class ____:
class CustomContextType(structures.Context):
def __init__(self):
pass
@pytest.mark.parametrize(
'context_type',
[
CustomContextType,
structures.Context,
],
)
def test_attributes(self, context_type):
ctx = context_type()
ctx.foo = 'bar'
ctx.details = None
ctx._cache = {}
assert ctx.foo == 'bar'
assert ctx.details is None
assert ctx._cache == {}
with pytest.raises(AttributeError):
ctx.cache_strategy
@pytest.mark.parametrize(
'context_type',
[
CustomContextType,
structures.Context,
],
)
def test_items_from_attributes(self, context_type):
ctx = context_type()
ctx.foo = 'bar'
ctx.details = None
ctx._cache = {}
assert ctx['foo'] == 'bar'
assert ctx['details'] is None
assert ctx['_cache'] == {}
with pytest.raises(KeyError):
ctx['cache_strategy']
assert 'foo' in ctx
assert '_cache' in ctx
assert 'cache_strategy' not in ctx
@pytest.mark.parametrize(
'context_type',
[
CustomContextType,
structures.Context,
],
)
def test_attributes_from_items(self, context_type):
ctx = context_type()
ctx['foo'] = 'bar'
ctx['details'] = None
ctx['_cache'] = {}
ctx['cache_strategy'] = 'lru'
assert ctx['cache_strategy'] == 'lru'
del ctx['cache_strategy']
assert ctx['foo'] == 'bar'
assert ctx['details'] is None
assert ctx['_cache'] == {}
with pytest.raises(KeyError):
ctx['cache_strategy']
@pytest.mark.parametrize(
'context_type,type_name',
[
(CustomContextType, 'CustomContextType'),
(structures.Context, 'Context'),
],
)
def test_dict_interface(self, context_type, type_name):
ctx = context_type()
ctx['foo'] = 'bar'
ctx['details'] = None
ctx[1] = 'one'
ctx[2] = 'two'
assert ctx == {'foo': 'bar', 'details': None, 1: 'one', 2: 'two'}
assert ctx != {'bar': 'foo', 'details': None, 1: 'one', 2: 'two'}
assert ctx != {}
copy = ctx.copy()
assert isinstance(copy, context_type)
assert copy == ctx
assert copy == {'foo': 'bar', 'details': None, 1: 'one', 2: 'two'}
copy.pop('foo')
assert copy != ctx
assert set(key for key in ctx) == {'foo', 'details', 1, 2}
assert ctx.get('foo') == 'bar'
assert ctx.get('bar') is None
assert ctx.get('bar', frozenset('hello')) == frozenset('hello')
false = ctx.get('bar', False)
assert isinstance(false, bool)
assert not false
assert len(ctx) == 4
assert ctx.pop(3) is None
assert ctx.pop(3, 'not found') == 'not found'
assert ctx.pop('foo') == 'bar'
assert ctx.pop(1) == 'one'
assert ctx.pop(2) == 'two'
assert len(ctx) == 1
assert repr(ctx) == type_name + "({'details': None})"
assert str(ctx) == type_name + "({'details': None})"
assert f'{ctx}' == type_name + "({'details': None})"
with pytest.raises(TypeError):
{ctx: ctx}
ctx.clear()
assert ctx == {}
assert len(ctx) == 0
ctx['key'] = 'value'
assert ctx.popitem() == ('key', 'value')
ctx.setdefault('numbers', []).append(1)
ctx.setdefault('numbers', []).append(2)
ctx.setdefault('numbers', []).append(3)
assert ctx['numbers'] == [1, 2, 3]
@pytest.mark.parametrize(
'context_type',
[
CustomContextType,
structures.Context,
],
)
def test_keys_and_values(self, context_type):
ctx = context_type()
ctx.update((number, number**2) for number in range(1, 5))
assert set(ctx.keys()) == {1, 2, 3, 4}
assert set(ctx.values()) == {1, 4, 9, 16}
assert set(ctx.items()) == {(1, 1), (2, 4), (3, 9), (4, 16)}
| TestContextType |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 835988,
"end": 836384
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("PinnedDiscussion", graphql_name="node")
"""The item at the end of the edge."""
| PinnedDiscussionEdge |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance1.py | {
"start": 287,
"end": 399
} | class ____:
class_var1: int
def __init__(self) -> None:
self.property: None = None
| UnrelatedClass |
python | django__django | django/db/models/query.py | {
"start": 83900,
"end": 84052
} | class ____(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
| InstanceCheckMeta |
python | sqlalchemy__sqlalchemy | examples/versioned_history/test_versioning.py | {
"start": 29901,
"end": 30038
} | class ____(TestVersioningNewBase, unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()
| TestVersioningNewBaseUnittest |
python | pytorch__pytorch | torch/distributed/fsdp/api.py | {
"start": 18826,
"end": 18975
} | class ____:
state_dict_type: StateDictType
state_dict_config: StateDictConfig
optim_state_dict_config: OptimStateDictConfig
| StateDictSettings |
python | kamyu104__LeetCode-Solutions | Python/closest-binary-search-tree-value-ii.py | {
"start": 33,
"end": 1916
} | class ____(object):
def closestKValues(self, root, target, k):
"""
:type root: TreeNode
:type target: float
:type k: int
:rtype: List[int]
"""
# Helper to make a stack to the next node.
def nextNode(stack, child1, child2):
if stack:
if child2(stack):
stack.append(child2(stack))
while child1(stack):
stack.append(child1(stack))
else:
child = stack.pop()
while stack and child is child2(stack):
child = stack.pop()
# The forward or backward iterator.
backward = lambda stack: stack[-1].left
forward = lambda stack: stack[-1].right
# Build the stack to the closest node.
stack = []
while root:
stack.append(root)
root = root.left if target < root.val else root.right
dist = lambda node: abs(node.val - target)
forward_stack = stack[:stack.index(min(stack, key=dist))+1]
# Get the stack to the next smaller node.
backward_stack = list(forward_stack)
nextNode(backward_stack, backward, forward)
# Get the closest k values by advancing the iterators of the stacks.
result = []
for _ in xrange(k):
if forward_stack and \
(not backward_stack or dist(forward_stack[-1]) < dist(backward_stack[-1])):
result.append(forward_stack[-1].val)
nextNode(forward_stack, forward, backward)
elif backward_stack and \
(not forward_stack or dist(backward_stack[-1]) <= dist(forward_stack[-1])):
result.append(backward_stack[-1].val)
nextNode(backward_stack, backward, forward)
return result
| Solution |
python | django__django | tests/mail/custombackend.py | {
"start": 448,
"end": 606
} | class ____(BaseEmailBackend):
def send_messages(self, email_messages):
raise ValueError("FailingEmailBackend is doomed to fail.")
| FailingEmailBackend |
python | modin-project__modin | asv_bench/benchmarks/benchmarks.py | {
"start": 36069,
"end": 36325
} | class ____(BaseCategories):
params = [get_benchmark_shapes("TimeRemoveCategories")]
param_names = ["shape"]
def time_remove_categories(self, shape):
execute(self.ts.cat.remove_categories(self.ts.cat.categories[::2]))
| TimeRemoveCategories |
python | pytorch__pytorch | test/jit/test_ignorable_args.py | {
"start": 481,
"end": 2334
} | class ____(JitTestCase):
def test_slice_ignorable_args_for_slice(self):
graph_str = """graph():
%13 : int = prim::Constant[value=0]()
%10 : bool = prim::Constant[value=0]()
%8 : NoneType = prim::Constant()
%0 : int = prim::Constant[value=1]()
%1 : int = prim::Constant[value=2]()
%2 : int = prim::Constant[value=3]()
%3 : int = prim::Constant[value=4]()
%4 : int = prim::Constant[value=9]()
%5 : int[] = prim::ListConstruct(%0, %1, %2, %3, %4, %4)
%6 : int[] = prim::ListConstruct(%0, %1, %2, %3, %4, %4)
%7 : int[][] = prim::ListConstruct(%5, %6)
%val.1 : Tensor = aten::tensor(%7, %8, %8, %10)
%16 : Tensor = aten::slice(%val.1, %13, %1, %8, %0)
%20 : Tensor = aten::slice(%16, %0, %8, %0, %0)
return (%20)"""
graph = parse_ir(graph_str)
function = self.createFunctionFromGraph(graph)
function_copy = self.getExportImportCopy(function)
src = str(function.code)
# For a signature:
# aten::slice(Tensor self, int dim, int start, int end, int step) -> Tensor
# We ignore trailing arguments after start=2 for dim 0
# and after end=1 for dim 1
# because in %16, %15 and %0 are default values for the schema.
FileCheck().check(
"torch.slice(torch.slice(torch.tensor(_0), 0, 2), 1, None, 1)"
).run(src)
self.assertEqual(function(), function_copy())
def test_add_out_ignorable_args(self):
@torch.jit.script
def fn(x: torch.Tensor, y: torch.Tensor):
torch.add(x, y, out=y)
FileCheck().check("torch.add(x, y, out=y)").run(fn.code)
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestIgnorableArgs |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 14369,
"end": 15560
} | class ____(graphene.ObjectType, AssetEventMixin):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent, GrapheneDisplayableEvent)
name = "FailedToMaterializeEvent"
materializationFailureReason = graphene.NonNull(GrapheneAssetMaterializationFailureReason)
materializationFailureType = graphene.NonNull(GrapheneAssetMaterializationFailureType)
def __init__(self, event: EventLogEntry):
dagster_event = check.not_none(event.dagster_event)
self.failed_materialization = (
dagster_event.asset_failed_to_materialize_data.asset_materialization_failure
)
super().__init__(
**_construct_asset_event_metadata_params(event, self.failed_materialization),
)
AssetEventMixin.__init__(
self,
event=event,
metadata=self.failed_materialization,
)
def resolve_materializationFailureReason(self, _graphene_info: ResolveInfo):
return self.failed_materialization.reason
def resolve_materializationFailureType(self, _graphene_info: ResolveInfo):
return self.failed_materialization.failure_type
| GrapheneFailedToMaterializeEvent |
python | huggingface__transformers | src/transformers/models/timesfm/modeling_timesfm.py | {
"start": 4410,
"end": 5137
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
TimesFmRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| TimesFmRMSNorm |
python | spulec__freezegun | tests/test_datetimes.py | {
"start": 609,
"end": 9668
} | class ____:
"""Temporarily change the locale."""
def __init__(self, *targets: str):
self.targets = targets
def __enter__(self) -> None:
self.old = locale.setlocale(locale.LC_ALL)
for target in self.targets:
try:
locale.setlocale(locale.LC_ALL, target)
return
except locale.Error:
pass
msg = 'could not set locale to any of: %s' % ', '.join(self.targets)
raise SkipTest(msg)
def __exit__(self, *args: Any) -> None:
locale.setlocale(locale.LC_ALL, self.old)
# Small sample of locales where '%x' expands to a dd/mm/yyyy string,
# which can cause trouble when parsed with dateutil.
_dd_mm_yyyy_locales = ['da_DK.UTF-8', 'de_DE.UTF-8', 'fr_FR.UTF-8']
def test_simple_api() -> None:
# time to freeze is always provided in UTC
freezer = freeze_time("2012-01-14")
# expected timestamp must be a timestamp, corresponding to 2012-01-14 UTC
local_time = datetime.datetime(2012, 1, 14)
utc_time = local_time - datetime.timedelta(seconds=time.timezone)
expected_timestamp = time.mktime(utc_time.timetuple())
freezer.start()
assert time.time() == expected_timestamp
assert time.monotonic() >= 0.0
assert time.perf_counter() >= 0.0
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)
assert datetime.datetime.utcnow() == datetime.datetime(2012, 1, 14)
assert datetime.date.today() == datetime.date(2012, 1, 14)
assert datetime.datetime.now().today() == datetime.datetime(2012, 1, 14)
freezer.stop()
assert time.time() != expected_timestamp
assert time.monotonic() >= 0.0
assert time.perf_counter() >= 0.0
assert datetime.datetime.now() != datetime.datetime(2012, 1, 14)
assert datetime.datetime.utcnow() != datetime.datetime(2012, 1, 14)
freezer = freeze_time("2012-01-10 13:52:01")
freezer.start()
assert datetime.datetime.now() == datetime.datetime(2012, 1, 10, 13, 52, 1)
freezer.stop()
def test_tz_offset() -> None:
freezer = freeze_time("2012-01-14 03:21:34", tz_offset=-4)
# expected timestamp must be a timestamp,
# corresponding to 2012-01-14 03:21:34 UTC
# and it doesn't depend on tz_offset
local_time = datetime.datetime(2012, 1, 14, 3, 21, 34)
utc_time = local_time - datetime.timedelta(seconds=time.timezone)
expected_timestamp = time.mktime(utc_time.timetuple())
freezer.start()
assert datetime.datetime.now() == datetime.datetime(2012, 1, 13, 23, 21, 34)
assert datetime.datetime.utcnow() == datetime.datetime(2012, 1, 14, 3, 21, 34)
assert time.time() == expected_timestamp
freezer.stop()
def test_timestamp_tz_offset() -> None:
freezer = freeze_time(datetime.datetime.fromtimestamp(1), tz_offset=-1)
freezer.start()
t = datetime.datetime.now().timestamp()
assert datetime.datetime.fromtimestamp(t).timestamp() == t
freezer.stop()
def test_timedelta_tz_offset() -> None:
freezer = freeze_time("2012-01-14 03:21:34",
tz_offset=-datetime.timedelta(hours=3, minutes=30))
freezer.start()
assert datetime.datetime.now() == datetime.datetime(2012, 1, 13, 23, 51, 34)
assert datetime.datetime.utcnow() == datetime.datetime(2012, 1, 14, 3, 21, 34)
freezer.stop()
def test_tz_offset_with_today() -> None:
freezer = freeze_time("2012-01-14", tz_offset=-4)
freezer.start()
assert datetime.date.today() == datetime.date(2012, 1, 13)
freezer.stop()
assert datetime.date.today() != datetime.date(2012, 1, 13)
def test_zero_tz_offset_with_time() -> None:
# we expect the system to behave like a system with UTC timezone
# at the beginning of the Epoch
freezer = freeze_time('1970-01-01')
freezer.start()
assert datetime.date.today() == datetime.date(1970, 1, 1)
assert datetime.datetime.now() == datetime.datetime(1970, 1, 1)
assert datetime.datetime.utcnow() == datetime.datetime(1970, 1, 1)
assert time.time() == 0.0
assert time.monotonic() >= 0.0
assert time.perf_counter() >= 0.0
freezer.stop()
def test_tz_offset_with_time() -> None:
# we expect the system to behave like a system with UTC-4 timezone
# at the beginning of the Epoch (wall clock should be 4 hrs late)
freezer = freeze_time('1970-01-01', tz_offset=-4)
freezer.start()
assert datetime.date.today() == datetime.date(1969, 12, 31)
assert datetime.datetime.now() == datetime.datetime(1969, 12, 31, 20)
assert datetime.datetime.utcnow() == datetime.datetime(1970, 1, 1)
assert time.time() == 0.0
assert time.monotonic() >= 0
assert time.perf_counter() >= 0
freezer.stop()
def test_time_with_microseconds() -> None:
freezer = freeze_time(datetime.datetime(1970, 1, 1, 0, 0, 1, 123456))
freezer.start()
assert time.time() == 1.123456
freezer.stop()
def test_time_with_dst() -> None:
freezer = freeze_time(datetime.datetime(1970, 6, 1, 0, 0, 1, 123456))
freezer.start()
assert time.time() == 13046401.123456
freezer.stop()
def test_manual_increment() -> None:
initial_datetime = datetime.datetime(year=1, month=7, day=12,
hour=15, minute=6, second=3)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
expected = initial_datetime + datetime.timedelta(seconds=1)
assert frozen_datetime.tick() == expected
assert frozen_datetime() == expected
expected = initial_datetime + datetime.timedelta(seconds=11)
assert frozen_datetime.tick(10) == expected
assert frozen_datetime() == expected
expected = initial_datetime + datetime.timedelta(seconds=21)
assert frozen_datetime.tick(delta=datetime.timedelta(seconds=10)) == expected
assert frozen_datetime() == expected
expected = initial_datetime + datetime.timedelta(seconds=22.5)
ticked_time = frozen_datetime.tick(
delta=fractions.Fraction(3, 2) # type: ignore
# type hints follow the recommendation of
# https://peps.python.org/pep-0484/#the-numeric-tower
# which means for instance `Fraction`s work at runtime, but not
# during static type analysis
)
assert ticked_time == expected
assert frozen_datetime() == expected
def test_move_to() -> None:
initial_datetime = datetime.datetime(year=1, month=7, day=12,
hour=15, minute=6, second=3)
other_datetime = datetime.datetime(year=2, month=8, day=13,
hour=14, minute=5, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
frozen_datetime.move_to(other_datetime)
assert frozen_datetime() == other_datetime
frozen_datetime.move_to(initial_datetime)
assert frozen_datetime() == initial_datetime
def test_bad_time_argument() -> None:
try:
freeze_time("2012-13-14", tz_offset=-4)
except ValueError:
pass
else:
assert False, "Bad values should raise a ValueError"
@pytest.mark.parametrize("func_name, has_func, tick_size", (
("monotonic", True, 1.0),
("monotonic_ns", HAS_MONOTONIC_NS, int(1e9)),
("perf_counter", True, 1.0),
("perf_counter_ns", HAS_PERF_COUNTER_NS, int(1e9)),)
)
def test_time_monotonic(func_name: str, has_func: bool, tick_size: int) -> None:
initial_datetime = datetime.datetime(year=1, month=7, day=12,
hour=15, minute=6, second=3)
if not has_func:
pytest.skip("%s does not exist in current version" % func_name)
with freeze_time(initial_datetime) as frozen_datetime:
func = getattr(time, func_name)
t0 = func()
frozen_datetime.tick()
t1 = func()
assert t1 == t0 + tick_size
frozen_datetime.tick(10)
t11 = func()
assert t11 == t1 + 10 * tick_size
def test_time_gmtime() -> None:
with freeze_time('2012-01-14 03:21:34'):
time_struct = time.gmtime()
assert time_struct.tm_year == 2012
assert time_struct.tm_mon == 1
assert time_struct.tm_mday == 14
assert time_struct.tm_hour == 3
assert time_struct.tm_min == 21
assert time_struct.tm_sec == 34
assert time_struct.tm_wday == 5
assert time_struct.tm_yday == 14
assert time_struct.tm_isdst == -1
@pytest.mark.skipif(not HAS_CLOCK,
reason="time.clock was removed in Python 3.8")
def test_time_clock() -> None:
with freeze_time('2012-01-14 03:21:34'):
assert time.clock() == 0 # type: ignore[attr-defined]
with freeze_time('2012-01-14 03:21:35'):
assert time.clock() == 1 # type: ignore[attr-defined]
with freeze_time('2012-01-14 03:21:36'):
assert time.clock() == 2 # type: ignore[attr-defined]
| temp_locale |
python | huggingface__transformers | tests/models/mgp_str/test_modeling_mgp_str.py | {
"start": 7915,
"end": 8994
} | class ____(unittest.TestCase):
@slow
def test_inference(self):
model_name = "alibaba-damo/mgp-str-base"
model = MgpstrForSceneTextRecognition.from_pretrained(model_name).to(torch_device)
processor = MgpstrProcessor.from_pretrained(model_name)
image = prepare_img()
inputs = processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(inputs)
# verify the logits
self.assertEqual(outputs.logits[0].shape, torch.Size((1, 27, 38)))
out_strs = processor.batch_decode(outputs.logits)
expected_text = "ticket"
self.assertEqual(out_strs["generated_text"][0], expected_text)
expected_slice = torch.tensor(
[[[-39.5397, -44.4024, -36.1844], [-61.4709, -63.8639, -58.3454], [-74.0225, -68.5494, -71.2164]]],
device=torch_device,
)
torch.testing.assert_close(outputs.logits[0][:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
| MgpstrModelIntegrationTest |
python | realpython__materials | top-python-game-engines/arcade/arcade_basic.py | {
"start": 458,
"end": 2125
} | class ____(arcade.Window):
"""Main game window"""
def __init__(self, width: int, height: int, title: str):
"""Initialize the window to a specific size
Arguments:
width {int} -- Width of the window
height {int} -- Height of the window
title {str} -- Title for the window
"""
# Call the parent class constructor
super().__init__(width, height, title)
# Set the background window
arcade.set_background_color(color=arcade.color.WHITE)
def on_draw(self):
"""Called once per frame to render everything on the screen"""
# Start rendering
arcade.start_render()
# Draw a blue circle with a radius of 50 in the center of the screen
arcade.draw_circle_filled(
center_x=WIDTH // 2,
center_y=HEIGHT // 2,
radius=50,
color=arcade.color.BLUE,
num_segments=50,
)
# Draw a red-outlined square in the top-left corner of the screen
arcade.draw_lrtb_rectangle_outline(
left=50,
top=HEIGHT - 50,
bottom=HEIGHT - 100,
right=100,
color=arcade.color.RED,
border_width=3,
)
# Draw an orange caption along the bottom in 60-point font
arcade.draw_text(
text="Hello, World! From Arcade!",
start_x=100,
start_y=50,
font_size=28,
color=arcade.color.ORANGE,
)
# Run the program
if __name__ == "__main__":
arcade_game = ArcadeBasic(WIDTH, HEIGHT, "Arcade Basic Game")
arcade.run()
| ArcadeBasic |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 59479,
"end": 59648
} | class ____(themeable):
"""
Legend key background height
Parameters
----------
theme_element : float
Value in points.
"""
| legend_key_height |
python | pydata__xarray | xarray/core/treenode.py | {
"start": 461,
"end": 604
} | class ____(ValueError):
"""Raised when operation can't be completed because one node is not part of the expected tree."""
| NotFoundInTreeError |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 77197,
"end": 77682
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.sub1 = LinearReluModel()
self.sub2 = QuantWrapper(TwoLayerLinearModel())
self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
self.fc3.qconfig = default_qconfig
self.sub2.qconfig = default_qconfig
def forward(self, x):
x = self.sub1(x)
x = self.sub2(x)
x = self.fc3(x)
return x
| AnnotatedSubNestedModel |
python | pytorch__pytorch | test/inductor/test_memory_planning.py | {
"start": 1008,
"end": 6206
} | class ____(TestCase):
device = GPU_TYPE
def _generate(self, *, device):
"""
Generate a simple test case that has multiple simultaneously-live intermediate tensors.
"""
class Foo(torch.nn.Module):
def forward(self, x, y, z):
t0 = x.matmul(y)
t1 = x.matmul(z)
t0 = x.transpose(0, 1).matmul(t1)
t1 = x.matmul(t0)
return t0.sum() + t1.sum()
x = torch.randn((3, 2), device=device)
y = torch.randn((2, 4), device=device)
z = torch.randn((2, 3), device=device)
return (Foo(), (x, y, z))
def test_python_wrapper(self):
f, args = self._generate(device=GPU_TYPE)
compiled = torch.compile(f, dynamic=True)
result, code = run_and_get_cpp_code(compiled, *args)
FileCheck().check(
"pool1 = empty_strided_"
+ GPU_TYPE
+ "((4*s27*s77 + align(4*s77*s77), ), (1, )"
).check_next(
"buf0 = alloc_from_pool(pool1, 0, torch.float32, (s77, s77), (s77, 1))"
).check("buf1 = alloc_from_pool(pool1, align(4*s77*s77),").run(code)
self.assertTrue(same(f(*args), result))
def test_cpp_wrapper(self):
f, args = self._generate(device=GPU_TYPE)
compiled = torch.compile(f, dynamic=True)
with config.patch({"cpp_wrapper": True}):
result, code = run_and_get_cpp_code(compiled, *args)
FileCheck().check(
"aoti_torch__alloc_from_pool(pool1, 0, cached_torch_dtype_float32, 2, int_array_2, int_array_3, &tmp_tensor_handle_0)"
).check_next("auto buf0 = RAIIAtenTensorHandle(tmp_tensor_handle_0);").check(
"auto buf1 = RAIIAtenTensorHandle(tmp_tensor_handle_1);"
).run(code)
self.assertTrue(same(f(*args), result))
def test_aoti(self):
f, args = self._generate(device=GPU_TYPE)
dim0_x = Dim("dim0_x", min=1, max=2048)
dynamic_shapes = ({0: dim0_x}, None, None)
result, code = run_and_get_cpp_code(
lambda: AOTIRunnerUtil.run(f, args, dynamic_shapes=dynamic_shapes)
)
FileCheck().check(
"int64_t int_array_0[] = {24L + align(12L*s6), };"
).check_next("int64_t int_array_1[] = {1L, };").check_next(
"AtenTensorHandle pool1_handle;"
).check_next(
"aoti_torch_empty_strided(1, int_array_0, int_array_1,"
).check_next("RAIIAtenTensorHandle pool1(pool1_handle);").check_next(
"int64_t int_array_2[] = {s6, 3L};"
).check_next("int64_t int_array_3[] = {3L, 1L};").check_next(
"AtenTensorHandle tmp_tensor_handle_0;"
).check_next("aoti_torch__alloc_from_pool(pool1, 0").run(code)
self.assertTrue(same(f(*args), result))
@config.patch({"triton.autotune_at_compile_time": False})
def test_unbacked_symint(self):
# when allocation's size has unbacked symints
# the unbacked symints are only available after computed
if self.device != GPU_TYPE:
raise unittest.SkipTest("requires GPU")
class Repro(torch.nn.Module):
def forward(self, x, y):
x = x + 1
u0 = x.item()
torch._check(u0 >= 1)
s0 = y.size(0)
expr = u0 * s0
sevens = torch.empty_strided(
size=(10, expr, 32), stride=(expr * 32, 32, 1), device=x.device
).fill_(7)
return sevens * 3
example_inputs = (
torch.scalar_tensor(2, dtype=torch.int, device=self.device),
torch.ones(8, device=self.device),
)
model = Repro().to(self.device)
result, code = run_and_get_cpp_code(
lambda: AOTIRunnerUtil.run(model, example_inputs)
)
self.assertTrue(same(model(*example_inputs), result))
# check allocation is done after the unbacked symint is computed
FileCheck().check("auto u0 = u0_raw;").check(
"const int64_t int_array_2[] = {10L, 8L*u0, 32L};"
).check("AtenTensorHandle pool0_handle;").check(
"aoti_torch_empty_strided(3, int_array_2, int_array_3"
).run(code)
# all AtenTensorHandle allocated using aoti_torch__alloc_from_pool are wrapped with RAIIAtenTensorHandle
# otherwise we'll have memory leak
FileCheck().check_count(
"aoti_torch__alloc_from_pool(pool1", 1, exactly=True
).check_count("aoti_torch__alloc_from_pool(pool0", 1, exactly=True).run(code)
FileCheck().check(
"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch__alloc_from_pool(pool1, 0, cached_torch_dtype_int32, 0, int_array_1, int_array_1, &tmp_tensor_handle_0));" # noqa: B950
).check("RAIIAtenTensorHandle(tmp_tensor_handle_0);").check(
"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch__alloc_from_pool(pool0, 0, cached_torch_dtype_float32, 3, int_array_4, int_array_5, &tmp_tensor_handle_1));" # noqa: B950
).check("RAIIAtenTensorHandle(tmp_tensor_handle_1);").run(code)
if __name__ == "__main__":
if HAS_GPU:
run_tests()
| TestMemoryPlanning |
python | walkccc__LeetCode | solutions/1619. Mean of Array After Removing Some Elements/1619.py | {
"start": 0,
"end": 143
} | class ____:
def trimMean(self, arr: list[int]) -> float:
arr.sort()
offset = len(arr) // 20
return mean(arr[offset:-offset])
| Solution |
python | pytorch__pytorch | test/distributed/test_local_tensor.py | {
"start": 2308,
"end": 10098
} | class ____(LocalTensorTestBase):
world_size = 2
def test_local_tensor_dtype_consistency(self):
"""Test that LocalTensor enforces dtype consistency."""
device = torch.device("cpu")
shape = (2, 3)
inconsistent_tensors = {
0: torch.randn(shape, dtype=torch.float32, device=device),
1: torch.randn(
shape, dtype=torch.float64, device=device
), # Different dtype
}
with self.assertRaises(AssertionError):
LocalTensor(inconsistent_tensors)
def test_local_tensor_creation_fails_with_grad_tensors(self):
"""Test that LocalTensor creation fails when local tensors have requires_grad=True."""
device = torch.device("cpu")
shape = (2, 3)
dtype = torch.float32
# Create sample local tensors for different ranks
local_tensors = {
0: torch.randn(shape, dtype=dtype, device=device, requires_grad=True),
1: torch.randn(shape, dtype=dtype, device=device, requires_grad=True),
}
with self.assertRaises(AssertionError):
LocalTensor(local_tensors)
# TODO: test flatten/unflatten
def test_basic_arithmetic_operations(self):
"""Test basic arithmetic operations on LocalTensors."""
device = torch.device("cpu")
shape = (2, 3)
dtype = torch.float32
# Create identical local tensors for consistency tests
base_tensor = torch.randn(shape, dtype=dtype, device=device)
identical_local_tensors = {
0: base_tensor.clone(),
1: base_tensor.clone(),
}
lt1 = LocalTensor(identical_local_tensors)
lt2 = LocalTensor(identical_local_tensors)
# Test addition
result_add = lt1 + lt2
self.assertIsInstance(result_add, LocalTensor)
self.assertEqual(len(result_add._local_tensors), 2)
# Verify the operation was applied to each local tensor
for rank in identical_local_tensors:
expected = identical_local_tensors[rank] + identical_local_tensors[rank]
self.assertEqual(result_add._local_tensors[rank], expected)
# Test multiplication
result_mul = lt1 * 2.0
self.assertIsInstance(result_mul, LocalTensor)
for rank in identical_local_tensors:
expected = identical_local_tensors[rank] * 2.0
self.assertEqual(result_mul._local_tensors[rank], expected)
# TODO: consider an op-info test; we don't actually need to cover all ops
# but it will help make sure views and more exotic things are done
# correctly (in standard subclass style)
def test_mixed_operations_with_regular_tensors(self):
"""Test operations between LocalTensors and regular tensors."""
device = torch.device("cpu")
shape = (2, 3)
dtype = torch.float32
# Create identical local tensors for consistency tests
base_tensor = torch.randn(shape, dtype=dtype, device=device)
identical_local_tensors = {
0: base_tensor.clone(),
1: base_tensor.clone(),
}
lt = LocalTensor(identical_local_tensors)
regular_tensor = torch.ones_like(identical_local_tensors[0])
# Test LocalTensor + regular tensor
result = lt + regular_tensor
self.assertIsInstance(result, LocalTensor)
for rank in identical_local_tensors:
expected = identical_local_tensors[rank] + regular_tensor
self.assertEqual(result._local_tensors[rank], expected)
def test_local_tensor_mode(self):
"""Test LocalTensorMode functionality."""
device = torch.device("cpu")
shape = (2, 3)
dtype = torch.float32
# Create identical local tensors for consistency tests
base_tensor = torch.randn(shape, dtype=dtype, device=device)
identical_local_tensors = {
0: base_tensor.clone(),
1: base_tensor.clone(),
}
lt = LocalTensor(identical_local_tensors)
with LocalTensorMode(lt._ranks):
result = lt + 1.0
self.assertIsInstance(result, LocalTensor)
regular = torch.ones(2, 2)
regular_result = regular + 1.0
self.assertIsInstance(regular, LocalTensor)
self.assertIsInstance(regular_result, LocalTensor)
def test_empty_local_tensors(self):
"""Test behavior with empty local tensors dict."""
# TODO: raise a better error here
with self.assertRaises(StopIteration): # next() on empty iterator
LocalTensor({})
def test_collectives_within_local_tensor_mode(self):
"""Test that collective operations work within LocalTensorMode context."""
test_tensors = {
0: torch.tensor([[1.0, 2.0], [3.0, 4.0]]),
1: torch.tensor([[5.0, 6.0], [7.0, 8.0]]),
}
lt = LocalTensor(test_tensors)
fake_pg = torch.distributed.distributed_c10d._get_default_group()
with LocalTensorMode(lt._ranks):
# Test all_reduce within mode
lt_sum = LocalTensor({k: v.clone() for k, v in test_tensors.items()})
dist.all_reduce(lt_sum, group=fake_pg)
expected_sum = torch.tensor([[6.0, 8.0], [10.0, 12.0]])
for rank in test_tensors:
self.assertEqual(lt_sum._local_tensors[rank], expected_sum)
# Test broadcast within mode
lt_broadcast = LocalTensor({k: v.clone() for k, v in test_tensors.items()})
dist.broadcast(lt_broadcast, src=0, group=fake_pg)
for rank in test_tensors:
self.assertEqual(lt_broadcast._local_tensors[rank], test_tensors[0])
# Test that regular operations still work
result = lt + 1.0
self.assertIsInstance(result, LocalTensor)
def test_scalar_mul_reduction_bug(self):
with LocalTensorMode(self.world_size):
mesh = self.build_device_mesh()
tensor = torch.tensor([10, 10]).float()
dt = distribute_tensor(tensor, device_mesh=mesh, placements=[Shard(0)])
y = dt.sum() * 1 # noqa: F841
tensor = torch.arange(10).reshape(10, 1).float().requires_grad_()
dt = distribute_tensor(tensor, device_mesh=mesh, placements=[Shard(0)])
print(dt.sum() * 1, dt.sum() * 2, dt.sum() * 3)
def test_uneven_sharding_mean_bug(self):
with LocalTensorMode(self.world_size):
mesh = self.build_device_mesh()
tensor = torch.arange(12).reshape(-1, 4).float()
dt = distribute_tensor(tensor, device_mesh=mesh, placements=[Shard(0)])
mean = dt.mean()
self.assertEqual(mean.placements, [Replicate()])
full = mean.full_tensor()
self.assertEqual(tensor.mean(), full)
def test_uneven_sharding_prod(self):
with LocalTensorMode(self.world_size):
mesh = self.build_device_mesh()
tensor = (torch.arange(12) + 1).reshape(-1, 4).float()
dt = distribute_tensor(tensor, device_mesh=mesh, placements=[Shard(0)])
x = dt.prod()
full = x.full_tensor()
self.assertEqual(tensor.prod(), full)
def test_even_sharding_mean_is_partial(self):
with LocalTensorMode(self.world_size):
mesh = self.build_device_mesh()
tensor = torch.arange(16).reshape(4, 4).float()
dt = distribute_tensor(tensor, device_mesh=mesh, placements=[Shard(0)])
mean = dt.mean()
full = mean.full_tensor()
self.assertEqual(tensor.mean(), full)
self.assertEqual(mean.placements, [Partial("avg")])
| TestLocalTensorWorld2 |
python | dask__dask | dask/tests/test_delayed.py | {
"start": 3093,
"end": 25834
} | class ____:
a: int
@pytest.mark.parametrize("cls", (ANonFrozenDataClass, AFrozenDataClass))
def test_delayed_with_dataclass(cls):
literal = delayed(3)
with_class = delayed({"data": cls(a=literal)})
def return_nested(obj):
return obj["data"].a
final = delayed(return_nested)(with_class)
assert final.compute() == 3
def test_delayed_with_dataclass_with_custom_init():
@dataclass()
class ADataClass:
a: int # type: ignore[annotation-unchecked]
def __init__(self, b: int):
self.a = b
literal = dask.delayed(3)
with pytest.raises(TypeError) as e:
dask.delayed({"data": ADataClass(b=literal)})
e.match(r"ADataClass")
e.match(r"custom __init__ is not supported")
def test_delayed_with_dataclass_with_eager_custom_init():
@dataclass()
class ADataClass:
a: int # type: ignore[annotation-unchecked]
def __init__(self, b: int):
self.a = b
with_class = delayed({"data": ADataClass(b=3)})
def return_nested(obj):
return obj["data"].a
final = delayed(return_nested)(with_class)
assert final.compute() == 3
def test_delayed_with_eager_dataclass_with_set_init_false_field():
@dataclass
class ADataClass:
a: int # type: ignore[annotation-unchecked]
b: int = field(init=False) # type: ignore[annotation-unchecked]
def prep_dataclass(a):
data = ADataClass(a=a)
data.b = 4
return data
with_class = delayed({"data": prep_dataclass(3)})
def return_nested(obj):
return obj["data"].a
final = delayed(return_nested)(with_class)
assert final.compute() == 3
def test_delayed_with_dataclass_with_set_init_false_field():
@dataclass
class ADataClass:
a: int # type: ignore[annotation-unchecked]
b: int = field(init=False) # type: ignore[annotation-unchecked]
literal = dask.delayed(3)
def prep_dataclass(a):
data = ADataClass(a=a)
data.b = 4
return data
with pytest.raises(ValueError) as e:
dask.delayed(prep_dataclass(literal))
e.match(r"ADataClass")
e.match(r"`init=False` are not supported")
def test_delayed_with_dataclass_with_unset_init_false_field():
@dataclass
class ADataClass:
a: int # type: ignore[annotation-unchecked]
b: int = field(init=False) # type: ignore[annotation-unchecked]
literal = dask.delayed(3)
with_class = delayed({"data": ADataClass(a=literal)})
def return_nested(obj):
return obj["data"].a
final = delayed(return_nested)(with_class)
assert final.compute() == 3
def test_operators():
a = delayed([1, 2, 3])
assert a[0].compute() == 1
assert (a + a).compute() == [1, 2, 3, 1, 2, 3]
b = delayed(2)
assert a[:b].compute() == [1, 2]
a = delayed(10)
assert (a + 1).compute() == 11
assert (1 + a).compute() == 11
assert (a >> 1).compute() == 5
assert (a > 2).compute()
assert (a**2).compute() == 100
class dummy:
def __matmul__(self, other):
return 4
c = delayed(dummy())
d = delayed(dummy())
assert (c @ d).compute() == 4
def test_methods():
a = delayed("a b c d e")
assert a.split(" ").compute() == ["a", "b", "c", "d", "e"]
assert a.upper().replace("B", "A").split().count("A").compute() == 2
assert a.split(" ", pure=True).key == a.split(" ", pure=True).key
o = a.split(" ", dask_key_name="test")
assert o.key == "test"
def test_attributes():
a = delayed(2 + 1j)
assert a.real._key == a.real._key
assert a.real.compute() == 2
assert a.imag.compute() == 1
assert (a.real + a.imag).compute() == 3
def test_method_getattr_call_same_task():
a = delayed([1, 2, 3])
o = a.index(1)
# Don't getattr the method, then call in separate task
tasks = {v.func for v in o.__dask_graph__().values() if isinstance(v, Task)}
assert tasks
assert getattr not in tasks
def test_np_dtype_of_delayed():
# This used to result in a segfault due to recursion, see
# https://github.com/dask/dask/pull/4374#issuecomment-454381465
np = pytest.importorskip("numpy")
x = delayed(1)
with pytest.raises(TypeError):
np.dtype(x)
assert delayed(np.array([1], dtype="f8")).dtype.compute() == np.dtype("f8")
def test_delayed_visualise_warn():
# Raise a warning when user calls visualise()
# instead of visualize()
def inc(x):
return x + 1
z = dask.delayed(inc)(1)
z.compute()
with pytest.warns(
UserWarning, match="dask.delayed objects have no `visualise` method"
):
z.visualise(file_name="desk_graph.svg")
# with no args
with pytest.warns(
UserWarning, match="dask.delayed objects have no `visualise` method"
):
z.visualise()
def test_delayed_errors():
a = delayed([1, 2, 3])
# Immutable
pytest.raises(TypeError, lambda: setattr(a, "foo", 1))
pytest.raises(TypeError, lambda: setitem(a, 1, 0))
# Can't iterate, or check if contains
pytest.raises(TypeError, lambda: 1 in a)
pytest.raises(TypeError, lambda: list(a))
# No dynamic generation of magic/hidden methods
pytest.raises(AttributeError, lambda: a._hidden())
# Truth of delayed forbidden
pytest.raises(TypeError, lambda: bool(a))
def test_common_subexpressions():
a = delayed([1, 2, 3])
res = a[0] + a[0]
assert a[0].key in res.dask
assert a.key in res.dask
assert len(res.dask) == 3
def test_lists():
a = delayed(1)
b = delayed(2)
c = delayed(sum)([a, b])
assert c.compute() == 3
def test_literates():
a = delayed(1)
b = a + 1
lit = (a, b, 3)
assert delayed(lit).compute() == (1, 2, 3)
lit = [a, b, 3]
assert delayed(lit).compute() == [1, 2, 3]
lit = {a, b, 3}
assert delayed(lit).compute() == {1, 2, 3}
lit = {a: "a", b: "b", 3: "c"}
assert delayed(lit).compute() == {1: "a", 2: "b", 3: "c"}
assert delayed(lit)[a].compute() == "a"
lit = {"a": a, "b": b, "c": 3}
assert delayed(lit).compute() == {"a": 1, "b": 2, "c": 3}
assert delayed(lit)["a"].compute() == 1
def test_literates_keys():
a = delayed(1)
b = a + 1
lit = (a, b, 3)
assert delayed(lit).key != delayed(lit).key
assert delayed(lit, pure=True).key == delayed(lit, pure=True).key
def test_lists_are_concrete():
a = delayed(1)
b = delayed(2)
c = delayed(max)([[a, 10], [b, 20]], key=lambda x: x[0])[1]
assert c.compute() == 20
@pytest.mark.parametrize("typ", [list, tuple, set])
def test_iterators(typ):
a = delayed(1)
b = delayed(2)
c = delayed(sum)(iter(typ([a, b])))
x = c.compute()
assert x == 3
def f(seq):
return sum(seq)
c = delayed(f)(iter(typ([a, b])))
assert c.compute() == 3
def test_traverse_false():
# Create a list with a dask value, and test that it's not computed
def fail(*args):
raise ValueError("shouldn't have computed")
a = delayed(fail)()
# list
x = [a, 1, 2, 3]
res = delayed(x, traverse=False).compute()
assert len(res) == 4
assert res[0] is a
assert res[1:] == x[1:]
# tuple that looks like a task
x = (fail, a, (fail, a))
res = delayed(x, traverse=False).compute()
assert isinstance(res, tuple)
assert res[0] == fail
assert res[1] is a
# list containing task-like-things
x = [1, (fail, a), a]
res = delayed(x, traverse=False).compute()
assert isinstance(res, list)
assert res[0] == 1
assert res[1][0] == fail and res[1][1] is a
assert res[2] is a
# traverse=False still hits top level
b = delayed(1)
x = delayed(b, traverse=False)
assert x.compute() == 1
def test_pure():
v1 = delayed(add, pure=True)(1, 2)
v2 = delayed(add, pure=True)(1, 2)
assert v1.key == v2.key
myrand = delayed(random)
assert myrand().key != myrand().key
def test_pure_global_setting():
# delayed functions
func = delayed(add)
with dask.config.set(delayed_pure=True):
assert func(1, 2).key == func(1, 2).key
with dask.config.set(delayed_pure=False):
assert func(1, 2).key != func(1, 2).key
func = delayed(add, pure=True)
with dask.config.set(delayed_pure=False):
assert func(1, 2).key == func(1, 2).key
# delayed objects
assert delayed(1).key != delayed(1).key
with dask.config.set(delayed_pure=True):
assert delayed(1).key == delayed(1).key
with dask.config.set(delayed_pure=False):
assert delayed(1, pure=True).key == delayed(1, pure=True).key
# delayed methods
data = delayed([1, 2, 3])
assert data.index(1).key != data.index(1).key
with dask.config.set(delayed_pure=True):
assert data.index(1).key == data.index(1).key
assert data.index(1, pure=False).key != data.index(1, pure=False).key
with dask.config.set(delayed_pure=False):
assert data.index(1, pure=True).key == data.index(1, pure=True).key
# magic methods always pure
with dask.config.set(delayed_pure=False):
assert data.index.key == data.index.key
element = data[0]
assert (element + element).key == (element + element).key
def test_nout():
func = delayed(lambda x: (x, -x), nout=2, pure=True)
x = func(1)
assert len(x) == 2
a, b = x
assert compute(a, b) == (1, -1)
assert a._length is None
assert b._length is None
pytest.raises(TypeError, lambda: len(a))
pytest.raises(TypeError, lambda: list(a))
pytest.raises(ValueError, lambda: delayed(add, nout=-1))
pytest.raises(ValueError, lambda: delayed(add, nout=True))
func = delayed(add, nout=None)
a = func(1)
assert a._length is None
pytest.raises(TypeError, lambda: list(a))
pytest.raises(TypeError, lambda: len(a))
func = delayed(lambda x: (x,), nout=1, pure=True)
x = func(1)
assert len(x) == 1
(a,) = x
assert a.compute() == 1
assert a._length is None
pytest.raises(TypeError, lambda: len(a))
func = delayed(lambda x: tuple(), nout=0, pure=True)
x = func(1)
assert len(x) == 0
assert x.compute() == tuple()
@pytest.mark.parametrize(
"x",
[[1, 2], (1, 2), (add, 1, 2), [], ()],
)
def test_nout_with_tasks(x):
length = len(x)
d = delayed(x, nout=length)
assert len(d) == length
assert len(list(d)) == length
assert d.compute() == x
def test_kwargs():
def mysum(a, b, c=(), **kwargs):
return a + b + sum(c) + sum(kwargs.values())
dmysum = delayed(mysum)
ten = dmysum(1, 2, c=[delayed(3), 0], four=dmysum(2, 2))
assert ten.compute() == 10
dmysum = delayed(mysum, pure=True)
c = [delayed(3), 0]
ten = dmysum(1, 2, c=c, four=dmysum(2, 2))
assert ten.compute() == 10
assert dmysum(1, 2, c=c, four=dmysum(2, 2)).key == ten.key
assert dmysum(1, 2, c=c, four=dmysum(2, 3)).key != ten.key
assert dmysum(1, 2, c=c, four=4).key != ten.key
assert dmysum(1, 2, c=c, four=4).key != dmysum(2, 2, c=c, four=4).key
def test_custom_delayed():
x = Tuple({"a": 1, "b": 2, "c": (add, "a", "b")}, ["a", "b", "c"])
x2 = delayed(add, pure=True)(x, (4, 5, 6))
n = delayed(len, pure=True)(x)
assert delayed(len, pure=True)(x).key == n.key
assert x2.compute() == (1, 2, 3, 4, 5, 6)
assert compute(n, x2, x) == (3, (1, 2, 3, 4, 5, 6), (1, 2, 3))
@pytest.mark.filterwarnings("ignore:The dask.delayed:UserWarning")
def test_array_delayed():
np = pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
arr = np.arange(100).reshape((10, 10))
darr = da.from_array(arr, chunks=(5, 5))
val = delayed(sum)([arr, darr, 1])
assert isinstance(val, Delayed)
assert np.allclose(val.compute(), arr + arr + 1)
assert val.sum().compute() == (arr + arr + 1).sum()
assert val[0, 0].compute() == (arr + arr + 1)[0, 0]
task, dsk = to_task_dask(darr)
assert not darr.dask.keys() - dsk.keys()
diff = dsk.keys() - darr.dask.keys()
assert len(diff) == 1
delayed_arr = delayed(darr)
assert (delayed_arr.compute() == arr).all()
def test_array_delayed_complex_optimization():
# Ensure that when multiple collections are passed to a Delayed function,
# they are optimized together
np = pytest.importorskip("numpy")
pytest.importorskip("dask.array")
from dask.array.core import from_func
from dask.array.utils import assert_eq
called = False
def only_once():
nonlocal called
if called:
raise RuntimeError("Already executed")
called = True
return np.arange(100).reshape((10, 10))
darr = from_func(only_once, shape=(10, 10), dtype=int)
a = darr + 1
b = darr + 2
val = delayed(sum)([a, b, 1])
assert isinstance(val, Delayed)
np_arr = np.arange(100).reshape((10, 10))
assert_eq(val.compute(), (np_arr + 1) + (np_arr + 2) + 1)
def test_array_delayed_complex_optimization_kwargs():
# Ensure that collections that if multiple collections are passed to a
# Delayed function that they are optimized together
np = pytest.importorskip("numpy")
pytest.importorskip("dask.array")
from dask.array.core import from_func
from dask.array.utils import assert_eq
called = False
def only_once():
nonlocal called
if called:
raise RuntimeError("Already executed")
called = True
return np.arange(100).reshape((10, 10))
darr = from_func(only_once, shape=(10, 10), dtype=int)
a = darr + 1
b = darr + 2
def sum_kwargs_only(*, a, b, c):
return sum([a, b, c])
val = delayed(sum_kwargs_only)(a=a, b=b, c=1)
assert isinstance(val, Delayed)
np_arr = np.arange(100).reshape((10, 10))
assert_eq(val.compute(), (np_arr + 1) + (np_arr + 2) + 1)
def test_array_bag_delayed():
np = pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
arr1 = np.arange(100).reshape((10, 10))
arr2 = arr1.dot(arr1.T)
darr1 = da.from_array(arr1, chunks=(5, 5))
darr2 = da.from_array(arr2, chunks=(5, 5))
b = db.from_sequence([1, 2, 3])
seq = [arr1, arr2, darr1, darr2, b]
out = delayed(sum)([i.sum() for i in seq])
assert out.compute() == 2 * arr1.sum() + 2 * arr2.sum() + sum([1, 2, 3])
def test_delayed_picklable():
# Delayed
x = delayed(divmod, nout=2, pure=True)(1, 2)
y = pickle.loads(pickle.dumps(x))
assert x.dask == y.dask
assert x._key == y._key
assert x._length == y._length
# DelayedLeaf
x = delayed(1j + 2)
y = pickle.loads(pickle.dumps(x))
assert x.dask == y.dask
assert x._key == y._key
assert x._nout == y._nout
assert x._pure == y._pure
# DelayedAttr
x = x.real
y = pickle.loads(pickle.dumps(x))
assert x._obj._key == y._obj._key
assert x._obj.dask == y._obj.dask
assert x._attr == y._attr
assert x._key == y._key
def test_delayed_compute_forward_kwargs():
x = delayed(1) + 2
x.compute(bogus_keyword=10)
def test_delayed_method_descriptor():
delayed(bytes.decode)(b"") # does not err
def test_delayed_callable():
f = delayed(add, pure=True)
v = f(1, 2)
assert v.compute() == 3
assert f.compute() == add
def test_delayed_name_on_call():
f = delayed(add, pure=True)
assert f(1, 2, dask_key_name="foo")._key == "foo"
def test_callable_obj():
class Foo:
def __init__(self, a):
self.a = a
def __call__(self):
return 2
foo = Foo(1)
f = delayed(foo)
assert f.compute() is foo
assert f.a.compute() == 1
assert f().compute() == 2
def identity(x):
return x
def test_deterministic_name():
func = delayed(identity, pure=True)
data1 = {"x": 1, "y": 25, "z": [1, 2, 3]}
data2 = {"x": 1, "y": 25, "z": [1, 2, 3]}
assert func(data1)._key == func(data2)._key
def test_sensitive_to_partials():
assert (
delayed(partial(add, 10), pure=True)(2)._key
!= delayed(partial(add, 20), pure=True)(2)._key
)
def test_delayed_name():
assert delayed(1)._key.startswith("int-")
assert delayed(1, pure=True)._key.startswith("int-")
assert delayed(1, name="X")._key == "X"
def myfunc(x):
return x + 1
assert delayed(myfunc)(1).key.startswith("myfunc")
def test_finalize_name():
pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
v = delayed([x])
assert set(x.dask).issubset(v.dask)
def key(s):
if isinstance(s, tuple):
s = s[0]
# Ignore _ in 'ones_like'
return s.split("-")[0].replace("_", "")
assert all(key(k).isalpha() for k in v.dask)
def test_keys_from_array():
pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
from dask.array.utils import _check_dsk
X = da.ones((10, 10), chunks=5).to_delayed().flatten()
xs = [delayed(inc)(x) for x in X]
_check_dsk(xs[0].dask)
# Mostly copied from https://github.com/pytoolz/toolz/pull/220
def test_delayed_decorator_on_method():
class A:
BASE = 10
def __init__(self, base):
self.BASE = base
@delayed
def addmethod(self, x, y):
return self.BASE + x + y
@classmethod
@delayed
def addclass(cls, x, y):
return cls.BASE + x + y
@staticmethod
@delayed
def addstatic(x, y):
return x + y
a = A(100)
assert a.addmethod(3, 4).compute() == 107
assert A.addmethod(a, 3, 4).compute() == 107
assert a.addclass(3, 4).compute() == 17
assert A.addclass(3, 4).compute() == 17
assert a.addstatic(3, 4).compute() == 7
assert A.addstatic(3, 4).compute() == 7
# We want the decorated methods to be actual methods for instance methods
# and class methods since their first arguments are the object and the
# class respectively. Or in other words, the first argument is generated by
# the runtime based on the object/class before the dot.
assert isinstance(a.addmethod, types.MethodType)
assert isinstance(A.addclass, types.MethodType)
# For static methods (and regular functions), the decorated methods should
# be Delayed objects.
assert isinstance(A.addstatic, Delayed)
def test_attribute_of_attribute():
x = delayed(123)
assert isinstance(x.a, Delayed)
assert isinstance(x.a.b, Delayed)
assert isinstance(x.a.b.c, Delayed)
def test_check_meta_flag():
pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from pandas import Series
a = Series(["a", "b", "a"], dtype="category")
b = Series(["a", "c", "a"], dtype="category")
da = delayed(lambda x: x)(a)
db = delayed(lambda x: x)(b)
c = dd.from_delayed([da, db], verify_meta=False)
dd.utils.assert_eq(c, c)
def modlevel_eager(x):
return x + 1
@delayed
def modlevel_delayed1(x):
return x + 1
@delayed(pure=False)
def modlevel_delayed2(x):
return x + 1
@pytest.mark.parametrize(
"f",
[
delayed(modlevel_eager),
pytest.param(modlevel_delayed1, marks=pytest.mark.xfail(reason="#3369")),
pytest.param(modlevel_delayed2, marks=pytest.mark.xfail(reason="#3369")),
],
)
def test_pickle(f):
d = f(2)
d = pickle.loads(pickle.dumps(d, protocol=pickle.HIGHEST_PROTOCOL))
assert d.compute() == 3
@pytest.mark.parametrize(
"f", [delayed(modlevel_eager), modlevel_delayed1, modlevel_delayed2]
)
def test_cloudpickle(f):
d = f(2)
d = cloudpickle.loads(cloudpickle.dumps(d, protocol=pickle.HIGHEST_PROTOCOL))
assert d.compute() == 3
def test_dask_layers():
d1 = delayed(1)
assert d1.dask.layers.keys() == {d1.key}
assert d1.dask.dependencies == {d1.key: set()}
assert d1.__dask_layers__() == (d1.key,)
d2 = modlevel_delayed1(d1)
assert d2.dask.layers.keys() == {d1.key, d2.key}
assert d2.dask.dependencies == {d1.key: set(), d2.key: {d1.key}}
assert d2.__dask_layers__() == (d2.key,)
hlg = HighLevelGraph.from_collections("foo", {"alias": d2.key}, dependencies=[d2])
with pytest.raises(ValueError, match="not in"):
Delayed("alias", hlg)
explicit = Delayed("alias", hlg, layer="foo")
assert explicit.__dask_layers__() == ("foo",)
explicit.dask.validate()
def test_annotations_survive_optimization():
with dask.annotate(foo="bar"):
graph = HighLevelGraph.from_collections(
"b",
{"a": 1, "b": (inc, "a"), "c": (inc, "b")},
[],
)
d = Delayed("b", graph)
assert type(d.dask) is HighLevelGraph
assert len(d.dask.layers) == 1
assert len(d.dask.layers["b"]) == 3
assert d.dask.layers["b"].annotations == {"foo": "bar"}
optimized = collections_to_expr([d]).optimize()
assert optimized.__dask_annotations__() == {
"foo": dict.fromkeys(optimized.__dask_graph__(), "bar")
}
def test_delayed_function_attributes_forwarded():
@delayed
def add(x, y):
"""This is a docstring"""
return x + y
assert add.__name__ == "add"
assert add.__doc__ == "This is a docstring"
assert add.__wrapped__(1, 2) == 3
def test_delayed_fusion():
@delayed
def test(i):
return i + 1
@delayed
def test2(i):
return i + 2
@delayed
def test3(i):
return i + 3
obj = test3(test2(test(10)))
dsk = collections_to_expr([obj]).__dask_graph__()
assert len(dsk) == 3
obj2 = test3(test2(test(10)))
with dask.config.set({"optimization.fuse.delayed": True}):
dsk2 = collections_to_expr([obj]).optimize().__dask_graph__()
result = dask.compute(obj2)
assert len(dsk2) == 2
assert dask.compute(obj) == result
def test_p2p_as_delayed():
pytest.importorskip("distributed")
pytest.importorskip("numpy")
pytest.importorskip("dask.dataframe")
from distributed.utils_test import gen_cluster
import dask.dataframe as dd
@gen_cluster(client=True)
async def _test(c, s, *workers):
test_df = dd.from_dict(
{
"partition": [0, 1, 2, 3, 0, 1, 2, 3],
"value": [1, 2, 3, 4, 5, 6, 7, 8],
},
npartitions=2,
)
part_df = test_df.shuffle("partition", force=True, shuffle_method="p2p")
@delayed
def delayed_func(x):
return x
delay_df = delayed_func(part_df)
dd.utils.assert_eq(
await c.gather(c.compute(delay_df)),
await c.gather(c.compute(part_df)),
)
_test()
| AFrozenDataClass |
python | PyCQA__pylint | tests/functional/ext/no_self_use/no_self_use.py | {
"start": 2923,
"end": 3110
} | class ____:
"""Don't emit no-self-use for overload methods."""
@overload
def a(self, var): ...
@overload
def a(self, var): ...
def a(self, var):
pass
| Foo3 |
python | pydantic__pydantic | tests/mypy/outputs/mypy-plugin-strict_ini/plugin_fail.py | {
"start": 2183,
"end": 2326
} | class ____(BaseModel, from_attributes={}):
# MYPY: error: Invalid value for "Config.from_attributes" [pydantic-config]
pass
| KwargsBadConfig1 |
python | fsspec__filesystem_spec | fsspec/implementations/local.py | {
"start": 12584,
"end": 16936
} | class ____(io.IOBase):
def __init__(
self, path, mode, autocommit=True, fs=None, compression=None, **kwargs
):
logger.debug("open file: %s", path)
self.path = path
self.mode = mode
self.fs = fs
self.f = None
self.autocommit = autocommit
self.compression = get_compression(path, compression)
self.blocksize = io.DEFAULT_BUFFER_SIZE
self._open()
def _open(self):
if self.f is None or self.f.closed:
if self.autocommit or "w" not in self.mode:
self.f = open(self.path, mode=self.mode)
if self.compression:
compress = compr[self.compression]
self.f = compress(self.f, mode=self.mode)
else:
# TODO: check if path is writable?
i, name = tempfile.mkstemp()
os.close(i) # we want normal open and normal buffered file
self.temp = name
self.f = open(name, mode=self.mode)
if "w" not in self.mode:
self.size = self.f.seek(0, 2)
self.f.seek(0)
self.f.size = self.size
def _fetch_range(self, start, end):
# probably only used by cached FS
if "r" not in self.mode:
raise ValueError
self._open()
self.f.seek(start)
return self.f.read(end - start)
def __setstate__(self, state):
self.f = None
loc = state.pop("loc", None)
self.__dict__.update(state)
if "r" in state["mode"]:
self.f = None
self._open()
self.f.seek(loc)
def __getstate__(self):
d = self.__dict__.copy()
d.pop("f")
if "r" in self.mode:
d["loc"] = self.f.tell()
else:
if not self.f.closed:
raise ValueError("Cannot serialise open write-mode local file")
return d
def commit(self):
if self.autocommit:
raise RuntimeError("Can only commit if not already set to autocommit")
try:
shutil.move(self.temp, self.path)
except PermissionError as e:
# shutil.move raises PermissionError if os.rename
# and the default copy2 fallback with shutil.copystats fail.
# The file should be there nonetheless, but without copied permissions.
# If it doesn't exist, there was no permission to create the file.
if not os.path.exists(self.path):
raise e
else:
# If PermissionError is not raised, permissions can be set.
try:
mask = 0o666
os.chmod(self.path, mask & ~get_umask(mask))
except RuntimeError:
pass
def discard(self):
if self.autocommit:
raise RuntimeError("Cannot discard if set to autocommit")
os.remove(self.temp)
def readable(self) -> bool:
return True
def writable(self) -> bool:
return "r" not in self.mode
def read(self, *args, **kwargs):
return self.f.read(*args, **kwargs)
def write(self, *args, **kwargs):
return self.f.write(*args, **kwargs)
def tell(self, *args, **kwargs):
return self.f.tell(*args, **kwargs)
def seek(self, *args, **kwargs):
return self.f.seek(*args, **kwargs)
def seekable(self, *args, **kwargs):
return self.f.seekable(*args, **kwargs)
def readline(self, *args, **kwargs):
return self.f.readline(*args, **kwargs)
def readlines(self, *args, **kwargs):
return self.f.readlines(*args, **kwargs)
def close(self):
return self.f.close()
def truncate(self, size=None) -> int:
return self.f.truncate(size)
@property
def closed(self):
return self.f.closed
def fileno(self):
return self.raw.fileno()
def flush(self) -> None:
self.f.flush()
def __iter__(self):
return self.f.__iter__()
def __getattr__(self, item):
return getattr(self.f, item)
def __enter__(self):
self._incontext = True
return self
def __exit__(self, exc_type, exc_value, traceback):
self._incontext = False
self.f.__exit__(exc_type, exc_value, traceback)
| LocalFileOpener |
python | networkx__networkx | networkx/algorithms/isomorphism/ismags.py | {
"start": 10241,
"end": 11513
} | class ____:
"""Class to handle getitem for undirected edges.
Note that ``items()`` iterates over one of the two representations of the edge
(u, v) and (v, u). So this technically doesn't violate the Mapping
invariant that (k,v) pairs reported by ``items()`` satisfy ``.__getitem__(k) == v``.
But we are violating the spirit of the protocol by having keys available
for lookup by ``__getitem__`` that are not reported by ``items()``.
Note that if we used frozensets for undirected edges we would have the same
behavior we see here. You could ``__getitem__`` either ``{u, v}`` or ``{v, u}``
and get the same value -- yet ``items()`` would only report one of the two.
So from that perspective we *are* following the Mapping protocol. Our keys
are undirected edges. We are using 2-tuples as an imperfect representation
of these edges. We are not using 2-tuples as keys. Only as imperfect edges
and we use the edges as keys.
"""
def __init__(self, edge_dict):
self.edge_dict = edge_dict
def __getitem__(self, edge):
if edge in self.edge_dict:
return self.edge_dict[edge]
return self.edge_dict[edge[::-1]]
def items(self):
return self.edge_dict.items()
| EdgeLookup |
python | kamyu104__LeetCode-Solutions | Python/subarrays-with-k-different-integers.py | {
"start": 1080,
"end": 1669
} | class ____(object):
def subarraysWithKDistinct(self, A, K):
"""
:type A: List[int]
:type K: int
:rtype: int
"""
window1, window2 = Window(), Window()
result, left1, left2 = 0, 0, 0
for i in A:
window1.add(i)
while window1.size() > K:
window1.remove(A[left1])
left1 += 1
window2.add(i)
while window2.size() >= K:
window2.remove(A[left2])
left2 += 1
result += left2-left1
return result
| Solution2 |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-openai/tests/test_openai_responses.py | {
"start": 24862,
"end": 26444
} | class ____(OpenAIResponses):
def __init__(self):
pass
def test__parse_response_output(response_output: List[ResponseOutputItem]):
result = OpenAIResponsesMock()._parse_response_output(output=response_output)
assert (
len(
[
block
for block in result.message.blocks
if isinstance(block, ThinkingBlock)
]
)
== 4
)
assert (
len([block for block in result.message.blocks if isinstance(block, TextBlock)])
== 1
)
assert (
len(
[
block
for block in result.message.blocks
if isinstance(block, ToolCallBlock)
]
)
== 1
)
tool_call = [
block for block in result.message.blocks if isinstance(block, ToolCallBlock)
][0]
assert tool_call.tool_call_id == "1"
assert tool_call.tool_name == "test"
assert tool_call.tool_kwargs == "{'hello': 'world'}"
assert [
block for block in result.message.blocks if isinstance(block, ThinkingBlock)
][0].content == "hello world\nthis is a test"
assert [
block for block in result.message.blocks if isinstance(block, ThinkingBlock)
][1].content == "another test"
assert [
block for block in result.message.blocks if isinstance(block, ThinkingBlock)
][2].content == "another test\nhello"
assert [
block for block in result.message.blocks if isinstance(block, ThinkingBlock)
][3].content == "hello\nworld"
| OpenAIResponsesMock |
python | aimacode__aima-python | csp.py | {
"start": 49880,
"end": 55386
} | class ____(NaryCSP):
def __init__(self, puzzle):
variables = []
for i, line in enumerate(puzzle):
# print line
for j, element in enumerate(line):
if element == '_':
var1 = str(i)
if len(var1) == 1:
var1 = "0" + var1
var2 = str(j)
if len(var2) == 1:
var2 = "0" + var2
variables.append("X" + var1 + var2)
domains = {}
for var in variables:
domains[var] = set(range(1, 10))
constraints = []
for i, line in enumerate(puzzle):
for j, element in enumerate(line):
if element != '_' and element != '*':
# down - column
if element[0] != '':
x = []
for k in range(i + 1, len(puzzle)):
if puzzle[k][j] != '_':
break
var1 = str(k)
if len(var1) == 1:
var1 = "0" + var1
var2 = str(j)
if len(var2) == 1:
var2 = "0" + var2
x.append("X" + var1 + var2)
constraints.append(Constraint(x, sum_constraint(element[0])))
constraints.append(Constraint(x, all_diff_constraint))
# right - line
if element[1] != '':
x = []
for k in range(j + 1, len(puzzle[i])):
if puzzle[i][k] != '_':
break
var1 = str(i)
if len(var1) == 1:
var1 = "0" + var1
var2 = str(k)
if len(var2) == 1:
var2 = "0" + var2
x.append("X" + var1 + var2)
constraints.append(Constraint(x, sum_constraint(element[1])))
constraints.append(Constraint(x, all_diff_constraint))
super().__init__(domains, constraints)
self.puzzle = puzzle
def display(self, assignment=None):
for i, line in enumerate(self.puzzle):
puzzle = ""
for j, element in enumerate(line):
if element == '*':
puzzle += "[*]\t"
elif element == '_':
var1 = str(i)
if len(var1) == 1:
var1 = "0" + var1
var2 = str(j)
if len(var2) == 1:
var2 = "0" + var2
var = "X" + var1 + var2
if assignment is not None:
if isinstance(assignment[var], set) and len(assignment[var]) == 1:
puzzle += "[" + str(first(assignment[var])) + "]\t"
elif isinstance(assignment[var], int):
puzzle += "[" + str(assignment[var]) + "]\t"
else:
puzzle += "[_]\t"
else:
puzzle += "[_]\t"
else:
puzzle += str(element[0]) + "\\" + str(element[1]) + "\t"
print(puzzle)
# ______________________________________________________________________________
# Cryptarithmetic Problem
# [Figure 6.2]
# T W O + T W O = F O U R
two_two_four = NaryCSP({'T': set(range(1, 10)), 'F': set(range(1, 10)),
'W': set(range(0, 10)), 'O': set(range(0, 10)), 'U': set(range(0, 10)), 'R': set(range(0, 10)),
'C1': set(range(0, 2)), 'C2': set(range(0, 2)), 'C3': set(range(0, 2))},
[Constraint(('T', 'F', 'W', 'O', 'U', 'R'), all_diff_constraint),
Constraint(('O', 'R', 'C1'), lambda o, r, c1: o + o == r + 10 * c1),
Constraint(('W', 'U', 'C1', 'C2'), lambda w, u, c1, c2: c1 + w + w == u + 10 * c2),
Constraint(('T', 'O', 'C2', 'C3'), lambda t, o, c2, c3: c2 + t + t == o + 10 * c3),
Constraint(('F', 'C3'), eq)])
# S E N D + M O R E = M O N E Y
send_more_money = NaryCSP({'S': set(range(1, 10)), 'M': set(range(1, 10)),
'E': set(range(0, 10)), 'N': set(range(0, 10)), 'D': set(range(0, 10)),
'O': set(range(0, 10)), 'R': set(range(0, 10)), 'Y': set(range(0, 10)),
'C1': set(range(0, 2)), 'C2': set(range(0, 2)), 'C3': set(range(0, 2)),
'C4': set(range(0, 2))},
[Constraint(('S', 'E', 'N', 'D', 'M', 'O', 'R', 'Y'), all_diff_constraint),
Constraint(('D', 'E', 'Y', 'C1'), lambda d, e, y, c1: d + e == y + 10 * c1),
Constraint(('N', 'R', 'E', 'C1', 'C2'), lambda n, r, e, c1, c2: c1 + n + r == e + 10 * c2),
Constraint(('E', 'O', 'N', 'C2', 'C3'), lambda e, o, n, c2, c3: c2 + e + o == n + 10 * c3),
Constraint(('S', 'M', 'O', 'C3', 'C4'), lambda s, m, o, c3, c4: c3 + s + m == o + 10 * c4),
Constraint(('M', 'C4'), eq)])
| Kakuro |
python | jina-ai__jina | tests/unit/orchestrate/flow/flow-construct/test_flow_except.py | {
"start": 4189,
"end": 4868
} | class ____(Executor):
@requests
def craft(self, *args, **kwargs):
raise NotImplementedError
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_on_error_callback(protocol):
f = Flow(protocol=protocol).add(uses=DummyCrafterNotImplemented)
hit = []
def f1(*args):
hit.append('done')
def f2(*args):
hit.append('error')
def f3(*args):
hit.append('always')
with f:
f.index(
from_ndarray(np.random.random([10, 10])),
on_done=f1,
on_error=f2,
on_always=f3,
)
assert hit == ['error', 'always']
| DummyCrafterNotImplemented |
python | rq__rq | rq/worker.py | {
"start": 65525,
"end": 71443
} | class ____(BaseWorker):
def kill_horse(self, sig: signal.Signals = SHUTDOWN_SIGNAL):
"""Kill the horse but catch "No such process" error has the horse could already be dead.
Args:
sig (signal.Signals, optional): _description_. Defaults to SIGKILL.
"""
try:
os.killpg(os.getpgid(self.horse_pid), sig)
self.log.info('Worker %s: killed horse pid %s', self.name, self.horse_pid)
except OSError as e:
if e.errno == errno.ESRCH:
# "No such process" is fine with us
self.log.debug('Worker %s: horse already dead', self.name)
else:
raise
def wait_for_horse(self) -> tuple[Optional[int], Optional[int], Optional['struct_rusage']]:
"""Waits for the horse process to complete.
Uses `0` as argument as to include "any child in the process group of the current process".
"""
pid = stat = rusage = None
with contextlib.suppress(ChildProcessError): # ChildProcessError: [Errno 10] No child processes
pid, stat, rusage = os.wait4(self.horse_pid, 0)
return pid, stat, rusage
def fork_work_horse(self, job: 'Job', queue: 'Queue'):
"""Spawns a work horse to perform the actual work and passes it a job.
This is where the `fork()` actually happens.
Args:
job (Job): The Job that will be ran
queue (Queue): The queue
"""
child_pid = os.fork()
os.environ['RQ_WORKER_ID'] = self.name
os.environ['RQ_JOB_ID'] = job.id
if child_pid == 0:
os.setpgrp()
self.main_work_horse(job, queue)
os._exit(0) # just in case
else:
self._horse_pid = child_pid
self.procline(f'Forked {child_pid} at {time.time()}')
def monitor_work_horse(self, job: 'Job', queue: 'Queue'):
"""The worker will monitor the work horse and make sure that it
either executes successfully or the status of the job is set to
failed
Args:
job (Job): _description_
queue (Queue): _description_
"""
retpid = ret_val = rusage = None
job.started_at = now()
while True:
try:
with self.death_penalty_class(self.job_monitoring_interval, HorseMonitorTimeoutException):
retpid, ret_val, rusage = self.wait_for_horse()
break
except HorseMonitorTimeoutException:
# Horse has not exited yet and is still running.
# Send a heartbeat to keep the worker alive.
self.set_current_job_working_time((now() - job.started_at).total_seconds())
# Kill the job from this side if something is really wrong (interpreter lock/etc).
if job.timeout != -1 and self.current_job_working_time > (job.timeout + 60): # type: ignore
self.heartbeat(self.job_monitoring_interval + 60)
self.kill_horse()
self.wait_for_horse()
break
self.maintain_heartbeats(job)
except OSError as e:
# In case we encountered an OSError due to EINTR (which is
# caused by a SIGINT or SIGTERM signal during
# os.waitpid()), we simply ignore it and enter the next
# iteration of the loop, waiting for the child to end. In
# any other case, this is some other unexpected OS error,
# which we don't want to catch, so we re-raise those ones.
if e.errno != errno.EINTR:
raise
# Send a heartbeat to keep the worker alive.
self.heartbeat()
self.set_current_job_working_time(0)
self._horse_pid = 0 # Set horse PID to 0, horse has finished working
self.log.debug(
'Worker %s: work horse finished for job %s: retpid=%s, ret_val=%s', self.name, job.id, retpid, ret_val
)
if ret_val == os.EX_OK: # The process exited normally.
return
try:
job_status = job.get_status()
except InvalidJobOperation:
return # Job completed and its ttl has expired
if self._stopped_job_id == job.id:
# Work-horse killed deliberately
self.log.warning('Worker %s: job %s stopped by user, moving job to FailedJobRegistry', self.name, job.id)
if job.stopped_callback:
job.execute_stopped_callback(self.death_penalty_class)
self.handle_job_failure(job, queue=queue, exc_string='Job stopped by user, work-horse terminated.')
elif job_status not in [JobStatus.FINISHED, JobStatus.FAILED]:
if not job.ended_at:
job.ended_at = now()
# Unhandled failure: move the job to the failed queue
signal_msg = f' (signal {os.WTERMSIG(ret_val)})' if ret_val and os.WIFSIGNALED(ret_val) else ''
exc_string = f'Work-horse terminated unexpectedly; waitpid returned {ret_val}{signal_msg}; '
self.log.warning('Worker %s: moving job %s to FailedJobRegistry (%s)', self.name, job.id, exc_string)
self.handle_work_horse_killed(job, retpid, ret_val, rusage)
self.handle_job_failure(job, queue=queue, exc_string=exc_string)
def execute_job(self, job: 'Job', queue: 'Queue'):
"""Spawns a work horse to perform the actual work and passes it a job.
The worker will wait for the work horse and make sure it executes
within the given timeout bounds, or will end the work horse with
SIGALRM.
"""
self.prepare_execution(job)
self.fork_work_horse(job, queue)
self.monitor_work_horse(job, queue)
self.set_state(WorkerStatus.IDLE)
| Worker |
python | apache__airflow | helm-tests/tests/helm_tests/apiserver/test_apiserver.py | {
"start": 3094,
"end": 3622
} | class ____:
"""Tests API Server JWT secret."""
def test_should_add_annotations_to_jwt_secret(self):
docs = render_chart(
values={
"jwtSecretAnnotations": {"test_annotation": "test_annotation_value"},
},
show_only=["templates/secrets/jwt-secret.yaml"],
)[0]
assert "annotations" in jmespath.search("metadata", docs)
assert jmespath.search("metadata.annotations", docs)["test_annotation"] == "test_annotation_value"
| TestAPIServerJWTSecret |
python | walkccc__LeetCode | solutions/3530. Maximum Profit from Valid Topological Order in DAG/3530.py | {
"start": 0,
"end": 1040
} | class ____:
def maxProfit(self, n: int, edges: list[list[int]], score: list[int]) -> int:
# need[i] := the bitmask representing all nodes that must be placed before
# node i
need = [0] * n
# dp[mask] := the maximum profit achievable by placing the set of nodes
# represented by `mask`
dp = [-1] * (1 << n)
dp[0] = 0
for u, v in edges:
need[v] |= 1 << u
# Iterate over all subsets of nodes (represented by bitmask `mask`)
for mask in range(1 << n):
if dp[mask] == -1:
continue
# Determine the position of the next node to be placed (1-based).
pos = mask.bit_count() + 1
# Try to place each node `i` that hasn't been placed yet.
for i in range(n):
if mask >> i & 1:
continue
# Check if all dependencies of node `i` are already placed.
if (mask & need[i]) == need[i]:
newMask = mask | 1 << i # Mark node `i` as placed.
dp[newMask] = max(dp[newMask], dp[mask] + score[i] * pos)
return dp[-1]
| Solution |
python | facebookresearch__faiss | tests/test_rabitq.py | {
"start": 51130,
"end": 53715
} | class ____(unittest.TestCase):
"""Test construction and parameter validation for multi-bit RaBitQ."""
def test_valid_nb_bits_range(self):
"""Test that nb_bits 1-9 are valid."""
d = 128
for nb_bits in range(1, 10):
for metric in [faiss.METRIC_L2, faiss.METRIC_INNER_PRODUCT]:
index = faiss.IndexRaBitQ(d, metric, nb_bits)
self.assertEqual(index.d, d)
self.assertEqual(index.metric_type, metric)
self.assertEqual(index.rabitq.nb_bits, nb_bits)
self.assertFalse(index.is_trained)
def test_invalid_nb_bits_zero(self):
"""Test that nb_bits=0 raises error."""
with self.assertRaises(RuntimeError):
faiss.IndexRaBitQ(128, faiss.METRIC_L2, 0)
def test_invalid_nb_bits_too_large(self):
"""Test that nb_bits=10 raises error."""
with self.assertRaises(RuntimeError):
faiss.IndexRaBitQ(128, faiss.METRIC_L2, 10)
def test_code_size_formula(self):
"""Test that code sizes match expected formula for all nb_bits."""
d = 128
for nb_bits in range(1, 10):
index = faiss.IndexRaBitQ(d, faiss.METRIC_L2, nb_bits)
expected_size = compute_expected_code_size(d, nb_bits)
self.assertEqual(
index.code_size,
expected_size,
f"Code size mismatch for nb_bits={nb_bits}",
)
def test_ivf_construction_valid_nb_bits(self):
"""Test IndexIVFRaBitQ construction with valid nb_bits."""
d = 64
nlist = 16
for nb_bits in range(1, 10):
quantizer = faiss.IndexFlat(d, faiss.METRIC_L2)
index = faiss.IndexIVFRaBitQ(
quantizer, d, nlist, faiss.METRIC_L2, True, nb_bits
)
self.assertEqual(index.rabitq.nb_bits, nb_bits)
self.assertEqual(index.d, d)
expected_size = compute_expected_code_size(d, nb_bits)
self.assertEqual(index.code_size, expected_size)
def test_ivf_construction_invalid_nb_bits(self):
"""Test that IndexIVFRaBitQ rejects invalid nb_bits."""
d = 64
nlist = 16
quantizer = faiss.IndexFlat(d, faiss.METRIC_L2)
with self.assertRaises(RuntimeError):
faiss.IndexIVFRaBitQ(quantizer, d, nlist, faiss.METRIC_L2, True, 0)
with self.assertRaises(RuntimeError):
faiss.IndexIVFRaBitQ(
quantizer, d, nlist, faiss.METRIC_L2, True, 10
)
| TestMultiBitRaBitQConstruction |
python | huggingface__transformers | tests/models/mobilebert/test_modeling_mobilebert.py | {
"start": 14686,
"end": 17682
} | class ____(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = MobileBertModel.from_pretrained("google/mobilebert-uncased", attn_implementation="eager").to(
torch_device
)
input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 9, 512))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[
[
[-2.4736526e07, 8.2691656e04, 1.6521838e05],
[-5.7541704e-01, 3.9056022e00, 4.4011507e00],
[2.6047359e00, 1.5677652e00, -1.7324188e-01],
]
],
device=torch_device,
)
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
self.assertTrue(lower_bound and upper_bound)
@pytest.mark.torch_export_test
@slow
def test_export(self):
if version.parse(torch.__version__) < version.parse("2.4.0"):
self.skipTest(reason="This test requires torch >= 2.4 to run.")
mobilebert_model = "google/mobilebert-uncased"
device = "cpu"
attn_implementation = "eager"
max_length = 512
tokenizer = AutoTokenizer.from_pretrained(mobilebert_model)
inputs = tokenizer(
f"the man worked as a {tokenizer.mask_token}.",
return_tensors="pt",
padding="max_length",
max_length=max_length,
)
model = MobileBertForMaskedLM.from_pretrained(
mobilebert_model,
device_map=device,
attn_implementation=attn_implementation,
)
logits = model(**inputs).logits
eg_predicted_mask = tokenizer.decode(logits[0, 6].topk(5).indices)
self.assertEqual(eg_predicted_mask.split(), ["carpenter", "waiter", "mechanic", "teacher", "clerk"])
exported_program = torch.export.export(
model,
args=(inputs["input_ids"],),
kwargs={"attention_mask": inputs["attention_mask"]},
strict=True,
)
result = exported_program.module().forward(inputs["input_ids"], inputs["attention_mask"])
ep_predicted_mask = tokenizer.decode(result.logits[0, 6].topk(5).indices)
self.assertEqual(eg_predicted_mask, ep_predicted_mask)
| MobileBertModelIntegrationTests |
python | kubernetes-client__python | kubernetes/client/models/v1_status.py | {
"start": 383,
"end": 10061
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'code': 'int',
'details': 'V1StatusDetails',
'kind': 'str',
'message': 'str',
'metadata': 'V1ListMeta',
'reason': 'str',
'status': 'str'
}
attribute_map = {
'api_version': 'apiVersion',
'code': 'code',
'details': 'details',
'kind': 'kind',
'message': 'message',
'metadata': 'metadata',
'reason': 'reason',
'status': 'status'
}
def __init__(self, api_version=None, code=None, details=None, kind=None, message=None, metadata=None, reason=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1Status - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._code = None
self._details = None
self._kind = None
self._message = None
self._metadata = None
self._reason = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if code is not None:
self.code = code
if details is not None:
self.details = details
if kind is not None:
self.kind = kind
if message is not None:
self.message = message
if metadata is not None:
self.metadata = metadata
if reason is not None:
self.reason = reason
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1Status. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1Status. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1Status.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1Status. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def code(self):
"""Gets the code of this V1Status. # noqa: E501
Suggested HTTP return code for this status, 0 if not set. # noqa: E501
:return: The code of this V1Status. # noqa: E501
:rtype: int
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this V1Status.
Suggested HTTP return code for this status, 0 if not set. # noqa: E501
:param code: The code of this V1Status. # noqa: E501
:type: int
"""
self._code = code
@property
def details(self):
"""Gets the details of this V1Status. # noqa: E501
:return: The details of this V1Status. # noqa: E501
:rtype: V1StatusDetails
"""
return self._details
@details.setter
def details(self, details):
"""Sets the details of this V1Status.
:param details: The details of this V1Status. # noqa: E501
:type: V1StatusDetails
"""
self._details = details
@property
def kind(self):
"""Gets the kind of this V1Status. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1Status. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1Status.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1Status. # noqa: E501
:type: str
"""
self._kind = kind
@property
def message(self):
"""Gets the message of this V1Status. # noqa: E501
A human-readable description of the status of this operation. # noqa: E501
:return: The message of this V1Status. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1Status.
A human-readable description of the status of this operation. # noqa: E501
:param message: The message of this V1Status. # noqa: E501
:type: str
"""
self._message = message
@property
def metadata(self):
"""Gets the metadata of this V1Status. # noqa: E501
:return: The metadata of this V1Status. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1Status.
:param metadata: The metadata of this V1Status. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
@property
def reason(self):
"""Gets the reason of this V1Status. # noqa: E501
A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it. # noqa: E501
:return: The reason of this V1Status. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1Status.
A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it. # noqa: E501
:param reason: The reason of this V1Status. # noqa: E501
:type: str
"""
self._reason = reason
@property
def status(self):
"""Gets the status of this V1Status. # noqa: E501
Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status # noqa: E501
:return: The status of this V1Status. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1Status.
Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status # noqa: E501
:param status: The status of this V1Status. # noqa: E501
:type: str
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Status):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Status):
return True
return self.to_dict() != other.to_dict()
| V1Status |
python | getsentry__sentry | src/sentry/issues/escalating/escalating_issues_alg.py | {
"start": 221,
"end": 354
} | class ____(TypedDict):
intervals: list[str]
data: list[int]
# standard values if no parameters are passed
@dataclass
| GroupCount |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP040.py | {
"start": 695,
"end": 3214
} | class ____:
# reference to global variable
x: typing.TypeAlias = list[T]
# reference to class variable
TCLS = typing.TypeVar["TCLS"]
y: typing.TypeAlias = list[TCLS]
# UP040 won't add generics in fix
T = typing.TypeVar(*args)
x: typing.TypeAlias = list[T]
# `default` was added in Python 3.13
T = typing.TypeVar("T", default=Any)
x: typing.TypeAlias = list[T]
# OK
x: TypeAlias
x: int = 1
# Ensure that "T" appears only once in the type parameters for the modernized
# type alias.
T = typing.TypeVar["T"]
Decorator: TypeAlias = typing.Callable[[T], T]
# NOTE(review): continuation of the UP040 lint fixture -- module-level
# TypeAliasType/TypeAlias cases; do not modernise or reformat the cases.
from typing import TypeVar, Annotated, TypeAliasType

from annotated_types import Gt, SupportGt

# https://github.com/astral-sh/ruff/issues/11422
T = TypeVar("T")
PositiveList = TypeAliasType(
    "PositiveList", list[Annotated[T, Gt(0)]], type_params=(T,)
)

# Bound
T = TypeVar("T", bound=SupportGt)
PositiveList = TypeAliasType(
    "PositiveList", list[Annotated[T, Gt(0)]], type_params=(T,)
)

# Multiple bounds
T1 = TypeVar("T1", bound=SupportGt)
T2 = TypeVar("T2")
T3 = TypeVar("T3")
Tuple3 = TypeAliasType("Tuple3", tuple[T1, T2, T3], type_params=(T1, T2, T3))

# No type_params
PositiveInt = TypeAliasType("PositiveInt", Annotated[int, Gt(0)])
PositiveInt = TypeAliasType("PositiveInt", Annotated[int, Gt(0)], type_params=())

# OK: Other name
T = TypeVar("T", bound=SupportGt)
PositiveList = TypeAliasType(
    "PositiveList2", list[Annotated[T, Gt(0)]], type_params=(T,)
)

# `default` was added in Python 3.13
T = typing.TypeVar("T", default=Any)
AnyList = TypeAliasType("AnyList", list[T], type_params=(T,))

# unsafe fix if comments within the fix
T = TypeVar("T")
PositiveList = TypeAliasType(  # eaten comment
    "PositiveList", list[Annotated[T, Gt(0)]], type_params=(T,)
)

T = TypeVar("T")
PositiveList = TypeAliasType(
    "PositiveList", list[Annotated[T, Gt(0)]], type_params=(T,)
)  # this comment should be okay

# this comment will actually be preserved because it's inside the "value" part
T = TypeVar("T")
PositiveList = TypeAliasType(
    "PositiveList", list[
        Annotated[T, Gt(0)],  # preserved comment
    ], type_params=(T,)
)

T: TypeAlias = (
    int
    | str
)

T: TypeAlias = (  # comment0
    # comment1
    int  # comment2
    # comment3
    |  # comment4
    # comment5
    str  # comment6
    # comment7
)  # comment8

# Test case for TypeVar with default - should be converted when preview mode is enabled
T_default = TypeVar("T_default", default=int)
DefaultList: TypeAlias = list[T_default]
| Foo |
python | mkdocs__mkdocs | mkdocs/contrib/search/__init__.py | {
"start": 1892,
"end": 2209
class ____(base.Config):
    """Configuration options accepted by the search plugin."""

    # Language code(s) for the stemmer; optional -- when unset, the
    # LangOption default behaviour applies (presumably derived from the
    # site language; verify against LangOption).
    lang = c.Optional(LangOption())
    # Regex used to split text into words when building the index.
    separator = c.Type(str, default=r'[\s\-]+')
    # Words shorter than this many characters are not indexed.
    min_search_length = c.Type(int, default=3)
    # Whether/how to prebuild the index: disabled by default, or forced via
    # an explicit 'node' or 'python' backend.
    prebuild_index = c.Choice((False, True, 'node', 'python'), default=False)
    # Granularity of indexed content: full pages, sections, or titles only.
    indexing = c.Choice(('full', 'sections', 'titles'), default='full')
| _PluginConfig |
python | cython__cython | Cython/Compiler/Optimize.py | {
"start": 87346,
"end": 184440
class ____(Visitor.NodeRefCleanupMixin,
           Visitor.MethodDispatcherTransform):
    """Optimize some common methods calls and instantiation patterns
    for builtin types *after* the type analysis phase.

    Running after type analysis, this transform can only perform
    function replacements that do not alter the function return type
    in a way that was not anticipated by the type analysis.

    Handlers are dispatched by name through MethodDispatcherTransform,
    e.g. _handle_simple_function_<name>() for builtin function calls and
    _handle_simple_method_<type>_<method>() for builtin method calls.
    """

    ### cleanup to avoid redundant coercions to/from Python types
def visit_PyTypeTestNode(self, node):
    """Flatten redundant type checks after tree changes.
    """
    self.visitchildren(node)
    # reanalyse() lets the node drop itself if the test became redundant.
    return node.reanalyse()

def _visit_TypecastNode(self, node):
    # disabled - the user may have had a reason to put a type
    # cast, even if it looks redundant to Cython
    """
    Drop redundant type casts.
    """
    self.visitchildren(node)
    if node.type == node.operand.type:
        return node.operand
    return node

def visit_ExprStatNode(self, node):
    """
    Drop dead code and useless coercions.
    """
    self.visitchildren(node)
    if isinstance(node.expr, ExprNodes.CoerceToPyTypeNode):
        # A statement-level expression never needs its Python coercion.
        node.expr = node.expr.arg
    expr = node.expr
    if expr is None or expr.is_none or expr.is_literal:
        # Expression was removed or is dead code => remove ExprStatNode as well.
        return None
    if expr.is_name and expr.entry and (expr.entry.is_local or expr.entry.is_arg):
        # Ignore dead references to local variables etc.
        return None
    return node

def visit_CoerceToBooleanNode(self, node):
    """Drop redundant conversion nodes after tree changes.
    """
    self.visitchildren(node)
    arg = node.arg
    if isinstance(arg, ExprNodes.PyTypeTestNode):
        arg = arg.arg
    if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
        if arg.type in (PyrexTypes.py_object_type, Builtin.bool_type):
            # C value -> Python object -> bool  collapses to  C value -> bool.
            return arg.arg.coerce_to_boolean(self.current_env())
    return node

# C signature of __Pyx_PyNumber_Float(o), used below when re-emitting float().
PyNumber_Float_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None)
    ])

def visit_CoerceToPyTypeNode(self, node):
    """Drop redundant conversion nodes after tree changes."""
    self.visitchildren(node)
    arg = node.arg
    if isinstance(arg, ExprNodes.CoerceFromPyTypeNode):
        arg = arg.arg
    if isinstance(arg, ExprNodes.PythonCapiCallNode):
        if arg.function.name == 'float' and len(arg.args) == 1:
            # undo redundant Py->C->Py coercion
            func_arg = arg.args[0]
            if func_arg.type is Builtin.float_type:
                return func_arg.as_none_safe_node("float() argument must be a string or a number, not 'NoneType'")
            elif func_arg.type.is_pyobject and arg.function.cname == "__Pyx_PyObject_AsDouble":
                return ExprNodes.PythonCapiCallNode(
                    node.pos, '__Pyx_PyNumber_Float', self.PyNumber_Float_func_type,
                    args=[func_arg],
                    py_name='float',
                    is_temp=node.is_temp,
                    utility_code=UtilityCode.load_cached("pynumber_float", "TypeConversion.c"),
                    result_is_used=node.result_is_used,
                ).coerce_to(node.type, self.current_env())
    return node

def visit_CoerceFromPyTypeNode(self, node):
    """Drop redundant conversion nodes after tree changes.

    Also, optimise away calls to Python's builtin int() and
    float() if the result is going to be coerced back into a C
    type anyway.
    """
    self.visitchildren(node)
    arg = node.arg
    if not arg.type.is_pyobject:
        # no Python conversion left at all, just do a C coercion instead
        if node.type != arg.type:
            arg = arg.coerce_to(node.type, self.current_env())
        return arg
    if isinstance(arg, ExprNodes.PyTypeTestNode):
        arg = arg.arg
    if arg.is_literal:
        # Literal int/float/bool constants can be coerced directly.
        if (node.type.is_int and isinstance(arg, ExprNodes.IntNode) or
                node.type.is_float and isinstance(arg, ExprNodes.FloatNode) or
                node.type.is_int and isinstance(arg, ExprNodes.BoolNode)):
            return arg.coerce_to(node.type, self.current_env())
    elif isinstance(arg, ExprNodes.CoerceToPyTypeNode):
        if arg.type is PyrexTypes.py_object_type:
            if node.type.assignable_from(arg.arg.type):
                # completely redundant C->Py->C coercion
                return arg.arg.coerce_to(node.type, self.current_env())
        elif arg.type is Builtin.unicode_type:
            if arg.arg.type.is_unicode_char and node.type.is_unicode_char:
                return arg.arg.coerce_to(node.type, self.current_env())
    elif isinstance(arg, ExprNodes.SimpleCallNode):
        if node.type.is_int or node.type.is_float:
            return self._optimise_numeric_cast_call(node, arg)
    return node

# trunc()/truncf()/truncl() call signature, keyed by the C floating type
# of the argument; used for int(<C float>) in _optimise_numeric_cast_call().
float_float_func_types = {
    float_type: PyrexTypes.CFuncType(
        float_type, [
            PyrexTypes.CFuncTypeArg("arg", float_type, None)
        ])
    for float_type in (PyrexTypes.c_float_type, PyrexTypes.c_double_type, PyrexTypes.c_longdouble_type)
}
def _optimise_numeric_cast_call(self, node, arg):
    """Optimise a Python int()/float() call whose result is coerced back
    to a C number into a plain C cast or a trunc*() call.
    """
    function = arg.function
    args = None
    if isinstance(arg, ExprNodes.PythonCapiCallNode):
        args = arg.args
    elif isinstance(function, ExprNodes.NameNode):
        if function.type.is_builtin_type and isinstance(arg.arg_tuple, ExprNodes.TupleNode):
            args = arg.arg_tuple.args
    if args is None or len(args) != 1:
        return node
    func_arg = args[0]
    if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
        # Strip the C->Py coercion; we want the underlying C value.
        func_arg = func_arg.arg
    elif func_arg.type.is_pyobject:
        # play it safe: Python conversion might work on all sorts of things
        return node
    if function.name == 'int':
        if func_arg.type.is_int or node.type.is_int:
            if func_arg.type == node.type:
                return func_arg
            elif func_arg.type in (PyrexTypes.c_py_ucs4_type, PyrexTypes.c_py_unicode_type):
                # need to parse (<Py_UCS4>'1') as digit 1
                return self._pyucs4_to_number(node, function.name, func_arg)
            elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
                return ExprNodes.TypecastNode(node.pos, operand=func_arg, type=node.type)
        elif func_arg.type.is_float and node.type.is_numeric:
            if func_arg.type.math_h_modifier == 'l':
                # Work around missing Cygwin definition.
                truncl = '__Pyx_truncl'
            else:
                truncl = 'trunc' + func_arg.type.math_h_modifier
            return ExprNodes.PythonCapiCallNode(
                node.pos, truncl,
                func_type=self.float_float_func_types[func_arg.type],
                args=[func_arg],
                py_name='int',
                is_temp=node.is_temp,
                result_is_used=node.result_is_used,
            ).coerce_to(node.type, self.current_env())
    elif function.name == 'float':
        if func_arg.type.is_float or node.type.is_float:
            if func_arg.type == node.type:
                return func_arg
            elif func_arg.type in (PyrexTypes.c_py_ucs4_type, PyrexTypes.c_py_unicode_type):
                # need to parse (<Py_UCS4>'1') as digit 1
                return self._pyucs4_to_number(node, function.name, func_arg)
            elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
                return ExprNodes.TypecastNode(
                    node.pos, operand=func_arg, type=node.type)
    return node

# C signatures for the __Pyx_int_from_UCS4 / __Pyx_double_from_UCS4 helpers.
pyucs4_int_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_int_type, [
        PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_py_ucs4_type, None)
    ],
    exception_value=-1)

pyucs4_double_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_double_type, [
        PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_py_ucs4_type, None)
    ],
    exception_value=-1.0)

def _pyucs4_to_number(self, node, py_type_name, func_arg):
    """Emit the C helper call that parses a single Py_UCS4 character as a
    digit for int()/float().
    """
    assert py_type_name in ("int", "float")
    return ExprNodes.PythonCapiCallNode(
        node.pos, "__Pyx_int_from_UCS4" if py_type_name == "int" else "__Pyx_double_from_UCS4",
        func_type=self.pyucs4_int_func_type if py_type_name == "int" else self.pyucs4_double_func_type,
        args=[func_arg],
        py_name=py_type_name,
        is_temp=node.is_temp,
        result_is_used=node.result_is_used,
        utility_code=UtilityCode.load_cached("int_pyucs4" if py_type_name == "int" else "float_pyucs4", "Builtins.c"),
    ).coerce_to(node.type, self.current_env())

def _error_wrong_arg_count(self, function_name, node, args, expected=None):
    """Report a compile error for a builtin call with a wrong argument count.

    'expected' may be None (unknown), an int, or a descriptive string
    such as '0 or 1'.
    """
    if not expected:  # None or 0
        arg_str = ''
    elif isinstance(expected, str) or expected > 1:
        arg_str = '...'
    elif expected == 1:
        arg_str = 'x'
    else:
        arg_str = ''
    if expected is not None:
        expected_str = 'expected %s, ' % expected
    else:
        expected_str = ''
    error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
        function_name, arg_str, expected_str, len(args)))
### generic fallbacks

def _handle_function(self, node, function_name, function, arg_list, kwargs):
    """Fallback handler: leave unrecognised builtin function calls unchanged."""
    return node

def _handle_method(self, node, type_name, attr_name, function,
                   arg_list, is_unbound_method, kwargs):
    """
    Try to inject C-API calls for unbound method calls to builtin types.
    While the method declarations in Builtin.py already handle this, we
    can additionally resolve bound and unbound methods here that were
    assigned to variables ahead of time.
    """
    if kwargs:
        return node
    if not function or not function.is_attribute or not function.obj.is_name:
        # cannot track unbound method calls over more than one indirection as
        # the names might have been reassigned in the meantime
        return node
    type_entry = self.current_env().lookup(type_name)
    if not type_entry:
        return node
    # Rebuild 'TypeName.method' as a type attribute so the C method entry
    # can be resolved; returns None if there is no such C-level method.
    method = ExprNodes.AttributeNode(
        node.function.pos,
        obj=ExprNodes.NameNode(
            function.pos,
            name=type_name,
            entry=type_entry,
            type=type_entry.type),
        attribute=attr_name,
        is_called=True).analyse_as_type_attribute(self.current_env())
    if method is None:
        return self._optimise_generic_builtin_method_call(
            node, attr_name, function, arg_list, is_unbound_method)
    args = node.args
    if args is None and node.arg_tuple:
        args = node.arg_tuple.args
    call_node = ExprNodes.SimpleCallNode(
        node.pos,
        function=method,
        args=args)
    if not is_unbound_method:
        call_node.self = function.obj
    call_node.analyse_c_function_call(self.current_env())
    call_node.analysed = True
    return call_node.coerce_to(node.type, self.current_env())

### builtin types

def _optimise_generic_builtin_method_call(self, node, attr_name, function, arg_list, is_unbound_method):
    """
    Try to inject an unbound method call for a call to a method of a known builtin type.
    This enables caching the underlying C function of the method at runtime.
    """
    arg_count = len(arg_list)
    if is_unbound_method or arg_count >= 3 or not (function.is_attribute and function.is_py_attr):
        return node
    if not function.obj.type.is_builtin_type:
        return node
    if function.obj.type is Builtin.type_type:
        # allows different actual types => unsafe
        return node
    return ExprNodes.CachedBuiltinMethodCallNode(
        node, function.obj, attr_name, arg_list)
# Shared C signature of the object->unicode conversion helpers below.
PyObject_Unicode_func_type = PyrexTypes.CFuncType(
    Builtin.unicode_type, [
        PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
    ])

def _handle_simple_function_unicode(self, node, function, pos_args):
    """Optimise single argument calls to unicode().
    """
    if len(pos_args) != 1:
        if len(pos_args) == 0:
            # unicode() with no argument -> empty string constant.
            return ExprNodes.UnicodeNode(node.pos, value=EncodedString())
        return node
    arg = pos_args[0]
    if arg.type is Builtin.unicode_type:
        if not arg.may_be_none():
            # Already a non-None unicode object: no-op.
            return arg
        cname = "__Pyx_PyUnicode_Unicode"
        utility_code = UtilityCode.load_cached('PyUnicode_Unicode', 'StringTools.c')
    else:
        cname = "__Pyx_PyObject_Unicode"
        utility_code = UtilityCode.load_cached('PyObject_Unicode', 'StringTools.c')
    return ExprNodes.PythonCapiCallNode(
        node.pos, cname, self.PyObject_Unicode_func_type,
        args=pos_args,
        is_temp=node.is_temp,
        utility_code=utility_code,
        py_name="unicode")

# str() shares the unicode() optimisation.
_handle_simple_function_str = _handle_simple_function_unicode

def visit_FormattedValueNode(self, node):
    """Simplify or avoid plain string formatting of a unicode value.

    This seems misplaced here, but plain unicode formatting is essentially
    a call to the unicode() builtin, which is optimised right above.
    """
    self.visitchildren(node)
    if node.value.type is Builtin.unicode_type and not node.c_format_spec and not node.format_spec:
        if not node.conversion_char or node.conversion_char == 's':
            # value is definitely a unicode string and we don't format it any special
            return self._handle_simple_function_unicode(node, None, [node.value])
    return node

PyDict_Copy_func_type = PyrexTypes.CFuncType(
    Builtin.dict_type, [
        PyrexTypes.CFuncTypeArg("dict", Builtin.dict_type, None)
    ])

def _handle_simple_function_dict(self, node, function, pos_args):
    """Replace dict(some_dict) by PyDict_Copy(some_dict).
    """
    if len(pos_args) != 1:
        return node
    arg = pos_args[0]
    if arg.type is Builtin.dict_type:
        arg = arg.as_none_safe_node("'NoneType' is not iterable")
        return ExprNodes.PythonCapiCallNode(
            node.pos, "PyDict_Copy", self.PyDict_Copy_func_type,
            args=[arg],
            is_temp=node.is_temp
            )
    return node

PySequence_List_func_type = PyrexTypes.CFuncType(
    Builtin.list_type,
    [PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])

def _handle_simple_function_list(self, node, function, pos_args):
    """Turn list(ob) into PySequence_List(ob).
    """
    if len(pos_args) != 1:
        return node
    arg = pos_args[0]
    return ExprNodes.PythonCapiCallNode(
        node.pos,
        # The "KeepNew" variant is used when both values are temps
        # (presumably allowing the helper to reuse the fresh reference --
        # see its definition in the C utility code).
        "__Pyx_PySequence_ListKeepNew"
        if (node.result_in_temp() and arg.result_in_temp() and
                arg.type in (PyrexTypes.py_object_type, Builtin.list_type))
        else "PySequence_List",
        self.PySequence_List_func_type,
        args=pos_args,
        is_temp=node.is_temp,
    )

PyList_AsTuple_func_type = PyrexTypes.CFuncType(
    Builtin.tuple_type, [
        PyrexTypes.CFuncTypeArg("list", Builtin.list_type, None)
    ])

def _handle_simple_function_tuple(self, node, function, pos_args):
    """Replace tuple([...]) by PyList_AsTuple or PySequence_Tuple.
    """
    if len(pos_args) != 1 or not node.result_in_temp():
        return node
    arg = pos_args[0]
    if arg.type is Builtin.tuple_type and not arg.may_be_none():
        # tuple(t) on a non-None tuple is a no-op.
        return arg
    if arg.type is Builtin.list_type:
        pos_args[0] = arg.as_none_safe_node(
            "'NoneType' object is not iterable")
        return ExprNodes.PythonCapiCallNode(
            node.pos, "PyList_AsTuple", self.PyList_AsTuple_func_type,
            args=pos_args, is_temp=True)
    else:
        return ExprNodes.AsTupleNode(node.pos, arg=arg, type=Builtin.tuple_type)
PySet_New_func_type = PyrexTypes.CFuncType(
    Builtin.set_type, [
        PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)
    ])

def _handle_simple_function_set(self, node, function, pos_args):
    """Replace set([...]) by a set literal and set(it) by PySet_New(it)."""
    if len(pos_args) != 1:
        return node
    if pos_args[0].is_sequence_constructor:
        # We can optimise set([x,y,z]) safely into a set literal,
        # but only if we create all items before adding them -
        # adding an item may raise an exception if it is not
        # hashable, but creating the later items may have
        # side-effects.
        args = []
        temps = []
        for arg in pos_args[0].args:
            if not arg.is_simple():
                # Evaluate non-trivial items once, up front.
                arg = UtilNodes.LetRefNode(arg)
                temps.append(arg)
            args.append(arg)
        result = ExprNodes.SetNode(node.pos, is_temp=1, args=args)
        self.replace(node, result)
        for temp in temps[::-1]:
            result = UtilNodes.EvalWithTempExprNode(temp, result)
        return result
    else:
        # PySet_New(it) is better than a generic Python call to set(it)
        return self.replace(node, ExprNodes.PythonCapiCallNode(
            node.pos, "PySet_New",
            self.PySet_New_func_type,
            args=pos_args,
            is_temp=node.is_temp,
            py_name="set"))

PyFrozenSet_New_func_type = PyrexTypes.CFuncType(
    Builtin.frozenset_type, [
        PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)
    ])

def _handle_simple_function_frozenset(self, node, function, pos_args):
    """Replace frozenset(it) by a call to the __Pyx_PyFrozenSet_New helper."""
    if not pos_args:
        # frozenset() -> helper called with a NULL iterable.
        pos_args = [ExprNodes.NullNode(node.pos)]
    elif len(pos_args) > 1:
        return node
    elif pos_args[0].type is Builtin.frozenset_type and not pos_args[0].may_be_none():
        # frozenset(fs) on a non-None frozenset is a no-op.
        return pos_args[0]
    # PyFrozenSet_New(it) is better than a generic Python call to frozenset(it)
    return ExprNodes.PythonCapiCallNode(
        node.pos, "__Pyx_PyFrozenSet_New",
        self.PyFrozenSet_New_func_type,
        args=pos_args,
        is_temp=node.is_temp,
        utility_code=UtilityCode.load_cached('pyfrozenset_new', 'Builtins.c'),
        py_name="frozenset")

# Shared C signature of the object->double conversion helpers used below.
PyObject_AsDouble_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_double_type, [
        PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
    ],
    exception_value="((double)-1)",
    exception_check=True)
def _handle_simple_function_float(self, node, function, pos_args):
    """Transform float() into either a C type cast or a faster C
    function call.
    """
    # Note: this requires the float() function to be typed as
    # returning a C 'double'
    if len(pos_args) == 0:
        # float() -> constant 0.0
        return ExprNodes.FloatNode(
            node, value="0.0", constant_result=0.0
        ).coerce_to(Builtin.float_type, self.current_env())
    elif len(pos_args) != 1:
        self._error_wrong_arg_count('float', node, pos_args, '0 or 1')
        return node
    func_arg = pos_args[0]
    if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
        func_arg = func_arg.arg
    if func_arg.type is PyrexTypes.c_double_type:
        return func_arg
    elif func_arg.type in (PyrexTypes.c_py_ucs4_type, PyrexTypes.c_py_unicode_type):
        # need to parse (<Py_UCS4>'1') as digit 1
        return self._pyucs4_to_number(node, function.name, func_arg)
    elif node.type.assignable_from(func_arg.type) or func_arg.type.is_numeric:
        return ExprNodes.TypecastNode(
            node.pos, operand=func_arg, type=node.type)
    # Select a dedicated C conversion helper per argument type.
    arg = pos_args[0].as_none_safe_node(
        "float() argument must be a string or a number, not 'NoneType'")
    if func_arg.type is Builtin.bytes_type:
        cfunc_name = "__Pyx_PyBytes_AsDouble"
        utility_code_name = 'pybytes_as_double'
    elif func_arg.type is Builtin.bytearray_type:
        cfunc_name = "__Pyx_PyByteArray_AsDouble"
        utility_code_name = 'pybytes_as_double'
    elif func_arg.type is Builtin.unicode_type:
        cfunc_name = "__Pyx_PyUnicode_AsDouble"
        utility_code_name = 'pyunicode_as_double'
    elif func_arg.type is Builtin.int_type:
        cfunc_name = "PyLong_AsDouble"
        utility_code_name = None
    else:
        arg = pos_args[0]  # no need for an additional None check
        cfunc_name = "__Pyx_PyObject_AsDouble"
        utility_code_name = 'pyobject_as_double'
    return ExprNodes.PythonCapiCallNode(
        node.pos, cfunc_name,
        self.PyObject_AsDouble_func_type,
        args=[arg],
        is_temp=node.is_temp,
        utility_code=load_c_utility(utility_code_name) if utility_code_name else None,
        py_name="float")

PyNumber_Int_func_type = PyrexTypes.CFuncType(
    Builtin.int_type, [
        PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None)
    ])

PyLong_FromDouble_func_type = PyrexTypes.CFuncType(
    Builtin.int_type, [
        PyrexTypes.CFuncTypeArg("value", PyrexTypes.c_double_type, None)
    ])

def _handle_simple_function_int(self, node, function, pos_args):
    """Transform int() into a faster C function call.
    """
    if len(pos_args) == 0:
        # int() -> constant 0
        return ExprNodes.IntNode.for_int(node.pos, 0, type=Builtin.int_type)
    elif len(pos_args) != 1:
        return node  # int(x, base)
    func_arg = pos_args[0]
    if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
        if func_arg.arg.type.is_float:
            # int(<C double>) -> PyLong_FromDouble(value)
            return ExprNodes.PythonCapiCallNode(
                node.pos, "PyLong_FromDouble", self.PyLong_FromDouble_func_type,
                args=[func_arg.arg], is_temp=True, py_name='int',
            )
        else:
            return node  # handled in visit_CoerceFromPyTypeNode()
    if func_arg.type.is_pyobject and node.type.is_pyobject:
        return ExprNodes.PythonCapiCallNode(
            node.pos, "__Pyx_PyNumber_Int", self.PyNumber_Int_func_type,
            args=pos_args, is_temp=True, py_name='int')
    return node
def _handle_simple_function_bool(self, node, function, pos_args):
    """Transform bool(x) into a type coercion to a boolean.
    """
    if len(pos_args) == 0:
        # bool() -> constant False
        return ExprNodes.BoolNode(node.pos, value=False, type=Builtin.bool_type)
    elif len(pos_args) != 1:
        self._error_wrong_arg_count('bool', node, pos_args, '0 or 1')
        return node
    else:
        # => !!<bint>(x) to make sure it's exactly 0 or 1
        operand = pos_args[0].coerce_to_boolean(self.current_env())
        operand = ExprNodes.NotNode(node.pos, operand=operand)
        operand = ExprNodes.NotNode(node.pos, operand=operand)
        # coerce back to Python object as that's the result we are expecting
        return operand.coerce_to_pyobject(self.current_env())

PyMemoryView_FromObject_func_type = PyrexTypes.CFuncType(
    Builtin.memoryview_type, [
        PyrexTypes.CFuncTypeArg("value", PyrexTypes.py_object_type, None)
    ])

PyMemoryView_FromBuffer_func_type = PyrexTypes.CFuncType(
    Builtin.memoryview_type, [
        PyrexTypes.CFuncTypeArg("value", Builtin.py_buffer_type, None)
    ])

def _handle_simple_function_memoryview(self, node, function, pos_args):
    """Replace memoryview(obj) by a direct PyMemoryView_From*() C-API call."""
    if len(pos_args) != 1:
        self._error_wrong_arg_count('memoryview', node, pos_args, '1')
        return node
    else:
        if pos_args[0].type.is_pyobject:
            return ExprNodes.PythonCapiCallNode(
                node.pos, "PyMemoryView_FromObject",
                self.PyMemoryView_FromObject_func_type,
                args=[pos_args[0]],
                is_temp=node.is_temp,
                py_name="memoryview")
        elif pos_args[0].type.is_ptr and pos_args[0].base_type is Builtin.py_buffer_type:
            # TODO - this currently doesn't work because the buffer fails a
            # "can coerce to python object" test earlier. But it'd be nice to support
            return ExprNodes.PythonCapiCallNode(
                node.pos, "PyMemoryView_FromBuffer",
                self.PyMemoryView_FromBuffer_func_type,
                args=[pos_args[0]],
                is_temp=node.is_temp,
                py_name="memoryview")
    return node

### builtin functions

Pyx_ssize_strlen_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_py_ssize_t_type, [
        PyrexTypes.CFuncTypeArg("bytes", PyrexTypes.c_const_char_ptr_type, None)
    ],
    exception_value=-1)

Pyx_Py_UNICODE_strlen_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_py_ssize_t_type, [
        PyrexTypes.CFuncTypeArg("unicode", PyrexTypes.c_const_py_unicode_ptr_type, None)
    ],
    exception_value=-1)

PyObject_Size_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_py_ssize_t_type, [
        PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
    ],
    exception_value=-1)

# Builtin types whose len() maps to a direct C macro/function.
_map_to_capi_len_function = {
    Builtin.unicode_type: "__Pyx_PyUnicode_GET_LENGTH",
    Builtin.bytes_type: "__Pyx_PyBytes_GET_SIZE",
    Builtin.bytearray_type: '__Pyx_PyByteArray_GET_SIZE',
    Builtin.list_type: "__Pyx_PyList_GET_SIZE",
    Builtin.tuple_type: "__Pyx_PyTuple_GET_SIZE",
    Builtin.set_type: "__Pyx_PySet_GET_SIZE",
    Builtin.frozenset_type: "__Pyx_PySet_GET_SIZE",
    Builtin.dict_type: "PyDict_Size",
}.get

# Extension types known to keep their length in ob_size (readable via Py_SIZE).
_ext_types_with_pysize = {"cpython.array.array"}
def _handle_simple_function_len(self, node, function, pos_args):
    """Replace len(char*) by the equivalent call to strlen(),
    len(Py_UNICODE) by the equivalent Py_UNICODE_strlen() and
    len(known_builtin_type) by an equivalent C-API call.
    """
    if len(pos_args) != 1:
        self._error_wrong_arg_count('len', node, pos_args, 1)
        return node
    arg = pos_args[0]
    if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
        arg = arg.arg
    if arg.type.is_string:
        new_node = ExprNodes.PythonCapiCallNode(
            node.pos, "__Pyx_ssize_strlen", self.Pyx_ssize_strlen_func_type,
            args=[arg],
            is_temp=node.is_temp)
    elif arg.type.is_pyunicode_ptr:
        new_node = ExprNodes.PythonCapiCallNode(
            node.pos, "__Pyx_Py_UNICODE_ssize_strlen", self.Pyx_Py_UNICODE_strlen_func_type,
            args=[arg],
            is_temp=node.is_temp,
            utility_code=UtilityCode.load_cached("ssize_pyunicode_strlen", "StringTools.c"))
    elif arg.type.is_memoryviewslice:
        func_type = PyrexTypes.CFuncType(
            PyrexTypes.c_py_ssize_t_type, [
                PyrexTypes.CFuncTypeArg("memoryviewslice", arg.type, None)
            ], nogil=True)
        new_node = ExprNodes.PythonCapiCallNode(
            node.pos, "__Pyx_MemoryView_Len", func_type,
            args=[arg], is_temp=node.is_temp)
    elif arg.type.is_pyobject:
        cfunc_name = self._map_to_capi_len_function(arg.type)
        if cfunc_name is None:
            arg_type = arg.type
            if ((arg_type.is_extension_type or arg_type.is_builtin_type)
                    and arg_type.entry.qualified_name in self._ext_types_with_pysize):
                cfunc_name = 'Py_SIZE'
            else:
                return node
        arg = arg.as_none_safe_node(
            "object of type 'NoneType' has no len()")
        new_node = ExprNodes.PythonCapiCallNode(
            node.pos, cfunc_name, self.PyObject_Size_func_type,
            args=[arg], is_temp=node.is_temp)
    elif arg.type.is_unicode_char:
        # A single character always has length 1.
        return ExprNodes.IntNode.for_int(node.pos, 1, type=node.type)
    else:
        return node
    if node.type not in (PyrexTypes.c_size_t_type, PyrexTypes.c_py_ssize_t_type):
        new_node = new_node.coerce_to(node.type, self.current_env())
    return new_node

Pyx_Type_func_type = PyrexTypes.CFuncType(
    Builtin.type_type, [
        PyrexTypes.CFuncTypeArg("object", PyrexTypes.py_object_type, None)
    ])

def _handle_simple_function_type(self, node, function, pos_args):
    """Replace type(o) by a macro call to Py_TYPE(o).
    """
    if len(pos_args) != 1:
        return node
    node = ExprNodes.PythonCapiCallNode(
        node.pos, "Py_TYPE", self.Pyx_Type_func_type,
        args=pos_args,
        is_temp=False)
    return ExprNodes.CastNode(node, PyrexTypes.py_object_type)
# Signature of the single-argument Py*_Check() style type test functions.
Py_type_check_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_bint_type, [
        PyrexTypes.CFuncTypeArg("arg", PyrexTypes.py_object_type, None)
    ])

def _handle_simple_function_isinstance(self, node, function, pos_args):
    """Replace isinstance() checks against builtin types by the
    corresponding C-API call.
    """
    if len(pos_args) != 2:
        return node
    arg, type_nodes = pos_args
    temps = []
    if isinstance(type_nodes, ExprNodes.TupleNode):
        type_nodes = type_nodes.args
    elif type_nodes.type is Builtin.type_type or isinstance(type_nodes, ExprNodes.BitwiseOrNode):
        type_nodes = [type_nodes]
    else:
        return node

    # Unpack 'int | float | None' etc.
    types, allowed_none_node = _unpack_union_type_nodes(type_nodes)

    # Map the separate type checks to check functions.
    if types and (allowed_none_node or len(types) > 1):
        if arg.is_attribute or not arg.is_simple():
            # Evaluate the tested object only once across multiple checks.
            arg = UtilNodes.ResultRefNode(arg)
            temps.append(arg)

    test_nodes = []
    env = self.current_env()
    if allowed_none_node is not None:
        # Unions including None add an explicit 'x is None' test.
        test_nodes.append(
            ExprNodes.PrimaryCmpNode(
                allowed_none_node.pos,
                operand1=arg,
                operator='is',
                operand2=allowed_none_node,
            ).analyse_types(env).coerce_to(PyrexTypes.c_bint_type, env))

    builtin_tests = set()
    for test_type_node in types:
        builtin_type = entry = None
        if test_type_node.is_name and test_type_node.entry:
            entry = env.lookup(test_type_node.entry.name)
            if entry and entry.type and entry.type.is_builtin_type:
                builtin_type = entry.type
        if builtin_type is Builtin.type_type:
            # all types have type "type", but there's only one 'type'
            if entry.name != 'type' or not (
                    entry.scope and entry.scope.is_builtin_scope):
                builtin_type = None
        if builtin_type is not None:
            type_check_function = builtin_type.type_check_function(exact=False)
            if type_check_function in builtin_tests:
                # Skip duplicate check functions in the same union.
                continue
            builtin_tests.add(type_check_function)
            type_check_args = [arg]
        elif test_type_node.type is Builtin.type_type:
            type_check_function = '__Pyx_TypeCheck'
            type_check_args = [arg, test_type_node]
        else:
            if not test_type_node.is_literal:
                test_type_node = UtilNodes.ResultRefNode(test_type_node)
                temps.append(test_type_node)
            # Fall back to the generic runtime check.
            type_check_function = 'PyObject_IsInstance'
            type_check_args = [arg, test_type_node]
        test_nodes.append(
            ExprNodes.PythonCapiCallNode(
                test_type_node.pos, type_check_function, self.Py_type_check_func_type,
                args=type_check_args,
                utility_code=entry.utility_code if entry is not None else None,
                is_temp=True,
            ))

    def join_with_or(a, b, make_binop_node=ExprNodes.binop_node):
        # Combine the individual checks with short-circuiting 'or'.
        or_node = make_binop_node(node.pos, 'or', a, b)
        or_node.type = PyrexTypes.c_bint_type
        or_node.wrap_operands(env)
        return or_node

    test_node = reduce(join_with_or, test_nodes).coerce_to(node.type, env)
    for temp in temps[::-1]:
        test_node = UtilNodes.EvalWithTempExprNode(temp, test_node)
    return test_node

def _handle_simple_function_ord(self, node, function, pos_args):
    """Unpack ord(Py_UNICODE) and ord('X').
    """
    if len(pos_args) != 1:
        return node
    arg = pos_args[0]
    if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
        if arg.arg.type.is_unicode_char:
            # ord(<unicode char>) is just the code point value.
            return ExprNodes.TypecastNode(
                arg.pos, operand=arg.arg, type=PyrexTypes.c_long_type
            ).coerce_to(node.type, self.current_env())
    elif isinstance(arg, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)):
        if len(arg.value) == 1:
            # ord() of a 1-character literal folds to a constant.
            return ExprNodes.IntNode.for_int(arg.pos, ord(arg.value)).coerce_to(node.type, self.current_env())
    return node
### special methods
Pyx_tp_new_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None),
])
Pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None),
PyrexTypes.CFuncTypeArg("kwargs", Builtin.dict_type, None),
])
def _handle_any_slot__new__(self, node, function, args,
is_unbound_method, kwargs=None):
"""Replace 'exttype.__new__(exttype, ...)' by a call to exttype->tp_new()
"""
obj = function.obj
if not is_unbound_method or len(args) < 1:
return node
type_arg = args[0]
if not obj.is_name or not type_arg.is_name:
return node # not a simple case
if obj.type != Builtin.type_type or type_arg.type != Builtin.type_type:
return node # not a known type
if not type_arg.type_entry or not obj.type_entry:
if obj.name != type_arg.name:
return node
# otherwise, we know it's a type and we know it's the same
# type for both - that should do
elif type_arg.type_entry != obj.type_entry:
# different types - may or may not lead to an error at runtime
return node
args_tuple = ExprNodes.TupleNode(node.pos, args=args[1:])
args_tuple = args_tuple.analyse_types(
self.current_env(), skip_children=True)
if type_arg.type_entry:
ext_type = type_arg.type_entry.type
if (ext_type.is_extension_type and ext_type.typeobj_cname and
ext_type.scope.global_scope() == self.current_env().global_scope()):
# known type in current module
tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
slot_func_cname = TypeSlots.get_slot_function(ext_type.scope, tp_slot)
if slot_func_cname:
cython_scope = self.context.cython_scope
PyTypeObjectPtr = PyrexTypes.CPtrType(
cython_scope.lookup('PyTypeObject').type)
pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
ext_type, [
PyrexTypes.CFuncTypeArg("type", PyTypeObjectPtr, None),
PyrexTypes.CFuncTypeArg("args", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("kwargs", PyrexTypes.py_object_type, None),
])
type_arg = ExprNodes.CastNode(type_arg, PyTypeObjectPtr)
if not kwargs:
kwargs = ExprNodes.NullNode(node.pos, type=PyrexTypes.py_object_type) # hack?
return ExprNodes.PythonCapiCallNode(
node.pos, slot_func_cname,
pyx_tp_new_kwargs_func_type,
args=[type_arg, args_tuple, kwargs],
may_return_none=False,
is_temp=True)
else:
# arbitrary variable, needs a None check for safety
type_arg = type_arg.as_none_safe_node(
"object.__new__(X): X is not a type object (NoneType)")
utility_code = UtilityCode.load_cached('tp_new', 'ObjectHandling.c')
if kwargs:
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_tp_new_kwargs", self.Pyx_tp_new_kwargs_func_type,
args=[type_arg, args_tuple, kwargs],
utility_code=utility_code,
is_temp=node.is_temp
)
else:
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_tp_new", self.Pyx_tp_new_func_type,
args=[type_arg, args_tuple],
utility_code=utility_code,
is_temp=node.is_temp
)
    def _handle_any_slot__class__(self, node, function, args,
                                  is_unbound_method, kwargs=None):
        """Intercept ``instance.__class__()`` calls and return the node unchanged."""
        # The purpose of this function is to handle calls to instance.__class__() so that
        # it doesn't get handled by the __Pyx_CallUnboundCMethod0 mechanism.
        # TODO: optimizations of the instance.__class__() call might be possible in future.
        return node
### methods of builtin types
PyObject_Append_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("item", PyrexTypes.py_object_type, None),
],
exception_value=-1)
def _handle_simple_method_object_append(self, node, function, args, is_unbound_method):
"""Optimistic optimisation as X.append() is almost always
referring to a list.
"""
if len(args) != 2 or node.result_is_used or node.function.entry:
return node
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyObject_Append", self.PyObject_Append_func_type,
args=args,
may_return_none=False,
is_temp=node.is_temp,
result_is_used=False,
utility_code=load_c_utility('append')
)
    def _handle_simple_method_list_extend(self, node, function, args, is_unbound_method):
        """Replace list.extend([...]) for short sequence literals values by sequential appends
        to avoid creating an intermediate sequence argument.
        """
        if len(args) != 2:
            return node
        obj, value = args
        # Only literal sequence constructors ([...] / (...)) can be unrolled.
        if not value.is_sequence_constructor:
            return node
        items = list(value.args)
        if value.mult_factor is not None or len(items) > 8:
            # Appending wins for short sequences but slows down when multiple resize operations are needed.
            # This seems to be a good enough limit that avoids repeated resizing.
            # NOTE(review): the branch below is deliberately dead ('if False') —
            # kept as a record of a benchmarked-and-rejected tuple conversion.
            if False and isinstance(value, ExprNodes.ListNode):
                # One would expect that tuples are more efficient here, but benchmarking with
                # Py3.5 and Py3.7 suggests that they are not. Probably worth revisiting at some point.
                # Might be related to the usage of PySequence_FAST() in CPython's list.extend(),
                # which is probably tuned more towards lists than tuples (and rightly so).
                tuple_node = args[1].as_tuple().analyse_types(self.current_env(), skip_children=True)
                Visitor.recursively_replace_node(node, args[1], tuple_node)
            return node
        wrapped_obj = self._wrap_self_arg(obj, function, is_unbound_method, 'extend')
        if not items:
            # Empty sequences are not likely to occur, but why waste a call to list.extend() for them?
            wrapped_obj.result_is_used = node.result_is_used
            return wrapped_obj
        cloned_obj = obj = wrapped_obj
        # Re-evaluating a non-trivial target per append would duplicate side
        # effects, so bind it to a temp when appending more than once.
        if len(items) > 1 and not obj.is_simple():
            cloned_obj = UtilNodes.LetRefNode(obj)
        # Use ListComp_Append() for all but the last item and finish with PyList_Append()
        # to shrink the list storage size at the very end if necessary.
        temps = []
        arg = items[-1]
        if not arg.is_simple():
            arg = UtilNodes.LetRefNode(arg)
            temps.append(arg)
        new_node = ExprNodes.PythonCapiCallNode(
            node.pos, "__Pyx_PyList_Append", self.PyObject_Append_func_type,
            args=[cloned_obj, arg],
            is_temp=True,
            utility_code=load_c_utility("ListAppend"))
        # Chain the remaining appends (right to left) with '|' so that the
        # combined expression yields a single C return code.
        for arg in items[-2::-1]:
            if not arg.is_simple():
                arg = UtilNodes.LetRefNode(arg)
                temps.append(arg)
            new_node = ExprNodes.binop_node(
                node.pos, '|',
                ExprNodes.PythonCapiCallNode(
                    node.pos, "__Pyx_ListComp_Append", self.PyObject_Append_func_type,
                    args=[cloned_obj, arg], py_name="extend",
                    is_temp=True,
                    utility_code=load_c_utility("ListCompAppend")),
                new_node,
                type=PyrexTypes.c_returncode_type,
            )
        new_node.result_is_used = node.result_is_used
        if cloned_obj is not obj:
            temps.append(cloned_obj)
        # Wrap the expression in temp scopes for all LetRefNodes created above.
        for temp in temps:
            new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
            new_node.result_is_used = node.result_is_used
        return new_node
PyByteArray_Append_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("value", PyrexTypes.c_int_type, None),
],
exception_value=-1)
PyByteArray_AppendObject_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("value", PyrexTypes.py_object_type, None),
],
exception_value=-1)
    def _handle_simple_method_bytearray_append(self, node, function, args, is_unbound_method):
        """Replace bytearray.append(value) by a C helper call, specialised on
        whether the value is a C integer, a one-character string literal, or a
        generic Python object.
        """
        if len(args) != 2:
            return node
        func_name = "__Pyx_PyByteArray_Append"
        func_type = self.PyByteArray_Append_func_type
        value = unwrap_coerced_node(args[1])
        if value.type.is_int or isinstance(value, ExprNodes.IntNode):
            # integer value => pass it as a plain C int
            value = value.coerce_to(PyrexTypes.c_int_type, self.current_env())
            utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c")
        elif value.is_string_literal:
            # single-character literal => pass its char value
            if not value.can_coerce_to_char_literal():
                return node
            value = value.coerce_to(PyrexTypes.c_char_type, self.current_env())
            utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c")
        elif value.type.is_pyobject:
            # generic object => helper does the value extraction at runtime
            func_name = "__Pyx_PyByteArray_AppendObject"
            func_type = self.PyByteArray_AppendObject_func_type
            utility_code = UtilityCode.load_cached("ByteArrayAppendObject", "StringTools.c")
        else:
            return node
        new_node = ExprNodes.PythonCapiCallNode(
            node.pos, func_name, func_type,
            args=[args[0], value],
            may_return_none=False,
            is_temp=node.is_temp,
            utility_code=utility_code,
        )
        if node.result_is_used:
            # the C helpers return an int code; coerce back to the node's type
            new_node = new_node.coerce_to(node.type, self.current_env())
        return new_node
PyObject_Pop_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
])
PyObject_PopIndex_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("py_index", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("c_index", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("is_signed", PyrexTypes.c_int_type, None),
],
has_varargs=True) # to fake the additional macro args that lack a proper C type
def _handle_simple_method_list_pop(self, node, function, args, is_unbound_method):
return self._handle_simple_method_object_pop(
node, function, args, is_unbound_method, is_list=True)
    def _handle_simple_method_object_pop(self, node, function, args, is_unbound_method, is_list=False):
        """Optimistic optimisation as X.pop([n]) is almost always
        referring to a list.
        """
        if not args:
            return node
        obj = args[0]
        if is_list:
            type_name = 'List'
            # list.pop() on None must raise AttributeError, not crash
            obj = obj.as_none_safe_node(
                "'NoneType' object has no attribute '%.30s'",
                error="PyExc_AttributeError",
                format_args=['pop'])
        else:
            type_name = 'Object'
        if len(args) == 1:
            # no index argument => pop the last item
            return ExprNodes.PythonCapiCallNode(
                node.pos, "__Pyx_Py%s_Pop" % type_name,
                self.PyObject_Pop_func_type,
                args=[obj],
                may_return_none=True,
                is_temp=node.is_temp,
                utility_code=load_c_utility('pop'),
            )
        elif len(args) == 2:
            # indexed pop: prepare both a C-level and (where cheap) a
            # Python-level version of the index for the helper macro
            index = unwrap_coerced_node(args[1])
            py_index = ExprNodes.NoneNode(index.pos)
            orig_index_type = index.type
            if not index.type.is_int:
                if isinstance(index, ExprNodes.IntNode):
                    py_index = index.coerce_to_pyobject(self.current_env())
                    index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
                elif is_list:
                    if index.type.is_pyobject:
                        py_index = index.coerce_to_simple(self.current_env())
                        index = ExprNodes.CloneNode(py_index)
                    index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
                else:
                    return node
            elif not PyrexTypes.numeric_type_fits(index.type, PyrexTypes.c_py_ssize_t_type):
                # index type too wide for a Py_ssize_t fast path
                return node
            elif isinstance(index, ExprNodes.IntNode):
                py_index = index.coerce_to_pyobject(self.current_env())
            # real type might still be larger at runtime
            if not orig_index_type.is_int:
                orig_index_type = index.type
            if not orig_index_type.create_to_py_utility_code(self.current_env()):
                return node
            convert_func = orig_index_type.to_py_function
            conversion_type = PyrexTypes.CFuncType(
                PyrexTypes.py_object_type, [PyrexTypes.CFuncTypeArg("intval", orig_index_type, None)])
            return ExprNodes.PythonCapiCallNode(
                node.pos, "__Pyx_Py%s_PopIndex" % type_name,
                self.PyObject_PopIndex_func_type,
                args=[obj, py_index, index,
                      ExprNodes.IntNode.for_int(index.pos, 1 if orig_index_type.signed else 0),
                      ExprNodes.RawCNameExprNode(index.pos, PyrexTypes.c_void_type,
                                                 orig_index_type.empty_declaration_code()),
                      ExprNodes.RawCNameExprNode(index.pos, conversion_type, convert_func)],
                may_return_none=True,
                is_temp=node.is_temp,
                utility_code=load_c_utility("pop_index"),
            )
        return node
single_param_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
],
exception_value=-1)
def _handle_simple_method_list_sort(self, node, function, args, is_unbound_method):
"""Call PyList_Sort() instead of the 0-argument l.sort().
"""
if len(args) != 1:
return node
return self._substitute_method_call(
node, function, "PyList_Sort", self.single_param_func_type,
'sort', is_unbound_method, args).coerce_to(node.type, self.current_env)
Pyx_PyDict_GetItem_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
])
def _handle_simple_method_dict_get(self, node, function, args, is_unbound_method):
"""Replace dict.get() by a call to PyDict_GetItem().
"""
if len(args) == 2:
args.append(ExprNodes.NoneNode(node.pos))
elif len(args) != 3:
self._error_wrong_arg_count('dict.get', node, args, "2 or 3")
return node
return self._substitute_method_call(
node, function,
"__Pyx_PyDict_GetItemDefault", self.Pyx_PyDict_GetItem_func_type,
'get', is_unbound_method, args,
may_return_none = True,
utility_code = load_c_utility("dict_getitem_default"))
Pyx_PyDict_SetDefault_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("is_safe_type", PyrexTypes.c_int_type, None),
])
def _handle_simple_method_dict_setdefault(self, node, function, args, is_unbound_method):
"""Replace dict.setdefault() by calls to PyDict_GetItem() and PyDict_SetItem().
"""
if len(args) == 2:
args.append(ExprNodes.NoneNode(node.pos))
elif len(args) != 3:
self._error_wrong_arg_count('dict.setdefault', node, args, "2 or 3")
return node
return self._substitute_method_call(
node, function,
"__Pyx_PyDict_SetDefault", self.Pyx_PyDict_SetDefault_func_type,
'setdefault', is_unbound_method, args,
may_return_none=True,
utility_code=UtilityCode.load_cached('dict_setdefault', "Builtins.c"))
PyDict_Pop_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
])
PyDict_Pop_ignore_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_int_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
],
exception_value=PyrexTypes.c_int_type.exception_value,
)
    def _handle_simple_method_dict_pop(self, node, function, args, is_unbound_method):
        """Replace dict.pop() by a call to _PyDict_Pop().
        """
        capi_func = "__Pyx_PyDict_Pop"
        utility_code_name = 'py_dict_pop'
        func_type = self.PyDict_Pop_func_type
        if len(args) == 2:
            # no default given => pass NULL so the helper raises KeyError
            args.append(ExprNodes.NullNode(node.pos))
        elif len(args) == 3:
            if not node.result_is_used:
                # special case: we can ignore the default value
                capi_func = "__Pyx_PyDict_Pop_ignore"
                utility_code_name = 'py_dict_pop_ignore'
                func_type = self.PyDict_Pop_ignore_func_type
        else:
            self._error_wrong_arg_count('dict.pop', node, args, "2 or 3")
            return node
        return self._substitute_method_call(
            node, function,
            capi_func, func_type,
            'pop', is_unbound_method, args,
            may_return_none=True,
            utility_code=load_c_utility(utility_code_name))
Pyx_BinopInt_func_types = {
(ctype, ret_type): PyrexTypes.CFuncType(
ret_type, [
PyrexTypes.CFuncTypeArg("op1", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("op2", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("cval", ctype, None),
PyrexTypes.CFuncTypeArg("inplace", PyrexTypes.c_bint_type, None),
PyrexTypes.CFuncTypeArg("zerodiv_check", PyrexTypes.c_bint_type, None),
], exception_value=None if ret_type.is_pyobject else ret_type.exception_value)
for ctype in (PyrexTypes.c_long_type, PyrexTypes.c_double_type)
for ret_type in (Builtin.float_type, Builtin.int_type, PyrexTypes.py_object_type, PyrexTypes.c_bint_type)
}
    # Arithmetic/bitwise/comparison special methods on 'object' all forward to
    # the generic numeric binop optimiser with the matching C operation name.
    def _handle_simple_method_object___add__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Add', node, function, args, is_unbound_method)

    def _handle_simple_method_object___sub__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Subtract', node, function, args, is_unbound_method)

    def _handle_simple_method_object___mul__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Multiply', node, function, args, is_unbound_method)

    def _handle_simple_method_object___eq__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Eq', node, function, args, is_unbound_method)

    def _handle_simple_method_object___ne__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Ne', node, function, args, is_unbound_method)

    def _handle_simple_method_object___and__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('And', node, function, args, is_unbound_method)

    def _handle_simple_method_object___or__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Or', node, function, args, is_unbound_method)

    def _handle_simple_method_object___xor__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Xor', node, function, args, is_unbound_method)
def _handle_simple_method_object___rshift__(self, node, function, args, is_unbound_method):
if len(args) != 2 or not isinstance(args[1], ExprNodes.IntNode):
return node
if not args[1].has_constant_result() or not (1 <= args[1].constant_result <= 63):
return node
return self._optimise_num_binop('Rshift', node, function, args, is_unbound_method)
def _handle_simple_method_object___lshift__(self, node, function, args, is_unbound_method):
if len(args) != 2 or not isinstance(args[1], ExprNodes.IntNode):
return node
if not args[1].has_constant_result() or not (1 <= args[1].constant_result <= 63):
return node
return self._optimise_num_binop('Lshift', node, function, args, is_unbound_method)
    # Division-like operations route through _optimise_num_div(), which adds a
    # constant, non-zero, bounded-magnitude divisor check.
    def _handle_simple_method_object___mod__(self, node, function, args, is_unbound_method):
        return self._optimise_num_div('Remainder', node, function, args, is_unbound_method)

    def _handle_simple_method_object___floordiv__(self, node, function, args, is_unbound_method):
        return self._optimise_num_div('FloorDivide', node, function, args, is_unbound_method)

    def _handle_simple_method_object___truediv__(self, node, function, args, is_unbound_method):
        return self._optimise_num_div('TrueDivide', node, function, args, is_unbound_method)

    def _handle_simple_method_object___div__(self, node, function, args, is_unbound_method):
        return self._optimise_num_div('Divide', node, function, args, is_unbound_method)
_handle_simple_method_int___add__ = _handle_simple_method_object___add__
_handle_simple_method_int___sub__ = _handle_simple_method_object___sub__
_handle_simple_method_int___mul__ = _handle_simple_method_object___mul__
_handle_simple_method_int___eq__ = _handle_simple_method_object___eq__
_handle_simple_method_int___ne__ = _handle_simple_method_object___ne__
_handle_simple_method_int___and__ = _handle_simple_method_object___and__
_handle_simple_method_int___or__ = _handle_simple_method_object___or__
_handle_simple_method_int___xor__ = _handle_simple_method_object___xor__
_handle_simple_method_int___rshift__ = _handle_simple_method_object___rshift__
_handle_simple_method_int___lshift__ = _handle_simple_method_object___lshift__
_handle_simple_method_int___mod__ = _handle_simple_method_object___mod__
_handle_simple_method_int___floordiv__ = _handle_simple_method_object___floordiv__
_handle_simple_method_int___truediv__ = _handle_simple_method_object___truediv__
def _optimise_num_div(self, operator, node, function, args, is_unbound_method):
if len(args) != 2 or not args[1].has_constant_result() or args[1].constant_result == 0:
return node
if isinstance(args[1], ExprNodes.IntNode):
if not (-2**30 <= args[1].constant_result <= 2**30):
return node
elif isinstance(args[1], ExprNodes.FloatNode):
if not (-2**53 <= args[1].constant_result <= 2**53):
return node
else:
return node
return self._optimise_num_binop(operator, node, function, args, is_unbound_method)
    # 'float' special methods forward straight to the generic binop optimiser
    # (no divisor-magnitude guard is needed here: the C value is a double).
    def _handle_simple_method_float___add__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Add', node, function, args, is_unbound_method)

    def _handle_simple_method_float___sub__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Subtract', node, function, args, is_unbound_method)

    def _handle_simple_method_float___truediv__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('TrueDivide', node, function, args, is_unbound_method)

    def _handle_simple_method_float___div__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Divide', node, function, args, is_unbound_method)

    def _handle_simple_method_float___mod__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Remainder', node, function, args, is_unbound_method)

    def _handle_simple_method_float___eq__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Eq', node, function, args, is_unbound_method)

    def _handle_simple_method_float___ne__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Ne', node, function, args, is_unbound_method)
    def _optimise_num_binop(self, operator, node, function, args, is_unbound_method):
        """
        Optimise math operators for (likely) float or small integer operations.
        """
        if getattr(node, "special_bool_cmp_function", None):
            return node  # already optimized
        if len(args) != 2:
            return node
        # Determine the result type the specialised helper should produce.
        if node.type is Builtin.int_type or node.type is Builtin.float_type:
            ret_type = node.type
        elif node.type.is_pyobject:
            ret_type = PyrexTypes.py_object_type
        elif node.type is PyrexTypes.c_bint_type and operator in ('Eq', 'Ne'):
            ret_type = PyrexTypes.c_bint_type
        else:
            return node
        result = optimise_numeric_binop(operator, node, ret_type, args[0], args[1])
        if result is None:
            return node
        func_cname, utility_code, extra_args, num_type = result
        assert all([arg.type.is_pyobject for arg in args])
        args = list(args) + extra_args
        call_node = self._substitute_method_call(
            node, function,
            func_cname,
            self.Pyx_BinopInt_func_types[(num_type, ret_type)],
            '__%s__' % operator[:3].lower(), is_unbound_method, args,
            may_return_none=ret_type is PyrexTypes.py_object_type,
            with_none_check=False,
            utility_code=utility_code)
        # If the helper returns a C value but the node expects an object,
        # wrap the result in a to-Python coercion.
        if node.type.is_pyobject and not ret_type.is_pyobject:
            call_node = ExprNodes.CoerceToPyTypeNode(call_node, self.current_env(), node.type)
        return call_node
### unicode type methods
PyUnicode_uchar_predicate_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
])
def _inject_unicode_predicate(self, node, function, args, is_unbound_method):
if is_unbound_method or len(args) != 1:
return node
ustring = args[0]
if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
not ustring.arg.type.is_unicode_char:
return node
uchar = ustring.arg
method_name = function.attribute
# None of these are defined in the Limited API, and some are undefined in PyPy too.
utility_code = TempitaUtilityCode.load_cached(
"py_unicode_predicate", "StringTools.c",
context=dict(method_name=method_name)
)
function_name = '__Pyx_Py_UNICODE_%s' % method_name.upper()
func_call = self._substitute_method_call(
node, function,
function_name, self.PyUnicode_uchar_predicate_func_type,
method_name, is_unbound_method, [uchar],
utility_code = utility_code)
if node.type.is_pyobject:
func_call = func_call.coerce_to_pyobject(self.current_env)
return func_call
_handle_simple_method_unicode_isalnum = _inject_unicode_predicate
_handle_simple_method_unicode_isalpha = _inject_unicode_predicate
_handle_simple_method_unicode_isdecimal = _inject_unicode_predicate
_handle_simple_method_unicode_isdigit = _inject_unicode_predicate
_handle_simple_method_unicode_islower = _inject_unicode_predicate
_handle_simple_method_unicode_isnumeric = _inject_unicode_predicate
_handle_simple_method_unicode_isspace = _inject_unicode_predicate
_handle_simple_method_unicode_istitle = _inject_unicode_predicate
_handle_simple_method_unicode_isupper = _inject_unicode_predicate
_handle_simple_method_unicode_isprintable = _inject_unicode_predicate
PyUnicode_uchar_conversion_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ucs4_type, [
PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
])
# DISABLED: Return value can only be one character, which is not correct.
'''
def _inject_unicode_character_conversion(self, node, function, args, is_unbound_method):
if is_unbound_method or len(args) != 1:
return node
ustring = args[0]
if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
not ustring.arg.type.is_unicode_char:
return node
uchar = ustring.arg
method_name = function.attribute
function_name = 'Py_UNICODE_TO%s' % method_name.upper()
func_call = self._substitute_method_call(
node, function,
function_name, self.PyUnicode_uchar_conversion_func_type,
method_name, is_unbound_method, [uchar])
if node.type.is_pyobject:
func_call = func_call.coerce_to_pyobject(self.current_env)
return func_call
#_handle_simple_method_unicode_lower = _inject_unicode_character_conversion
#_handle_simple_method_unicode_upper = _inject_unicode_character_conversion
#_handle_simple_method_unicode_title = _inject_unicode_character_conversion
'''
PyUnicode_Splitlines_func_type = PyrexTypes.CFuncType(
Builtin.list_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("keepends", PyrexTypes.c_bint_type, None),
])
def _handle_simple_method_unicode_splitlines(self, node, function, args, is_unbound_method):
"""Replace unicode.splitlines(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (1,2):
self._error_wrong_arg_count('unicode.splitlines', node, args, "1 or 2")
return node
self._inject_bint_default_argument(node, args, 1, False)
return self._substitute_method_call(
node, function,
"PyUnicode_Splitlines", self.PyUnicode_Splitlines_func_type,
'splitlines', is_unbound_method, args)
PyUnicode_Split_func_type = PyrexTypes.CFuncType(
Builtin.list_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("sep", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("maxsplit", PyrexTypes.c_py_ssize_t_type, None),
]
)
    def _handle_simple_method_unicode_split(self, node, function, args, is_unbound_method):
        """Replace unicode.split(...) by a direct call to the
        corresponding C-API function.
        """
        if len(args) not in (1,2,3):
            self._error_wrong_arg_count('unicode.split', node, args, "1-3")
            return node
        if len(args) < 2:
            # no separator => NULL (per C API: split at whitespace)
            args.append(ExprNodes.NullNode(node.pos))
        else:
            # an explicit None separator also maps to NULL
            self._inject_null_for_none(args, 1)
        # 'maxsplit' defaults to -1 (no limit)
        self._inject_int_default_argument(
            node, args, 2, PyrexTypes.c_py_ssize_t_type, "-1")
        return self._substitute_method_call(
            node, function,
            "PyUnicode_Split", self.PyUnicode_Split_func_type,
            'split', is_unbound_method, args)
PyUnicode_Join_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("seq", PyrexTypes.py_object_type, None),
])
    def _handle_simple_method_unicode_join(self, node, function, args, is_unbound_method):
        """
        unicode.join() builds a list first => see if we can do this more efficiently
        """
        if len(args) != 2:
            self._error_wrong_arg_count('unicode.join', node, args, "2")
            return node
        if isinstance(args[1], ExprNodes.GeneratorExpressionNode):
            # Inline a generator-expression argument as a list comprehension
            # so that no generator object needs to be created at runtime.
            gen_expr_node = args[1]
            loop_node = gen_expr_node.loop
            yield_statements = _find_yield_statements(loop_node)
            if yield_statements:
                inlined_genexpr = ExprNodes.InlinedGeneratorExpressionNode(
                    node.pos, gen_expr_node, orig_func='list',
                    comprehension_type=Builtin.list_type)
                # Turn each 'yield expr' into an append onto the inlined list.
                for yield_expression, yield_stat_node in yield_statements:
                    append_node = ExprNodes.ComprehensionAppendNode(
                        yield_expression.pos,
                        expr=yield_expression,
                        target=inlined_genexpr.target)
                    Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
                args[1] = inlined_genexpr
        return self._substitute_method_call(
            node, function,
            "PyUnicode_Join", self.PyUnicode_Join_func_type,
            'join', is_unbound_method, args)
PyString_Tailmatch_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("str", PyrexTypes.py_object_type, None), # bytes/str/unicode
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
],
exception_value=-1)
    def _handle_simple_method_unicode_endswith(self, node, function, args, is_unbound_method):
        # direction +1 => suffix matching in the tailmatch helper
        return self._inject_tailmatch(
            node, function, args, is_unbound_method, 'str', 'endswith',
            unicode_tailmatch_utility_code, +1)

    def _handle_simple_method_unicode_startswith(self, node, function, args, is_unbound_method):
        # direction -1 => prefix matching in the tailmatch helper
        return self._inject_tailmatch(
            node, function, args, is_unbound_method, 'str', 'startswith',
            unicode_tailmatch_utility_code, -1)
    def _inject_tailmatch(self, node, function, args, is_unbound_method, type_name,
                          method_name, utility_code, direction):
        """Replace unicode.startswith(...) and unicode.endswith(...)
        by a direct call to the corresponding C-API function.
        """
        if len(args) not in (2,3,4):
            self._error_wrong_arg_count(f"{type_name}.{method_name}", node, args, "2-4")
            return node
        # missing 'start'/'end' default to the whole string
        self._inject_int_default_argument(
            node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
        self._inject_int_default_argument(
            node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
        args.append(ExprNodes.IntNode.for_int(node.pos, direction))
        if type_name == 'str':
            func_name = "__Pyx_PyUnicode_Tailmatch"
        else:
            func_name = f"__Pyx_Py{type_name.capitalize()}_Tailmatch"
        method_call = self._substitute_method_call(
            node, function,
            func_name, self.PyString_Tailmatch_func_type,
            method_name, is_unbound_method, args,
            utility_code = utility_code)
        # the helper returns a C bint; present it as a Python bool
        return method_call.coerce_to(Builtin.bool_type, self.current_env())
PyUnicode_Find_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
],
exception_value=-2)
    def _handle_simple_method_unicode_find(self, node, function, args, is_unbound_method):
        # direction +1 => forward search
        return self._inject_unicode_find(
            node, function, args, is_unbound_method, 'find', +1)

    def _handle_simple_method_unicode_rfind(self, node, function, args, is_unbound_method):
        # direction -1 => reverse search
        return self._inject_unicode_find(
            node, function, args, is_unbound_method, 'rfind', -1)
    def _inject_unicode_find(self, node, function, args, is_unbound_method,
                             method_name, direction):
        """Replace unicode.find(...) and unicode.rfind(...) by a
        direct call to the corresponding C-API function.
        """
        if len(args) not in (2,3,4):
            self._error_wrong_arg_count('unicode.%s' % method_name, node, args, "2-4")
            return node
        # missing 'start'/'end' default to the whole string
        self._inject_int_default_argument(
            node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
        self._inject_int_default_argument(
            node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
        args.append(ExprNodes.IntNode.for_int(node.pos, direction))
        method_call = self._substitute_method_call(
            node, function, "PyUnicode_Find", self.PyUnicode_Find_func_type,
            method_name, is_unbound_method, args)
        # the C result is a Py_ssize_t index; convert back to a Python int
        return method_call.coerce_to_pyobject(self.current_env())
PyUnicode_Count_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
],
exception_value=-1)
def _handle_simple_method_unicode_count(self, node, function, args, is_unbound_method):
"""Replace unicode.count(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (2,3,4):
self._error_wrong_arg_count('unicode.count', node, args, "2-4")
return node
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
method_call = self._substitute_method_call(
node, function, "PyUnicode_Count", self.PyUnicode_Count_func_type,
'count', is_unbound_method, args)
return method_call.coerce_to_pyobject(self.current_env())
PyUnicode_Replace_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("replstr", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("maxcount", PyrexTypes.c_py_ssize_t_type, None),
])
def _handle_simple_method_unicode_replace(self, node, function, args, is_unbound_method):
"""Replace unicode.replace(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (3,4):
self._error_wrong_arg_count('unicode.replace', node, args, "3-4")
return node
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "-1")
return self._substitute_method_call(
node, function, "PyUnicode_Replace", self.PyUnicode_Replace_func_type,
'replace', is_unbound_method, args)
PyUnicode_AsEncodedString_func_type = PyrexTypes.CFuncType(
Builtin.bytes_type, [
PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
])
PyUnicode_AsXyzString_func_type = PyrexTypes.CFuncType(
Builtin.bytes_type, [
PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
])
_special_encodings = ['UTF8', 'UTF16', 'UTF-16LE', 'UTF-16BE', 'Latin1', 'ASCII',
'unicode_escape', 'raw_unicode_escape']
_special_codecs = [ (name, codecs.getencoder(name))
for name in _special_encodings ]
def _handle_simple_method_unicode_encode(self, node, function, args, is_unbound_method):
    """Replace unicode.encode(...) by a direct C-API call to the
    corresponding codec.

    Constant strings are encoded at compile time where possible; known
    codecs with strict error handling use their dedicated C-API encoder
    (e.g. PyUnicode_AsUTF8String); everything else goes through the
    generic PyUnicode_AsEncodedString().
    """
    if len(args) < 1 or len(args) > 3:
        self._error_wrong_arg_count('unicode.encode', node, args, '1-3')
        return node

    string_node = args[0]

    parameters = self._unpack_encoding_and_error_mode(node.pos, args)
    if parameters is None:
        return node
    encoding, encoding_node, error_handling, error_handling_node = parameters

    if string_node.has_constant_result():
        # constant, so try to do the encoding at compile time
        try:
            value = string_node.constant_result.encode(encoding, error_handling)
        except Exception:
            # Unknown codec, unencodable characters, etc. - narrow from a
            # bare 'except:' so that KeyboardInterrupt/SystemExit are not
            # swallowed here. Fall through to encoding at runtime instead.
            pass
        else:
            value = bytes_literal(value, encoding or 'UTF-8')
            return ExprNodes.BytesNode(string_node.pos, value=value, type=Builtin.bytes_type)

    if len(args) == 1:
        # encode() without arguments => default encoding/errors (NULL, NULL).
        null_node = ExprNodes.NullNode(node.pos)
        return self._substitute_method_call(
            node, function, "PyUnicode_AsEncodedString",
            self.PyUnicode_AsEncodedString_func_type,
            'encode', is_unbound_method, [string_node, null_node, null_node])

    if encoding and error_handling == 'strict':
        # try to find a specific encoder function
        codec_name = self._find_special_codec_name(encoding)
        if codec_name is not None and '-' not in codec_name:
            encode_function = "PyUnicode_As%sString" % codec_name
            return self._substitute_method_call(
                node, function, encode_function,
                self.PyUnicode_AsXyzString_func_type,
                'encode', is_unbound_method, [string_node])

    return self._substitute_method_call(
        node, function, "PyUnicode_AsEncodedString",
        self.PyUnicode_AsEncodedString_func_type,
        'encode', is_unbound_method,
        [string_node, encoding_node, error_handling_node])
# Pointer type of the codec-specific decoder functions,
# e.g. PyUnicode_DecodeUTF8(char*, Py_ssize_t, char*).
PyUnicode_DecodeXyz_func_ptr_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
    Builtin.unicode_type, [
        PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_const_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("size", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
        ]))

# Signature of the __Pyx_decode_c_string() helper: decodes a slice
# [start:stop] of a C string, optionally via a specific decode function.
_decode_c_string_func_type = PyrexTypes.CFuncType(
    Builtin.unicode_type, [
        PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_const_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None),
        ])

# Same as above, but taking a Python bytes/bytearray object instead of a C string.
_decode_bytes_func_type = PyrexTypes.CFuncType(
    Builtin.unicode_type, [
        PyrexTypes.CFuncTypeArg("string", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None),
        ])

# The C++ variant depends on the concrete std::string type, so it is built
# lazily in _handle_simple_method_bytes_decode().
_decode_cpp_string_func_type = None  # lazy init
def _handle_simple_method_bytes_decode(self, node, function, args, is_unbound_method):
    """Replace char*.decode() by a direct C-API call to the
    corresponding codec, possibly resolving a slice on the char*.

    Handles C strings, C++ std::string and Python bytes/bytearray
    inputs, folds constant decodes at compile time, and dispatches to
    a per-input-type helper from StringTools.c at runtime.
    """
    if not (1 <= len(args) <= 3):
        self._error_wrong_arg_count('bytes.decode', node, args, '1-3')
        return node

    # Try to extract encoding parameters and attempt constant decode.
    string_node = args[0]
    parameters = self._unpack_encoding_and_error_mode(node.pos, args)
    if parameters is None:
        return node
    encoding, encoding_node, error_handling, error_handling_node = parameters

    if string_node.has_constant_result():
        # Constant input => try to decode at compile time.
        try:
            constant_result = string_node.constant_result.decode(encoding, error_handling)
        except (AttributeError, ValueError, UnicodeDecodeError):
            # Cannot fold at compile time => fall through to runtime decoding.
            pass
        else:
            return UnicodeNode(
                string_node.pos,
                value=EncodedString(constant_result),
                bytes_value=string_node.constant_result,
            )

    # normalise input nodes
    start = stop = None
    if isinstance(string_node, ExprNodes.SliceIndexNode):
        # Resolve a slice on the input into explicit start/stop bounds so the
        # helper can decode the sub-range without creating a sliced copy.
        index_node = string_node
        string_node = index_node.base
        start, stop = index_node.start, index_node.stop
        if not start or start.constant_result == 0:
            start = None
    if isinstance(string_node, ExprNodes.CoerceToPyTypeNode):
        # Strip a C->Python coercion to work on the underlying C value directly.
        string_node = string_node.arg

    string_type = string_node.type
    if string_type in (Builtin.bytes_type, Builtin.bytearray_type):
        # Python object input: insert the appropriate None check that the
        # original method call would have performed.
        if is_unbound_method:
            string_node = string_node.as_none_safe_node(
                "descriptor '%s' requires a '%s' object but received a 'NoneType'",
                format_args=['decode', string_type.name])
        else:
            string_node = string_node.as_none_safe_node(
                "'NoneType' object has no attribute '%.30s'",
                error="PyExc_AttributeError",
                format_args=['decode'])
    elif not string_type.is_string and not string_type.is_cpp_string:
        # nothing to optimise here
        return node

    if not start:
        start = ExprNodes.IntNode.for_size(node.pos, 0)
    elif not start.type.is_int:
        start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
    if stop and not stop.type.is_int:
        stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())

    # try to find a specific encoder function
    codec_name = None
    if encoding is not None:
        codec_name = self._find_special_codec_name(encoding)
    if codec_name is not None:
        if codec_name in ('UTF16', 'UTF-16LE', 'UTF-16BE'):
            # UTF-16 variants go through Cython utility wrappers rather than
            # the C-API directly.
            codec_cname = "__Pyx_PyUnicode_Decode%s" % codec_name.replace('-', '')
        else:
            codec_cname = "PyUnicode_Decode%s" % codec_name
        decode_function = ExprNodes.RawCNameExprNode(
            node.pos, type=self.PyUnicode_DecodeXyz_func_ptr_type, cname=codec_cname)
        # A specific decode function makes the encoding string redundant.
        encoding_node = ExprNodes.NullNode(node.pos)
    else:
        decode_function = ExprNodes.NullNode(node.pos)

    # build the helper function call
    temps = []
    if string_type.is_string:
        # C string
        if not stop:
            # use strlen() to find the string length, just as CPython would
            if not string_node.is_name:
                string_node = UtilNodes.LetRefNode(string_node)  # used twice
                temps.append(string_node)
            stop = ExprNodes.PythonCapiCallNode(
                string_node.pos, "__Pyx_ssize_strlen", self.Pyx_ssize_strlen_func_type,
                args=[string_node],
                is_temp=True,
            )
        helper_func_type = self._decode_c_string_func_type
        utility_code_name = 'decode_c_string'
    elif string_type.is_cpp_string:
        # C++ std::string
        if not stop:
            stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX',
                                     constant_result=ExprNodes.not_a_constant)
        if self._decode_cpp_string_func_type is None:
            # lazy init to reuse the C++ string type
            self._decode_cpp_string_func_type = PyrexTypes.CFuncType(
                Builtin.unicode_type, [
                    PyrexTypes.CFuncTypeArg("string", string_type, None),
                    PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
                    PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
                    PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
                    PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
                    PyrexTypes.CFuncTypeArg("decode_func", self.PyUnicode_DecodeXyz_func_ptr_type, None),
                    ])
        helper_func_type = self._decode_cpp_string_func_type
        utility_code_name = f'decode_cpp_{string_type.name}'
    else:
        # Python bytes/bytearray object
        if not stop:
            stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX',
                                     constant_result=ExprNodes.not_a_constant)
        helper_func_type = self._decode_bytes_func_type
        if string_type is Builtin.bytes_type:
            utility_code_name = 'decode_bytes'
        else:
            utility_code_name = 'decode_bytearray'

    node = ExprNodes.PythonCapiCallNode(
        node.pos, '__Pyx_%s' % utility_code_name, helper_func_type,
        args=[string_node, start, stop, encoding_node, error_handling_node, decode_function],
        is_temp=node.is_temp,
        utility_code=UtilityCode.load_cached(utility_code_name, 'StringTools.c'),
    )

    # Wrap any LetRefNode temps (innermost first) around the final call.
    for temp in temps[::-1]:
        node = UtilNodes.EvalWithTempExprNode(temp, node)
    return node
# bytearray.decode() shares the exact same implementation as bytes.decode().
_handle_simple_method_bytearray_decode = _handle_simple_method_bytes_decode
def _find_special_codec_name(self, encoding):
try:
requested_codec = codecs.getencoder(encoding)
except LookupError:
return None
for name, codec in self._special_codecs:
if codec == requested_codec:
if '_' in name:
name = ''.join([s.capitalize()
for s in name.split('_')])
return name
return None
def _unpack_encoding_and_error_mode(self, pos, args):
    """Extract the encoding and error-mode arguments of encode()/decode().

    Returns a tuple (encoding, encoding_node, error_handling,
    error_handling_node) where the *_node entries are C-string argument
    nodes (NullNode when the C-API default applies), or None if the
    arguments cannot be resolved, in which case the call is not optimised.
    """
    null_node = ExprNodes.NullNode(pos)

    if len(args) >= 2:
        encoding, encoding_node = self._unpack_string_and_cstring_node(args[1])
        if encoding_node is None:
            # Not a recognisable string argument => cannot optimise.
            return None
    else:
        encoding = None
        encoding_node = null_node

    if len(args) == 3:
        error_handling, error_handling_node = self._unpack_string_and_cstring_node(args[2])
        if error_handling_node is None:
            return None
        if error_handling == 'strict':
            # 'strict' is the C-API default => pass NULL instead.
            error_handling_node = null_node
    else:
        error_handling = 'strict'
        error_handling_node = null_node

    return (encoding, encoding_node, error_handling, error_handling_node)
def _unpack_string_and_cstring_node(self, node):
    """Resolve a string argument node into (python_value, c_string_node).

    'python_value' is the compile-time string value if known (else None),
    'c_string_node' is a node of C char* type, or None if the argument
    cannot be turned into a C string at all.
    """
    if isinstance(node, ExprNodes.CoerceToPyTypeNode):
        # Look through a C->Python coercion to the underlying value.
        node = node.arg
    if isinstance(node, ExprNodes.UnicodeNode):
        # Compile-time unicode literal => re-emit as a UTF-8 C string.
        encoding = node.value
        node = ExprNodes.BytesNode(
            node.pos, value=encoding.as_utf8_string(), type=PyrexTypes.c_const_char_ptr_type)
    elif isinstance(node, ExprNodes.BytesNode):
        # Compile-time bytes literal; ISO-8859-1 maps each byte 1:1 to a char.
        encoding = node.value.decode('ISO-8859-1')
        node = ExprNodes.BytesNode(
            node.pos, value=node.value, type=PyrexTypes.c_const_char_ptr_type)
    elif node.type is Builtin.bytes_type:
        # Runtime bytes object => coerce to char* at runtime, value unknown.
        encoding = None
        node = node.coerce_to(PyrexTypes.c_const_char_ptr_type, self.current_env())
    elif node.type.is_string:
        # Already a C string, value unknown at compile time.
        encoding = None
    else:
        # Unsupported argument type => signal failure with (None, None).
        encoding = node = None
    return encoding, node
def _handle_simple_method_bytes_endswith(self, node, function, args, is_unbound_method):
    # Map bytes.endswith(...) onto the shared C tail-matching helper;
    # direction +1 means "match at the end of the string".
    return self._inject_tailmatch(
        node, function, args, is_unbound_method, 'bytes', 'endswith',
        bytes_tailmatch_utility_code, +1)

def _handle_simple_method_bytes_startswith(self, node, function, args, is_unbound_method):
    # Same helper as endswith; direction -1 means "match at the start".
    return self._inject_tailmatch(
        node, function, args, is_unbound_method, 'bytes', 'startswith',
        bytes_tailmatch_utility_code, -1)
# The bytearray startswith/endswith handlers below are intentionally kept as
# dead code inside a string literal so they are trivial to re-enable later.
''' # disabled for now, enable when we consider it worth it (see StringTools.c)
def _handle_simple_method_bytearray_endswith(self, node, function, args, is_unbound_method):
    return self._inject_tailmatch(
        node, function, args, is_unbound_method, 'bytearray', 'endswith',
        bytes_tailmatch_utility_code, +1)

def _handle_simple_method_bytearray_startswith(self, node, function, args, is_unbound_method):
    return self._inject_tailmatch(
        node, function, args, is_unbound_method, 'bytearray', 'startswith',
        bytes_tailmatch_utility_code, -1)
'''

### helpers
def _substitute_method_call(self, node, function, name, func_type,
                            attr_name, is_unbound_method, args=(),
                            utility_code=None, is_temp=None,
                            may_return_none=ExprNodes.PythonCapiCallNode.may_return_none,
                            with_none_check=True):
    """Build a PythonCapiCallNode that replaces an optimised method call.

    'name'/'func_type' describe the C function to call; 'attr_name' is the
    original Python method name, used for the None-check error message.
    """
    call_args = list(args)
    if with_none_check and call_args:
        # Guard the object the method is called on against None before
        # handing it to C code.
        call_args[0] = self._wrap_self_arg(
            call_args[0], function, is_unbound_method, attr_name)
    return ExprNodes.PythonCapiCallNode(
        node.pos, name, func_type,
        args=call_args,
        is_temp=node.is_temp if is_temp is None else is_temp,
        utility_code=utility_code,
        may_return_none=may_return_none,
        result_is_used=node.result_is_used,
    )
def _wrap_self_arg(self, self_arg, function, is_unbound_method, attr_name):
    """Insert a runtime None check for the object a method is called on."""
    if self_arg.is_literal:
        # Literals can never be None - no check needed.
        return self_arg
    if is_unbound_method:
        # Unbound calls raise the descriptor TypeError that CPython would.
        return self_arg.as_none_safe_node(
            "descriptor '%s' requires a '%s' object but received a 'NoneType'",
            format_args=[attr_name, self_arg.type.name])
    # Bound calls mimic CPython's AttributeError on None; the attribute name
    # is truncated to 30 chars only when it is short enough for '%.30s'.
    message = "'NoneType' object has no attribute '%{}s'".format('.30' if len(attr_name) <= 30 else '')
    return self_arg.as_none_safe_node(
        message,
        error="PyExc_AttributeError",
        format_args=[attr_name])
# Generic object -> object C function signature, used e.g. for the
# __Pyx_NoneAsNull() helper in _inject_null_for_none().
obj_to_obj_func_type = PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
    ])
def _inject_null_for_none(self, args, index):
    """Replace a possibly-None argument at 'index' by a C NULL.

    No-op when the argument is missing or can never be None.
    """
    if len(args) <= index:
        return
    arg = args[index]
    if not arg.may_be_none():
        return
    if arg.is_none:
        # Definitely None at compile time => plain NULL constant.
        replacement = ExprNodes.NullNode(arg.pos)
    else:
        # Might be None => map None to NULL at runtime.
        replacement = ExprNodes.PythonCapiCallNode(
            arg.pos, "__Pyx_NoneAsNull",
            self.obj_to_obj_func_type,
            args=[arg.coerce_to_simple(self.current_env())],
            is_temp=0,
        )
    args[index] = replacement
def _inject_int_default_argument(self, node, args, arg_index, type, default_value):
    """Ensure 'args[arg_index]' exists as a C integer of 'type'.

    A missing or None argument is replaced by 'default_value' (given as an
    int or as a C expression string such as "PY_SSIZE_T_MAX").
    """
    # Python usually allows passing None for range bounds,
    # so we treat that as requesting the default.
    assert len(args) >= arg_index
    if len(args) == arg_index or args[arg_index].is_none:
        if isinstance(default_value, int) or str(default_value).lstrip('+-').isdecimal():
            # Plain numeric default => emit a typed integer literal.
            int_node = ExprNodes.IntNode.for_int(node.pos, int(default_value))
        else:
            # Symbolic default (e.g. a C macro name) => emit verbatim.
            int_node = ExprNodes.IntNode(node.pos, value=str(default_value), type=type)
        args.append(int_node)
    else:
        arg = args[arg_index].coerce_to(type, self.current_env())
        if isinstance(arg, ExprNodes.CoerceFromPyTypeNode):
            # Add a runtime check for None and map it to the default value.
            arg.special_none_cvalue = str(default_value)
        args[arg_index] = arg
def _inject_bint_default_argument(self, node, args, arg_index, default_value):
    """Ensure 'args[arg_index]' exists as a C boolean, appending a default
    BoolNode when the argument is missing."""
    assert len(args) >= arg_index
    if len(args) > arg_index:
        # Argument present => coerce it to a C boolean.
        args[arg_index] = args[arg_index].coerce_to_boolean(self.current_env())
    else:
        # Argument missing => append the default as a boolean constant.
        args.append(ExprNodes.BoolNode(node.pos, value=bool(default_value)))
def optimise_numeric_binop(operator, node, ret_type, arg0, arg1):
    """
    Optimise math operators for (likely) float or small integer operations.

    Returns (func_cname, utility_code, extra_args, num_type) describing the
    specialised C helper to call, or None when the operation should not be
    specialised (non-numeric operands, unsupported operator, constant too
    large, or division by zero).
    """
    # When adding IntNode/FloatNode to something else, assume other operand is also numeric.
    # Prefer constants on RHS as they allows better size control for some operators.
    num_nodes = (ExprNodes.IntNode, ExprNodes.FloatNode)
    if isinstance(arg1, num_nodes):
        if arg0.type is not PyrexTypes.py_object_type and arg0.type is not Builtin.int_type:
            return None
        numval = arg1
        arg_order = 'ObjC'  # object op constant
    elif isinstance(arg0, num_nodes):
        if arg1.type is not PyrexTypes.py_object_type and arg1.type is not Builtin.int_type:
            return None
        numval = arg0
        arg_order = 'CObj'  # constant op object
    else:
        return None

    if not numval.has_constant_result():
        return None

    # is_float is an instance check rather that numval.type.is_float because
    # it will often be a Python float type rather than a C float type
    is_float = isinstance(numval, ExprNodes.FloatNode)
    num_type = PyrexTypes.c_double_type if is_float else PyrexTypes.c_long_type
    if is_float:
        if operator not in ('Add', 'Subtract', 'Remainder', 'TrueDivide', 'Divide', 'Eq', 'Ne'):
            return None
    elif operator == 'Divide':
        # mixed old-/new-style division is not currently optimised for integers
        return None
    elif abs(numval.constant_result) > 2**30:
        # Cut off at an integer border that is still safe for all operations.
        return None

    if operator in ('TrueDivide', 'FloorDivide', 'Divide', 'Remainder'):
        if arg1.constant_result == 0:
            # Don't optimise division by 0. :)
            return None

    extra_args = []

    # First extra argument: the numeric constant itself, as a C value.
    extra_args.append((ExprNodes.FloatNode if is_float else ExprNodes.IntNode)(
        numval.pos, value=numval.value, constant_result=numval.constant_result,
        type=num_type))
    # Second extra argument: whether this is an in-place operation (+=, ...).
    inplace = node.inplace if isinstance(node, ExprNodes.NumBinopNode) else False
    extra_args.append(ExprNodes.BoolNode(node.pos, value=inplace))
    if is_float or operator not in ('Eq', 'Ne'):
        # "PyFloatBinop" and "PyLongBinop" take an additional "check for zero division" argument.
        zerodivision_check = arg_order == 'CObj' and (
            not node.cdivision if isinstance(node, ExprNodes.DivNode) else False)
        extra_args.append(ExprNodes.BoolNode(node.pos, value=zerodivision_check))

    utility_code = TempitaUtilityCode.load_cached(
        "PyFloatBinop" if is_float else "PyLongCompare" if operator in ('Eq', 'Ne') else "PyLongBinop",
        "Optimize.c",
        context=dict(op=operator, order=arg_order, ret_type=ret_type))

    # e.g. __Pyx_PyLong_AddObjC, __Pyx_PyFloat_BoolEqCObj, ...
    func_cname = "__Pyx_Py%s_%s%s%s" % (
        'Float' if is_float else 'Long',
        '' if ret_type.is_pyobject else 'Bool',
        operator,
        arg_order)

    return func_cname, utility_code, extra_args, num_type
# Shared utility code backing the startswith()/endswith() optimisations above.
unicode_tailmatch_utility_code = UtilityCode.load_cached('unicode_tailmatch', 'StringTools.c')
bytes_tailmatch_utility_code = UtilityCode.load_cached('bytes_tailmatch', 'StringTools.c')
| OptimizeBuiltinCalls |
python | huggingface__transformers | src/transformers/utils/auto_docstring.py | {
"start": 29657,
"end": 32887
} | class ____:
PreTrainedModel = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
"""
Model = r"""
The bare {model_name} Model outputting raw hidden-states without any specific head on top.
"""
ForPreTraining = r"""
The {model_name} Model with a specified pretraining head on top.
"""
Decoder = r"""
The bare {model_name} Decoder outputting raw hidden-states without any specific head on top.
"""
TextModel = r"""
The bare {model_name} Text Model outputting raw hidden-states without any specific head on to.
"""
ForSequenceClassification = r"""
The {model_name} Model with a sequence classification/regression head on top e.g. for GLUE tasks.
"""
ForQuestionAnswering = r"""
The {model_name} transformer with a span classification head on top for extractive question-answering tasks like
SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
"""
ForMultipleChoice = r"""
The {model_name} Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
"""
ForMaskedLM = r"""
The {model_name} Model with a `language modeling` head on top."
"""
ForTokenClassification = r"""
The {model_name} transformer with a token classification head on top (a linear layer on top of the hidden-states
output) e.g. for Named-Entity-Recognition (NER) tasks.
"""
ForConditionalGeneration = r"""
The {model_name} Model for token generation conditioned on other modalities (e.g. image-text-to-text generation).
"""
ForCausalLM = r"""
The {model_name} Model for causal language modeling.
"""
ImageProcessorFast = r"""
Constructs a fast {model_name} image processor.
"""
Backbone = r"""
The {model_name} backbone.
"""
ForImageClassification = r"""
The {model_name} Model with an image classification head on top e.g. for ImageNet.
"""
ForSemanticSegmentation = r"""
The {model_name} Model with a semantic segmentation head on top e.g. for ADE20K, CityScapes.
"""
ForAudioClassification = r"""
The {model_name} Model with an audio classification head on top (a linear layer on top of the pooled
output).
"""
ForAudioFrameClassification = r"""
The {model_name} Model with a frame classification head on top for tasks like Speaker Diarization.
"""
ForPrediction = r"""
The {model_name} Model with a distribution head on top for time-series forecasting.
"""
WithProjection = r"""
The {model_name} Model with a projection layer on top (a linear layer on top of the pooled output).
"""
| ClassDocstring |
python | spyder-ide__spyder | external-deps/spyder-kernels/spyder_kernels/customize/spyderpdb.py | {
"start": 1025,
"end": 1686
} | class ____:
"""
Notifies the frontend when debugging starts/stops
"""
def __init__(self, pdb_obj):
self.pdb_obj = pdb_obj
self._cleanup = True
def __enter__(self):
"""
Debugging starts.
"""
shell = self.pdb_obj.shell
if shell.pdb_session == self.pdb_obj:
self._cleanup = False
else:
shell.add_pdb_session(self.pdb_obj)
self._cleanup = True
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Debugging ends.
"""
if self._cleanup:
self.pdb_obj.shell.remove_pdb_session(self.pdb_obj)
| DebugWrapper |
python | PrefectHQ__prefect | src/prefect/task_runners.py | {
"start": 18853,
"end": 20045
} | class ____(concurrent.futures.Future[R]):
"""Wrapper for a Future that unpickles the result returned by cloudpickle_wrapped_call."""
def __init__(self, wrapped_future: concurrent.futures.Future[bytes]):
self.wrapped_future = wrapped_future
def result(self, timeout: float | None = None) -> R:
pickled_result = self.wrapped_future.result(timeout)
import cloudpickle
return cloudpickle.loads(pickled_result)
def exception(self, timeout: float | None = None) -> BaseException | None:
return self.wrapped_future.exception(timeout)
def done(self) -> bool:
return self.wrapped_future.done()
def cancelled(self) -> bool:
return self.wrapped_future.cancelled()
def cancel(self) -> bool:
return self.wrapped_future.cancel()
def add_done_callback(
self, fn: Callable[[concurrent.futures.Future[R]], object]
) -> None:
def _fn(wrapped_future: concurrent.futures.Future[bytes]) -> None:
import cloudpickle
result = cloudpickle.loads(wrapped_future.result())
fn(result)
return self.wrapped_future.add_done_callback(_fn)
| _UnpicklingFuture |
python | sphinx-doc__sphinx | tests/roots/test-ext-viewcode-find-package/main_package/subpackage/_subpackage2/submodule.py | {
"start": 233,
"end": 342
} | class ____:
"""this is Class3"""
class_attr = 42
"""this is the class attribute class_attr"""
| Class3 |
python | django-haystack__django-haystack | test_haystack/whoosh_tests/test_whoosh_backend.py | {
"start": 28390,
"end": 30094
} | class ____(WhooshTestCase):
def setUp(self):
super().setUp()
self.old_ui = connections["whoosh"].get_unified_index()
self.ui = UnifiedIndex()
self.wmmi = WhooshBoostMockSearchIndex()
self.ui.build(indexes=[self.wmmi])
self.sb = connections["whoosh"].get_backend()
connections["whoosh"]._index = self.ui
self.sb.setup()
self.raw_whoosh = self.sb.index
self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)
self.sb.delete_index()
self.sample_objs = []
for i in range(1, 5):
mock = AFourthMockModel()
mock.id = i
if i % 2:
mock.author = "daniel"
mock.editor = "david"
else:
mock.author = "david"
mock.editor = "daniel"
mock.pub_date = date(2009, 2, 25) - timedelta(days=i)
self.sample_objs.append(mock)
def tearDown(self):
connections["whoosh"]._index = self.ui
super().tearDown()
@unittest.expectedFailure
def test_boost(self):
self.sb.update(self.wmmi, self.sample_objs)
self.raw_whoosh = self.raw_whoosh.refresh()
searcher = self.raw_whoosh.searcher()
self.assertEqual(len(searcher.search(self.parser.parse("*"), limit=1000)), 2)
results = SearchQuerySet("whoosh").filter(
SQ(author="daniel") | SQ(editor="daniel")
)
self.assertEqual(
[result.id for result in results],
["core.afourthmockmodel.1", "core.afourthmockmodel.3"],
)
self.assertEqual(results[0].boost, 1.1)
| WhooshBoostBackendTestCase |
python | sympy__sympy | sympy/matrices/common.py | {
"start": 2030,
"end": 4130
} | class ____(type):
#
# Override the default __instancecheck__ implementation to ensure that
# e.g. isinstance(M, MatrixCommon) still works when M is one of the
# matrix classes. Matrix no longer inherits from MatrixCommon so
# isinstance(M, MatrixCommon) would now return False by default.
#
# There were lots of places in the codebase where this was being done
# so it seems likely that downstream code may be doing it too. All use
# of these mixins is deprecated though so we give a deprecation warning
# unconditionally if they are being used with isinstance.
#
# Any code seeing this deprecation warning should be changed to use
# isinstance(M, MatrixBase) instead which also works in previous versions
# of SymPy.
#
def __instancecheck__(cls, instance):
sympy_deprecation_warning(
f"""
Checking whether an object is an instance of {cls.__name__} is
deprecated.
Use `isinstance(obj, Matrix)` instead of `isinstance(obj, {cls.__name__})`.
""",
deprecated_since_version="1.13",
active_deprecations_target="deprecated-matrix-mixins",
stacklevel=3,
)
from sympy.matrices.matrixbase import MatrixBase
from sympy.matrices.matrices import (
MatrixDeterminant,
MatrixReductions,
MatrixSubspaces,
MatrixEigen,
MatrixCalculus,
MatrixDeprecated
)
all_mixins = (
MatrixRequired,
MatrixShaping,
MatrixSpecial,
MatrixProperties,
MatrixOperations,
MatrixArithmetic,
MatrixCommon,
MatrixDeterminant,
MatrixReductions,
MatrixSubspaces,
MatrixEigen,
MatrixCalculus,
MatrixDeprecated
)
if cls in all_mixins and isinstance(instance, MatrixBase):
return True
else:
return super().__instancecheck__(instance)
| _MatrixDeprecatedMeta |
python | pytorch__pytorch | torch/ao/quantization/fx/quantize_handler.py | {
"start": 6075,
"end": 6134
} | class ____(QuantizeHandler):
pass
| BinaryOpQuantizeHandler |
python | openai__gym | gym/wrappers/normalize.py | {
"start": 3728,
"end": 5712
} | class ____(gym.core.Wrapper):
r"""This wrapper will normalize immediate rewards s.t. their exponential moving average has a fixed variance.
The exponential moving average will have variance :math:`(1 - \gamma)^2`.
Note:
The scaling depends on past trajectories and rewards will not be scaled correctly if the wrapper was newly
instantiated or the policy was changed recently.
"""
def __init__(
self,
env: gym.Env,
gamma: float = 0.99,
epsilon: float = 1e-8,
):
"""This wrapper will normalize immediate rewards s.t. their exponential moving average has a fixed variance.
Args:
env (env): The environment to apply the wrapper
epsilon (float): A stability parameter
gamma (float): The discount factor that is used in the exponential moving average.
"""
super().__init__(env)
self.num_envs = getattr(env, "num_envs", 1)
self.is_vector_env = getattr(env, "is_vector_env", False)
self.return_rms = RunningMeanStd(shape=())
self.returns = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
def step(self, action):
"""Steps through the environment, normalizing the rewards returned."""
obs, rews, terminateds, truncateds, infos = self.env.step(action)
if not self.is_vector_env:
rews = np.array([rews])
self.returns = self.returns * self.gamma + rews
rews = self.normalize(rews)
dones = np.logical_or(terminateds, truncateds)
self.returns[dones] = 0.0
if not self.is_vector_env:
rews = rews[0]
return obs, rews, terminateds, truncateds, infos
def normalize(self, rews):
"""Normalizes the rewards with the running mean rewards and their variance."""
self.return_rms.update(self.returns)
return rews / np.sqrt(self.return_rms.var + self.epsilon)
| NormalizeReward |
python | pyqtgraph__pyqtgraph | pyqtgraph/parametertree/ParameterSystem.py | {
"start": 162,
"end": 4308
} | class ____(GroupParameter):
"""
ParameterSystem is a subclass of GroupParameter that manages a tree of
sub-parameters with a set of interdependencies--changing any one parameter
may affect other parameters in the system.
See parametertree/SystemSolver for more information.
NOTE: This API is experimental and may change substantially across minor
version numbers.
"""
def __init__(self, *args, **kwds):
GroupParameter.__init__(self, *args, **kwds)
self._system = None
self._fixParams = [] # all auto-generated 'fixed' params
sys = kwds.pop('system', None)
if sys is not None:
self.setSystem(sys)
self._ignoreChange = [] # params whose changes should be ignored temporarily
self.sigTreeStateChanged.connect(self.updateSystem)
def setSystem(self, sys):
self._system = sys
# auto-generate defaults to match child parameters
defaults = {}
vals = {}
for param in self:
name = param.name()
constraints = ''
if hasattr(sys, '_' + name):
constraints += 'n'
if not param.readonly():
constraints += 'f'
if 'n' in constraints:
ch = param.addChild(dict(name='fixed', type='bool', value=False))
self._fixParams.append(ch)
param.setReadonly(True)
param.setOpts(expanded=False)
else:
vals[name] = param.value()
ch = param.addChild(dict(name='fixed', type='bool', value=True, readonly=True))
#self._fixParams.append(ch)
defaults[name] = [None, param.type(), None, constraints]
sys.defaultState.update(defaults)
sys.reset()
for name, value in vals.items():
setattr(sys, name, value)
self.updateAllParams()
def updateSystem(self, param, changes):
changes = [ch for ch in changes if ch[0] not in self._ignoreChange]
sets = [ch[0] for ch in changes if ch[1] == 'value']
for param in sets:
if param in self._fixParams:
parent = param.parent()
setattr(self._system, parent.name(), parent.value() if parent.hasValue() else None)
else:
setattr(self._system, param.name(), param.value())
self.updateAllParams()
def updateAllParams(self):
try:
self.sigTreeStateChanged.disconnect(self.updateSystem)
for name, state in self._system._vars.items():
param = self.child(name)
try:
v = getattr(self._system, name)
if self._system._vars[name][2] is None:
self.updateParamState(self.child(name), 'autoSet')
param.setValue(v)
else:
self.updateParamState(self.child(name), 'fixed')
except RuntimeError:
self.updateParamState(param, 'autoUnset')
finally:
self.sigTreeStateChanged.connect(self.updateSystem)
def updateParamState(self, param, state):
if state == 'autoSet':
bg = fn.mkBrush((200, 255, 200, 255))
bold = False
readonly = True
elif state == 'autoUnset':
bg = fn.mkBrush(None)
bold = False
readonly = False
elif state == 'fixed':
bg = fn.mkBrush('y')
bold = True
readonly = False
else:
raise ValueError("'state' must be one of 'autoSet', 'autoUnset', or 'fixed'")
param.setReadonly(readonly)
#for item in param.items:
#item.setBackground(0, bg)
#f = item.font(0)
#f.setWeight(f.Bold if bold else f.Normal)
#item.setFont(0, f)
| ParameterSystem |
python | django__django | tests/forms_tests/tests/tests.py | {
"start": 732,
"end": 906
} | class ____(ModelForm):
multi_choice = CharField(max_length=50)
class Meta:
exclude = ["multi_choice"]
model = ChoiceFieldModel
| ChoiceFieldExclusionForm |
python | FactoryBoy__factory_boy | factory/base.py | {
"start": 4127,
"end": 13605
} | class ____:
def __init__(self):
self.factory = None
self.base_factory = None
self.base_declarations = {}
self.parameters = {}
self.parameters_dependencies = {}
self.pre_declarations = builder.DeclarationSet()
self.post_declarations = builder.DeclarationSet()
self._counter = None
self.counter_reference = None
@property
def declarations(self):
base_declarations = dict(self.base_declarations)
for name, param in utils.sort_ordered_objects(self.parameters.items(), getter=lambda item: item[1]):
base_declarations.update(param.as_declarations(name, base_declarations))
return base_declarations
def _build_default_options(self):
""""Provide the default value for all allowed fields.
Custom FactoryOptions classes should override this method
to update() its return value.
"""
def is_model(meta, value):
if isinstance(value, FactoryMetaClass):
raise TypeError(
"%s is already a %s"
% (repr(value), Factory.__name__)
)
return [
OptionDefault('model', None, inherit=True, checker=is_model),
OptionDefault('abstract', False, inherit=False),
OptionDefault('strategy', enums.CREATE_STRATEGY, inherit=True),
OptionDefault('inline_args', (), inherit=True),
OptionDefault('exclude', (), inherit=True),
OptionDefault('rename', {}, inherit=True),
]
def _fill_from_meta(self, meta, base_meta):
# Exclude private/protected fields from the meta
if meta is None:
meta_attrs = {}
else:
meta_attrs = {
k: v
for (k, v) in vars(meta).items()
if not k.startswith('_')
}
for option in self._build_default_options():
assert not hasattr(self, option.name), "Can't override field %s." % option.name
value = option.apply(meta, base_meta)
meta_attrs.pop(option.name, None)
setattr(self, option.name, value)
if meta_attrs:
# Some attributes in the Meta aren't allowed here
raise TypeError(
"'class Meta' for %r got unknown attribute(s) %s"
% (self.factory, ','.join(sorted(meta_attrs.keys()))))
def contribute_to_class(self, factory, meta=None, base_meta=None, base_factory=None, params=None):
self.factory = factory
self.base_factory = base_factory
self._fill_from_meta(meta=meta, base_meta=base_meta)
self.model = self.get_model_class()
if self.model is None:
self.abstract = True
self.counter_reference = self._get_counter_reference()
# Scan the inheritance chain, starting from the furthest point,
# excluding the current class, to retrieve all declarations.
for parent in reversed(self.factory.__mro__[1:]):
if not hasattr(parent, '_meta'):
continue
self.base_declarations.update(parent._meta.base_declarations)
self.parameters.update(parent._meta.parameters)
for k, v in vars(self.factory).items():
if self._is_declaration(k, v):
self.base_declarations[k] = v
if params is not None:
for k, v in utils.sort_ordered_objects(vars(params).items(), getter=lambda item: item[1]):
if not k.startswith('_'):
self.parameters[k] = declarations.SimpleParameter.wrap(v)
self._check_parameter_dependencies(self.parameters)
self.pre_declarations, self.post_declarations = builder.parse_declarations(self.declarations)
def _get_counter_reference(self):
"""Identify which factory should be used for a shared counter."""
if (self.model is not None
and self.base_factory is not None
and self.base_factory._meta.model is not None
and issubclass(self.model, self.base_factory._meta.model)):
return self.base_factory._meta.counter_reference
else:
return self
    def _initialize_counter(self):
        """Initialize our counter pointer.
        If we're the top-level factory, instantiate a new counter
        Otherwise, point to the top-level factory's counter.
        """
        if self._counter is not None:
            # Already initialized (idempotent).
            return
        if self.counter_reference is self:
            self._counter = _Counter(seq=self.factory._setup_next_sequence())
        else:
            # Make sure the toplevel factory's counter exists, then share it.
            self.counter_reference._initialize_counter()
            self._counter = self.counter_reference._counter
    def next_sequence(self):
        """Retrieve a new sequence ID.
        This will call, in order:
        - next_sequence from the base factory, if provided
        - _setup_next_sequence, if this is the 'toplevel' factory and the
          sequence counter wasn't initialized yet; then increase it.

        Returns:
            The next value from the (possibly shared) counter.
        """
        self._initialize_counter()
        return self._counter.next()
    def reset_sequence(self, value=None, force=False):
        """Reset the sequence counter.

        Args:
            value: value to reset to; defaults to the counter owner's
                ``_setup_next_sequence()``.
            force: allow resetting from a descendant factory whose counter
                is shared with a parent.

        Raises:
            ValueError: when called on a descendant factory without ``force``.
        """
        self._initialize_counter()
        if self.counter_reference is not self and not force:
            raise ValueError(
                "Can't reset a sequence on descendant factory %r; reset sequence on %r or use `force=True`."
                % (self.factory, self.counter_reference.factory))
        if value is None:
            value = self.counter_reference.factory._setup_next_sequence()
        self._counter.reset(value)
def prepare_arguments(self, attributes):
"""Convert an attributes dict to a (args, kwargs) tuple."""
kwargs = dict(attributes)
# 1. Extension points
kwargs = self.factory._adjust_kwargs(**kwargs)
# 2. Remove hidden objects
kwargs = {
k: v for k, v in kwargs.items()
if k not in self.exclude and k not in self.parameters and v is not declarations.SKIP
}
# 3. Rename fields
for old_name, new_name in self.rename.items():
if old_name in kwargs:
kwargs[new_name] = kwargs.pop(old_name)
# 4. Extract inline args
args = tuple(
kwargs.pop(arg_name)
for arg_name in self.inline_args
)
return args, kwargs
    def instantiate(self, step, args, kwargs):
        """Instantiate the model according to the builder's strategy.

        Dispatches to the factory's ``_build``/``_create`` hooks, or returns
        a StubObject for the stub strategy.
        """
        model = self.get_model_class()
        if step.builder.strategy == enums.BUILD_STRATEGY:
            return self.factory._build(model, *args, **kwargs)
        elif step.builder.strategy == enums.CREATE_STRATEGY:
            return self.factory._create(model, *args, **kwargs)
        else:
            # Only three strategies exist; anything else is a programming error.
            assert step.builder.strategy == enums.STUB_STRATEGY
            return StubObject(**kwargs)
    def use_postgeneration_results(self, step, instance, results):
        """Forward post-generation declaration results to the factory hook."""
        self.factory._after_postgeneration(
            instance,
            create=step.builder.strategy == enums.CREATE_STRATEGY,
            results=results,
        )
def _is_declaration(self, name, value):
"""Determines if a class attribute is a field value declaration.
Based on the name and value of the class attribute, return ``True`` if
it looks like a declaration of a default field value, ``False`` if it
is private (name starts with '_') or a classmethod or staticmethod.
"""
if isinstance(value, (classmethod, staticmethod)):
return False
elif enums.get_builder_phase(value):
# All objects with a defined 'builder phase' are declarations.
return True
return not name.startswith("_")
    def _check_parameter_dependencies(self, parameters):
        """Find out in what order parameters should be called.

        Returns:
            dict: a direct dependency mapping derived from each parameter's
                reverse dependencies.

        Raises:
            errors.CyclicDefinitionError: if parameters depend on each other
                in a cycle.
        """
        # Warning: parameters only provide reverse dependencies; we reverse them into standard dependencies.
        # deep_revdeps: set of fields a field depend indirectly upon
        deep_revdeps = collections.defaultdict(set)
        # Actual, direct dependencies
        deps = collections.defaultdict(set)
        for name, parameter in parameters.items():
            if isinstance(parameter, declarations.Parameter):
                field_revdeps = parameter.get_revdeps(parameters)
                if not field_revdeps:
                    continue
                # Accumulate this field's transitive reverse-dependencies.
                deep_revdeps[name] = set.union(*(deep_revdeps[dep] for dep in field_revdeps))
                deep_revdeps[name] |= set(field_revdeps)
                for dep in field_revdeps:
                    deps[dep].add(name)
        # Check for cyclical dependencies
        cyclic = [name for name, field_deps in deep_revdeps.items() if name in field_deps]
        if cyclic:
            raise errors.CyclicDefinitionError(
                "Cyclic definition detected on %r; Params around %s"
                % (self.factory, ', '.join(cyclic)))
        return deps
    def get_model_class(self):
        """Extension point for loading model classes.
        This can be overridden in framework-specific subclasses to hook into
        existing model repositories, for instance.

        Returns:
            The model class configured on this options object (may be None
            for abstract factories).
        """
        return self.model
def __str__(self):
return "<%s for %s>" % (self.__class__.__name__, self.factory.__name__)
    def __repr__(self):
        # Mirror __str__ so debugger/log output stays consistent.
        return str(self)
# Factory base classes
| FactoryOptions |
python | getsentry__sentry | src/sentry/utils/math.py | {
"start": 872,
"end": 1226
class ____(MovingAverage):
    """Exponentially-weighted moving average with a fixed smoothing weight."""

    def __init__(self, weight: float):
        super().__init__()
        # The smoothing factor must lie strictly between 0 and 1.
        assert 0 < weight < 1
        self.weight = weight

    def update(self, n: int, avg: float, value: float) -> float:
        """Blend ``value`` into ``avg``; the first sample is returned as-is."""
        if n == 0:
            return value
        w = self.weight
        return w * value + (1 - w) * avg
| ExponentialMovingAverage |
python | ray-project__ray | python/ray/_private/ray_logging/__init__.py | {
"start": 7148,
"end": 7791
class ____:
    """Aggregated state for one deduplicated log pattern."""

    # Timestamp of the earliest log message seen of this pattern.
    timestamp: int
    # The number of un-printed occurrances for this pattern.
    count: int
    # Latest instance of this log pattern.
    # NOTE(review): this was annotated `int`, but `formatted()` concatenates
    # it with a string, so it must be a str.
    line: str
    # Latest metadata dict for this log pattern, not including the lines field.
    metadata: LogBatch
    # Set of (ip, pid) sources which have emitted this pattern.
    sources: Set[Tuple[str, int]]

    # The string that should be printed to stdout.
    def formatted(self) -> str:
        return self.line + _color(
            f" [repeated {self.count}x across cluster]" + _warn_once()
        )
| DedupState |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-microsoft-sharepoint/llama_index/readers/microsoft_sharepoint/base.py | {
"start": 541,
"end": 32622
} | class ____(BasePydanticReader, ResourcesReaderMixin, FileSystemReaderMixin):
"""
SharePoint reader.
Reads folders from the SharePoint site from a folder under documents.
Args:
client_id (str): The Application ID for the app registered in Microsoft Azure Portal.
The application must also be configured with MS Graph permissions "Files.ReadAll", "Sites.ReadAll" and BrowserSiteLists.Read.All.
client_secret (str): The application secret for the app registered in Azure.
tenant_id (str): Unique identifier of the Azure Active Directory Instance.
sharepoint_site_name (Optional[str]): The name of the SharePoint site to download from.
sharepoint_folder_path (Optional[str]): The path of the SharePoint folder to download from.
sharepoint_folder_id (Optional[str]): The ID of the SharePoint folder to download from. Overrides sharepoint_folder_path.
drive_name (Optional[str]): The name of the drive to download from.
drive_id (Optional[str]): The ID of the drive to download from. Overrides drive_name.
required_exts (Optional[List[str]]): List of required extensions. Default is None.
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file extension to a BaseReader class that specifies how to convert that
file to text. See `SimpleDirectoryReader` for more details.
attach_permission_metadata (bool): If True, the reader will attach permission metadata to the documents. Set to False if your vector store
only supports flat metadata (i.e. no nested fields or lists), or to avoid the additional API calls.
"""
client_id: str = None
client_secret: str = None
tenant_id: str = None
sharepoint_site_name: Optional[str] = None
sharepoint_site_id: Optional[str] = None
sharepoint_folder_path: Optional[str] = None
sharepoint_folder_id: Optional[str] = None
required_exts: Optional[List[str]] = None
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = Field(
default=None, exclude=True
)
attach_permission_metadata: bool = True
drive_name: Optional[str] = None
drive_id: Optional[str] = None
_authorization_headers = PrivateAttr()
_site_id_with_host_name = PrivateAttr()
_drive_id_endpoint = PrivateAttr()
_drive_id = PrivateAttr()
def __init__(
self,
client_id: str,
client_secret: str,
tenant_id: str,
sharepoint_site_name: Optional[str] = None,
sharepoint_folder_path: Optional[str] = None,
sharepoint_folder_id: Optional[str] = None,
required_exts: Optional[List[str]] = None,
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
drive_name: Optional[str] = None,
drive_id: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(
client_id=client_id,
client_secret=client_secret,
tenant_id=tenant_id,
sharepoint_site_name=sharepoint_site_name,
sharepoint_folder_path=sharepoint_folder_path,
sharepoint_folder_id=sharepoint_folder_id,
required_exts=required_exts,
file_extractor=file_extractor,
drive_name=drive_name,
drive_id=drive_id,
**kwargs,
)
    @classmethod
    def class_name(cls) -> str:
        """Return the reader's class name, used for serialization."""
        return "SharePointReader"
def _get_access_token(self) -> str:
"""
Gets the access_token for accessing file from SharePoint.
Returns:
str: The access_token for accessing the file.
Raises:
ValueError: If there is an error in obtaining the access_token.
"""
authority = f"https://login.microsoftonline.com/{self.tenant_id}/oauth2/token"
payload = {
"grant_type": "client_credentials",
"client_id": self.client_id,
"client_secret": self.client_secret,
"resource": "https://graph.microsoft.com/",
}
response = requests.post(
url=authority,
data=payload,
)
json_response = response.json()
if response.status_code == 200 and "access_token" in json_response:
return json_response["access_token"]
else:
error_message = json_response.get("error_description") or json_response.get(
"error"
)
logger.error("Error retrieving access token: %s", json_response["error"])
raise ValueError(f"Error retrieving access token: {error_message}")
    def _send_request_with_retry(self, request: requests.Request) -> requests.Response:
        """
        Makes a request to the SharePoint API with the provided request object.
        If the request fails with a 401 status code, the access token is refreshed and the request is retried once.
        """
        # Merge auth headers on top of any caller-supplied headers.
        curr_headers = (request.headers or {}).copy()
        curr_headers.update(self._authorization_headers)
        request.headers = curr_headers
        prepared_request = request.prepare()
        with requests.Session() as session:
            response = session.send(prepared_request)
            if response.status_code == 401:
                # 401 status code indicates that the access token has expired
                # refresh the token and retry once
                logger.debug("Received 401. Refreshing access token.")
                access_token = self._get_access_token()
                self._authorization_headers = {
                    "Authorization": f"Bearer {access_token}"
                }
                # Re-prepare the request with the fresh token for the retry.
                curr_headers.update(self._authorization_headers)
                request.headers = curr_headers
                prepared_request = request.prepare()
                response = session.send(prepared_request)
            # Any remaining non-2xx status is surfaced as an HTTPError.
            response.raise_for_status()
            return response
    def _send_get_with_retry(self, url: str) -> requests.Response:
        """Issue a GET to ``url`` with auth headers and a single 401-retry."""
        request = requests.Request(
            method="GET",
            url=url,
        )
        return self._send_request_with_retry(request)
def _get_site_id_with_host_name(
self, access_token, sharepoint_site_name: Optional[str]
) -> str:
"""
Retrieves the site ID of a SharePoint site using the provided site name.
Args:
sharepoint_site_name (str): The name of the SharePoint site.
Returns:
str: The ID of the SharePoint site.
Raises:
Exception: If the specified SharePoint site is not found.
"""
if hasattr(self, "_site_id_with_host_name"):
return self._site_id_with_host_name
self._authorization_headers = {"Authorization": f"Bearer {access_token}"}
if self.sharepoint_site_id:
return self.sharepoint_site_id
if not (sharepoint_site_name):
raise ValueError("The SharePoint site name or ID must be provided.")
site_information_endpoint = f"https://graph.microsoft.com/v1.0/sites"
while site_information_endpoint:
response = self._send_get_with_retry(site_information_endpoint)
json_response = response.json()
if response.status_code == 200 and "value" in json_response:
if (
len(json_response["value"]) > 0
and "id" in json_response["value"][0]
):
# find the site with the specified name
for site in json_response["value"]:
if (
"name" in site
and site["name"].lower() == sharepoint_site_name.lower()
):
return site["id"]
site_information_endpoint = json_response.get(
"@odata.nextLink", None
)
else:
raise ValueError(
f"The specified sharepoint site {sharepoint_site_name} is not found."
)
else:
error_message = json_response.get(
"error_description"
) or json_response.get("error")
logger.error("Error retrieving site ID: %s", json_response["error"])
raise ValueError(f"Error retrieving site ID: {error_message}")
raise ValueError(
f"The specified sharepoint site {sharepoint_site_name} is not found."
)
def _get_drive_id(self) -> str:
"""
Retrieves the drive ID of the SharePoint site.
Returns:
str: The ID of the SharePoint site drive.
Raises:
ValueError: If there is an error in obtaining the drive ID.
"""
if hasattr(self, "_drive_id"):
return self._drive_id
if self.drive_id:
return self.drive_id
self._drive_id_endpoint = f"https://graph.microsoft.com/v1.0/sites/{self._site_id_with_host_name}/drives"
response = self._send_get_with_retry(self._drive_id_endpoint)
json_response = response.json()
if response.status_code == 200 and "value" in json_response:
if len(json_response["value"]) > 0 and self.drive_name is not None:
for drive in json_response["value"]:
if drive["name"].lower() == self.drive_name.lower():
return drive["id"]
raise ValueError(f"The specified drive {self.drive_name} is not found.")
if len(json_response["value"]) > 0 and "id" in json_response["value"][0]:
return json_response["value"][0]["id"]
else:
raise ValueError(
"Error occurred while fetching the drives for the sharepoint site."
)
else:
error_message = json_response.get("error_description") or json_response.get(
"error"
)
logger.error("Error retrieving drive ID: %s", json_response["error"])
raise ValueError(f"Error retrieving drive ID: {error_message}")
def _get_sharepoint_folder_id(self, folder_path: str) -> str:
"""
Retrieves the folder ID of the SharePoint site.
Args:
folder_path (str): The path of the folder in the SharePoint site.
Returns:
str: The ID of the SharePoint site folder.
"""
folder_id_endpoint = (
f"{self._drive_id_endpoint}/{self._drive_id}/root:/{folder_path}"
)
response = self._send_get_with_retry(folder_id_endpoint)
if response.status_code == 200 and "id" in response.json():
return response.json()["id"]
else:
error_message = response.json().get("error", "Unknown error")
logger.error("Error retrieving folder ID: %s", error_message)
raise ValueError(f"Error retrieving folder ID: {error_message}")
def _download_files_and_extract_metadata(
self,
folder_id: str,
download_dir: str,
include_subfolders: bool = False,
) -> Dict[str, str]:
"""
Downloads files from the specified folder ID and extracts metadata.
Args:
folder_id (str): The ID of the folder from which the files should be downloaded.
download_dir (str): The directory where the files should be downloaded.
include_subfolders (bool): If True, files from all subfolders are downloaded.
Returns:
Dict[str, str]: A dictionary containing the metadata of the downloaded files.
Raises:
ValueError: If there is an error in downloading the files.
"""
files_path = self.list_resources(
sharepoint_site_name=self.sharepoint_site_name,
sharepoint_site_id=self.sharepoint_site_id,
sharepoint_folder_id=folder_id,
)
metadata = {}
for file_path in files_path:
item = self._get_item_from_path(file_path)
metadata.update(self._download_file(item, download_dir))
return metadata
def _get_file_content_by_url(self, item: Dict[str, Any]) -> bytes:
"""
Retrieves the content of the file from the provided URL.
Args:
item (Dict[str, Any]): Dictionary containing file metadata.
Returns:
bytes: The content of the file.
"""
file_download_url = item["@microsoft.graph.downloadUrl"]
response = requests.get(file_download_url)
if response.status_code != 200:
json_response = response.json()
error_message = json_response.get("error_description") or json_response.get(
"error"
)
logger.error("Error downloading file content: %s", json_response["error"])
raise ValueError(f"Error downloading file content: {error_message}")
return response.content
def _download_file_by_url(self, item: Dict[str, Any], download_dir: str) -> str:
"""
Downloads the file from the provided URL.
Args:
item (Dict[str, Any]): Dictionary containing file metadata.
download_dir (str): The directory where the files should be downloaded.
Returns:
str: The path of the downloaded file in the temporary directory.
"""
# Get the download URL for the file.
file_name = item["name"]
content = self._get_file_content_by_url(item)
# Create the directory if it does not exist and save the file.
if not os.path.exists(download_dir):
os.makedirs(download_dir)
file_path = os.path.join(download_dir, file_name)
with open(file_path, "wb") as f:
f.write(content)
return file_path
def _get_permissions_info(self, item: Dict[str, Any]) -> Dict[str, str]:
"""
Extracts the permissions information for the file. For more information, see:
https://learn.microsoft.com/en-us/graph/api/resources/permission?view=graph-rest-1.0.
Args:
item (Dict[str, Any]): Dictionary containing file metadata.
Returns:
Dict[str, str]: A dictionary containing the extracted permissions information.
"""
item_id = item.get("id")
permissions_info_endpoint = (
f"{self._drive_id_endpoint}/{self._drive_id}/items/{item_id}/permissions"
)
response = self._send_get_with_retry(permissions_info_endpoint)
permissions = response.json()
identity_sets = []
for permission in permissions["value"]:
# user type permissions
granted_to = permission.get("grantedToV2", None)
if granted_to:
identity_sets.append(granted_to)
# link type permissions
granted_to_identities = permission.get("grantedToIdentitiesV2", [])
for identity in granted_to_identities:
identity_sets.append(identity)
# Extract the identity information from each identity set
# they can be 'application', 'device', 'user', 'group', 'siteUser' or 'siteGroup'
# 'siteUser' and 'siteGroup' are site-specific, 'group' is for Microsoft 365 groups
permissions_dict = {}
for identity_set in identity_sets:
for identity, identity_info in identity_set.items():
id = identity_info.get("id")
display_name = identity_info.get("displayName")
ids_key = f"allowed_{identity}_ids"
display_names_key = f"allowed_{identity}_display_names"
if ids_key not in permissions_dict:
permissions_dict[ids_key] = []
if display_names_key not in permissions_dict:
permissions_dict[display_names_key] = []
permissions_dict[ids_key].append(id)
permissions_dict[display_names_key].append(display_name)
# sort to get consistent results, if possible
for key in permissions_dict:
try:
permissions_dict[key] = sorted(permissions_dict[key])
except TypeError:
pass
return permissions_dict
def _extract_metadata_for_file(self, item: Dict[str, Any]) -> Dict[str, str]:
"""
Extracts metadata related to the file.
Parameters
----------
- item (Dict[str, str]): Dictionary containing file metadata.
Returns
-------
- Dict[str, str]: A dictionary containing the extracted metadata.
"""
# Extract the required metadata for file.
if self.attach_permission_metadata:
metadata = self._get_permissions_info(item)
else:
metadata = {}
metadata.update(
{
"file_id": item.get("id"),
"file_name": item.get("name"),
"url": item.get("webUrl"),
"file_path": item.get("file_path"),
}
)
return metadata
    def _download_file(
        self,
        item: Dict[str, Any],
        download_dir: str,
    ):
        """Download one drive item and return a {local_path: metadata} dict."""
        metadata = {}
        file_path = self._download_file_by_url(item, download_dir)
        metadata[file_path] = self._extract_metadata_for_file(item)
        return metadata
def _download_files_from_sharepoint(
self,
download_dir: str,
sharepoint_site_name: Optional[str],
sharepoint_folder_path: Optional[str],
sharepoint_folder_id: Optional[str],
recursive: bool,
) -> Dict[str, str]:
"""
Downloads files from the specified folder and returns the metadata for the downloaded files.
Args:
download_dir (str): The directory where the files should be downloaded.
sharepoint_site_name (str): The name of the SharePoint site.
sharepoint_folder_path (str): The path of the folder in the SharePoint site.
recursive (bool): If True, files from all subfolders are downloaded.
Returns:
Dict[str, str]: A dictionary containing the metadata of the downloaded files.
"""
access_token = self._get_access_token()
self._site_id_with_host_name = self._get_site_id_with_host_name(
access_token, sharepoint_site_name
)
self._drive_id = self._get_drive_id()
if not sharepoint_folder_id and sharepoint_folder_path:
sharepoint_folder_id = self._get_sharepoint_folder_id(
sharepoint_folder_path
)
return self._download_files_and_extract_metadata(
sharepoint_folder_id,
download_dir,
recursive,
)
def _exclude_access_control_metadata(
self, documents: List[Document]
) -> List[Document]:
"""
Excludes the access control metadata from the documents for embedding and LLM calls.
Args:
documents (List[Document]): A list of documents.
Returns:
List[Document]: A list of documents with access control metadata excluded.
"""
for doc in documents:
access_control_keys = [
key for key in doc.metadata if key.startswith("allowed_")
]
doc.excluded_embed_metadata_keys.extend(access_control_keys)
doc.excluded_llm_metadata_keys.extend(access_control_keys)
return documents
def _load_documents_with_metadata(
self,
files_metadata: Dict[str, Any],
download_dir: str,
recursive: bool,
) -> List[Document]:
"""
Loads the documents from the downloaded files.
Args:
files_metadata (Dict[str,Any]): A dictionary containing the metadata of the downloaded files.
download_dir (str): The directory where the files should be downloaded.
recursive (bool): If True, files from all subfolders are downloaded.
Returns:
List[Document]: A list containing the documents with metadata.
"""
def get_metadata(filename: str) -> Any:
return files_metadata[filename]
simple_loader = SimpleDirectoryReader(
download_dir,
required_exts=self.required_exts,
file_extractor=self.file_extractor,
file_metadata=get_metadata,
recursive=recursive,
)
docs = simple_loader.load_data()
if self.attach_permission_metadata:
docs = self._exclude_access_control_metadata(docs)
return docs
def load_data(
self,
sharepoint_site_name: Optional[str] = None,
sharepoint_folder_path: Optional[str] = None,
sharepoint_folder_id: Optional[str] = None,
recursive: bool = True,
) -> List[Document]:
"""
Loads the files from the specified folder in the SharePoint site.
Args:
sharepoint_site_name (Optional[str]): The name of the SharePoint site.
sharepoint_folder_path (Optional[str]): The path of the folder in the SharePoint site.
recursive (bool): If True, files from all subfolders are downloaded.
Returns:
List[Document]: A list containing the documents with metadata.
Raises:
Exception: If an error occurs while accessing SharePoint site.
"""
# If no arguments are provided to load_data, default to the object attributes
if not sharepoint_site_name:
sharepoint_site_name = self.sharepoint_site_name
if not sharepoint_folder_path:
sharepoint_folder_path = self.sharepoint_folder_path
if not sharepoint_folder_id:
sharepoint_folder_id = self.sharepoint_folder_id
# TODO: make both of these values optional — and just default to the client ID defaults
if not (sharepoint_site_name or self.sharepoint_site_id):
raise ValueError("sharepoint_site_name must be provided.")
try:
with tempfile.TemporaryDirectory() as temp_dir:
files_metadata = self._download_files_from_sharepoint(
temp_dir,
sharepoint_site_name,
sharepoint_folder_path,
sharepoint_folder_id,
recursive,
)
# return self.files_metadata
return self._load_documents_with_metadata(
files_metadata, temp_dir, recursive
)
except Exception as exp:
logger.error("An error occurred while accessing SharePoint: %s", exp)
def _list_folder_contents(
self, folder_id: str, recursive: bool, current_path: str
) -> List[Path]:
"""
Helper method to fetch the contents of a folder.
Args:
folder_id (str): ID of the folder whose contents are to be listed.
recursive (bool): Whether to include subfolders recursively.
Returns:
List[Path]: List of file paths.
"""
folder_contents_endpoint = (
f"{self._drive_id_endpoint}/{self._drive_id}/items/{folder_id}/children"
)
response = self._send_get_with_retry(folder_contents_endpoint)
items = response.json().get("value", [])
file_paths = []
for item in items:
if "folder" in item and recursive:
# Recursive call for subfolder
subfolder_id = item["id"]
subfolder_paths = self._list_folder_contents(
subfolder_id, recursive, os.path.join(current_path, item["name"])
)
file_paths.extend(subfolder_paths)
elif "file" in item:
# Append file path
file_path = Path(os.path.join(current_path, item["name"]))
file_paths.append(file_path)
return file_paths
def _list_drive_contents(self) -> List[Path]:
"""
Helper method to fetch the contents of the drive.
Returns:
List[Path]: List of file paths.
"""
drive_contents_endpoint = (
f"{self._drive_id_endpoint}/{self._drive_id}/root/children"
)
response = self._send_get_with_retry(drive_contents_endpoint)
items = response.json().get("value", [])
file_paths = []
for item in items:
if "folder" in item:
# Append folder path
folder_paths = self._list_folder_contents(
item["id"], recursive=True, current_path=item["name"]
)
file_paths.extend(folder_paths)
elif "file" in item:
# Append file path
file_path = Path(item["name"])
file_paths.append(file_path)
return file_paths
def list_resources(
self,
sharepoint_site_name: Optional[str] = None,
sharepoint_folder_path: Optional[str] = None,
sharepoint_folder_id: Optional[str] = None,
sharepoint_site_id: Optional[str] = None,
recursive: bool = True,
) -> List[Path]:
"""
Lists the files in the specified folder in the SharePoint site.
Args:
**kwargs: Additional keyword arguments.
Returns:
List[Path]: A list of paths of the files in the specified folder.
Raises:
Exception: If an error occurs while accessing SharePoint site.
"""
# If no arguments are provided to load_data, default to the object attributes
if not sharepoint_site_name:
sharepoint_site_name = self.sharepoint_site_name
if not sharepoint_folder_path:
sharepoint_folder_path = self.sharepoint_folder_path
if not sharepoint_folder_id:
sharepoint_folder_id = self.sharepoint_folder_id
if not sharepoint_site_id:
sharepoint_site_id = self.sharepoint_site_id
if not (sharepoint_site_name or sharepoint_site_id):
raise ValueError(
"sharepoint_site_name or sharepoint_site_id must be provided."
)
file_paths = []
try:
access_token = self._get_access_token()
self._site_id_with_host_name = self._get_site_id_with_host_name(
access_token, sharepoint_site_name
)
self._drive_id = self._get_drive_id()
if sharepoint_folder_path:
if not sharepoint_folder_id:
sharepoint_folder_id = self._get_sharepoint_folder_id(
sharepoint_folder_path
)
# Fetch folder contents
folder_contents = self._list_folder_contents(
sharepoint_folder_id,
recursive,
os.path.join(sharepoint_site_name, sharepoint_folder_path),
)
file_paths.extend(folder_contents)
else:
# Fetch drive contents
drive_contents = self._list_drive_contents()
file_paths.extend(drive_contents)
except Exception as exp:
logger.error("An error occurred while listing files in SharePoint: %s", exp)
raise
return file_paths
def _get_item_from_path(self, input_file: Path) -> Dict[str, Any]:
"""
Retrieves the item details for a specified file in SharePoint.
Args:
input_file (Path): The path of the file in SharePoint.
Should include the SharePoint site name and the folder path. e.g. "site_name/folder_path/file_name".
Returns:
Dict[str, Any]: Dictionary containing the item details.
"""
# Get the file ID
# remove the site_name prefix
parts = [part for part in input_file.parts if part != self.sharepoint_site_name]
# URI escape each part of the path
escaped_parts = [quote(part) for part in parts]
file_path = "/".join(escaped_parts)
endpoint = f"{self._drive_id_endpoint}/{self._drive_id}/root:/{file_path}"
response = self._send_get_with_retry(endpoint)
return response.json()
def get_permission_info(self, resource_id: str, **kwargs) -> Dict:
"""
Get a dictionary of information about the permissions of a specific resource.
"""
try:
item = self._get_item_from_path(Path(resource_id))
return self._get_permissions_info(item)
except Exception as exp:
logger.error(
"An error occurred while fetching file information from SharePoint: %s",
exp,
)
raise
def get_resource_info(self, resource_id: str, **kwargs) -> Dict:
"""
Retrieves metadata for a specified file in SharePoint without downloading it.
Args:
input_file (Path): The path of the file in SharePoint. The path should include
the SharePoint site name and the folder path. e.g. "site_name/folder_path/file_name".
"""
try:
item = self._get_item_from_path(Path(resource_id))
info_dict = {
"file_path": resource_id,
"size": item.get("size"),
"created_at": item.get("createdDateTime"),
"modified_at": item.get("lastModifiedDateTime"),
"etag": item.get("eTag"),
"url": item.get("webUrl"),
}
if (
self.attach_permission_metadata
): # changes in access control should trigger a reingestion of the file
permissions = self._get_permissions_info(item)
info_dict.update(permissions)
return {
meta_key: meta_value
for meta_key, meta_value in info_dict.items()
if meta_value is not None
}
except Exception as exp:
logger.error(
"An error occurred while fetching file information from SharePoint: %s",
exp,
)
raise
def load_resource(self, resource_id: str, **kwargs) -> List[Document]:
try:
access_token = self._get_access_token()
self._site_id_with_host_name = self._get_site_id_with_host_name(
access_token, self.sharepoint_site_name
)
self._drive_id = self._get_drive_id()
path = Path(resource_id)
item = self._get_item_from_path(path)
with tempfile.TemporaryDirectory() as temp_dir:
metadata = self._download_file(item, temp_dir)
return self._load_documents_with_metadata(
metadata, temp_dir, recursive=False
)
except Exception as exp:
logger.error(
"An error occurred while reading file from SharePoint: %s", exp
)
raise
def read_file_content(self, input_file: Path, **kwargs) -> bytes:
try:
access_token = self._get_access_token()
self._site_id_with_host_name = self._get_site_id_with_host_name(
access_token, self.sharepoint_site_name
)
self._drive_id = self._get_drive_id()
item = self._get_item_from_path(input_file)
return self._get_file_content_by_url(item)
except Exception as exp:
logger.error(
"An error occurred while reading file content from SharePoint: %s", exp
)
raise
| SharePointReader |
python | miyuchina__mistletoe | docs/__init__.py | {
"start": 968,
"end": 2261
class ____(HtmlRenderer):
    """HTML renderer for the project's documentation pages."""

    def render_link(self, token):
        # Rewrite intra-repo link targets (via self.files) before rendering.
        return super().render_link(self._replace_link(token))

    def render_document(self, token, name="README.md"):
        """Render a full document into an <html> page with metadata header."""
        pattern = "<html>{}<body>{}</body></html>"
        self.footnotes.update(token.footnotes)
        # Remap footnote targets that point at source files to their
        # rendered counterparts.
        for filename, new_link in getattr(self, 'files', {}).items():
            for k, v in self.footnotes.items():
                if v == filename:
                    self.footnotes[k] = new_link
        # README gets the package version as subtitle; other pages use
        # their base filename.
        subtitle = ' | {}'.format('version ' + __version__ if name == 'README.md' else name.split('.')[0].lower())
        body = '\n'.join([self.render(child) for child in token.children])
        return pattern.format(METADATA.format(subtitle), body)

    def _replace_link(self, token):
        # Map a source-file link target to its rendered page, if known.
        token.target = getattr(self, 'files', {}).get(token.target, token.target)
        return token
def build(files=None):
files = files or INCLUDE
for f in files:
with open(f, 'r', encoding='utf-8') as fin:
rendered_file = 'docs/' + files[f]
with open(rendered_file, 'w+', encoding='utf-8') as fout:
with DocRenderer() as renderer:
renderer.files = files
print(renderer.render_document(Document(fin), f), file=fout)
| DocRenderer |
python | huggingface__transformers | src/transformers/models/ovis2/configuration_ovis2.py | {
"start": 733,
"end": 4970
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Ovis2VisionModel`]. It is used to instantiate a
Ovis2VisionModel model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of Ovis2.
Args:
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 2816):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input images.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the RMSNorm layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
qkv_bias (`bool`, *optional*, defaults to `False`):
Whether to add a learnable bias to the query, key, and value sequences at each attention head.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to add a learnable bias to the MLP layers.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
vocab_size (`int`, *optional*, defaults to 16384):
Vocabulary size of the Vision Transformer.
hidden_stride (`int`, *optional*, defaults to 1):
The stride of the hidden layer in the Vision Transformer.
num_visual_indicator_tokens (`int`, *optional*, defaults to 5):
Number of visual indicator tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal initializer for initializing all weight matrices.
tokenize_function (`str`, *optional*, defaults to `"softmax"`):
The function used to tokenize the visual indicator tokens.
"""
base_config_key = "vision_config"
def __init__(
self,
hidden_size: int = 1024,
intermediate_size: int = 2816,
num_hidden_layers: int = 24,
num_attention_heads: int = 8,
num_channels: int = 3,
image_size: int = 224,
patch_size: int = 14,
rms_norm_eps: float = 1e-5,
attention_dropout: float = 0.0,
qkv_bias: bool = False,
mlp_bias: bool = False,
hidden_act="silu",
vocab_size=16384,
hidden_stride=1,
num_visual_indicator_tokens=5,
initializer_range=0.02,
tokenize_function="softmax",
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.attention_dropout = attention_dropout
self.hidden_act = hidden_act
self.qkv_bias = qkv_bias
self.mlp_bias = mlp_bias
self.rms_norm_eps = rms_norm_eps
self.vocab_size = vocab_size
self.hidden_stride = hidden_stride
self.num_visual_indicator_tokens = num_visual_indicator_tokens
self.tokenize_function = tokenize_function
self.initializer_range = initializer_range
| Ovis2VisionConfig |
python | getsentry__sentry | src/sentry/web/frontend/auth_close.py | {
"start": 187,
"end": 635
} | class ____(BaseView):
"""This is a view to handle when sentry log in has been opened from
another window. This view loads an html page with a script that sends a message
back to the window opener and closes the window"""
def handle(self, request: HttpRequest) -> HttpResponse:
logged_in = request.user.is_authenticated
return render_to_response("sentry/auth_close.html", context={"logged_in": logged_in})
| AuthCloseView |
python | numpy__numpy | numpy/polynomial/tests/test_legendre.py | {
"start": 17114,
"end": 18784
} | class ____:
def test_legfromroots(self):
res = leg.legfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2])
pol = leg.legfromroots(roots)
res = leg.legval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(leg.leg2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_legroots(self):
assert_almost_equal(leg.legroots([1]), [])
assert_almost_equal(leg.legroots([1, 2]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = leg.legroots(leg.legfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_legtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, leg.legtrim, coef, -1)
# Test results
assert_equal(leg.legtrim(coef), coef[:-1])
assert_equal(leg.legtrim(coef, 1), coef[:-3])
assert_equal(leg.legtrim(coef, 2), [0])
def test_legline(self):
assert_equal(leg.legline(3, 4), [3, 4])
def test_legline_zeroscl(self):
assert_equal(leg.legline(3, 0), [3])
def test_leg2poly(self):
for i in range(10):
assert_almost_equal(leg.leg2poly([0] * i + [1]), Llist[i])
def test_poly2leg(self):
for i in range(10):
assert_almost_equal(leg.poly2leg(Llist[i]), [0] * i + [1])
def test_weight(self):
x = np.linspace(-1, 1, 11)
tgt = 1.
res = leg.legweight(x)
assert_almost_equal(res, tgt)
| TestMisc |
python | django-guardian__django-guardian | guardian/models/models.py | {
"start": 4143,
"end": 4403
} | class ____(UserObjectPermissionBase, BaseGenericObjectPermission):
class Meta(UserObjectPermissionBase.Meta, BaseGenericObjectPermission.Meta):
abstract = True
unique_together = ["user", "permission", "object_pk"]
| UserObjectPermissionAbstract |
python | ipython__ipython | IPython/terminal/pt_inputhooks/gtk4.py | {
"start": 79,
"end": 557
} | class ____:
def __init__(self, context):
self._quit = False
GLib.io_add_watch(
context.fileno(), GLib.PRIORITY_DEFAULT, GLib.IO_IN, self.quit
)
def quit(self, *args, **kwargs):
self._quit = True
return False
def run(self):
context = GLib.MainContext.default()
while not self._quit:
context.iteration(True)
def inputhook(context):
hook = _InputHook(context)
hook.run()
| _InputHook |
python | ray-project__ray | rllib/callbacks/tests/test_callbacks_on_algorithm.py | {
"start": 1070,
"end": 1356
} | class ____(RLlibCallback):
def on_algorithm_init(self, *, algorithm, metrics_logger, **kwargs):
self._on_init_was_called = True
def on_checkpoint_loaded(self, *, algorithm, **kwargs):
self._on_checkpoint_loaded_was_called = True
| InitAndCheckpointRestoredCallbacks |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_quote_name03.py | {
"start": 315,
"end": 1507
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("quote_name03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
sheetnames = (
"Sheet<1",
"Sheet>2",
"Sheet=3",
"Sheet@4",
"Sheet^5",
"Sheet`6",
"Sheet7",
"Sheet~8",
)
for sheetname in sheetnames:
worksheet = workbook.add_worksheet(sheetname)
chart = workbook.add_chart({"type": "pie"})
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": [sheetname, 0, 0, 4, 0]})
worksheet.insert_chart("E6", chart, {"x_offset": 26, "y_offset": 17})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | src/sentry/profiles/flamegraph.py | {
"start": 2330,
"end": 28966
} | class ____:
def __init__(
self,
*,
request: HttpRequest,
snuba_params: SnubaParams,
data_source: Literal["functions", "transactions", "profiles", "spans"],
query: str,
fingerprint: int | None = None,
):
self.request = request
self.snuba_params = snuba_params
self.data_source = data_source
self.query = query
self.fingerprint = fingerprint
def get_profile_candidates(self) -> ProfileCandidates:
if self.data_source == "functions":
return self.get_profile_candidates_from_functions()
elif self.data_source == "transactions":
return self.get_profile_candidates_from_transactions()
elif self.data_source == "profiles":
return self.get_profile_candidates_from_profiles()
elif self.data_source == "spans":
return self.get_profile_candidates_from_spans()
raise NotImplementedError
def get_profile_candidates_from_functions(self) -> ProfileCandidates:
max_profiles = options.get("profiling.flamegraph.profile-set.size")
builder = ProfileFunctionsQueryBuilder(
dataset=Dataset.Functions,
params={},
snuba_params=self.snuba_params,
selected_columns=["project.id", "timestamp", "all_examples()"],
query=self.query,
limit=max_profiles,
config=QueryBuilderConfig(
transform_alias_to_input_format=True,
),
)
if self.fingerprint is not None:
builder.add_conditions(
[Condition(builder.resolve_column("fingerprint"), Op.EQ, self.fingerprint)]
)
results = builder.run_query(Referrer.API_PROFILING_FUNCTION_SCOPED_FLAMEGRAPH.value)
results = builder.process_results(results)
transaction_profile_candidates: list[TransactionProfileCandidate] = []
profiler_metas: list[ProfilerMeta] = []
for row in results["data"]:
project = row["project.id"]
for example in row["all_examples()"]:
if len(transaction_profile_candidates) > max_profiles:
break
if "profile_id" in example:
transaction_profile_candidates.append(
{
"project_id": project,
"profile_id": example["profile_id"],
}
)
elif "profiler_id" in example:
profiler_metas.append(
ProfilerMeta(
project_id=project,
profiler_id=example["profiler_id"],
thread_id=example["thread_id"],
start=example["start"],
end=example["end"],
)
)
else:
# Will go to break if the inner loop breaks
continue
break
max_continuous_profile_candidates = max(
max_profiles - len(transaction_profile_candidates), 0
)
continuous_profile_candidates, _ = self.get_chunks_for_profilers(
profiler_metas,
max_continuous_profile_candidates,
)
return {
"transaction": transaction_profile_candidates,
"continuous": continuous_profile_candidates,
}
def get_profile_candidates_from_transactions(self) -> ProfileCandidates:
max_profiles = options.get("profiling.flamegraph.profile-set.size")
initial_chunk_delta_hours = options.get(
"profiling.flamegraph.query.initial_chunk_delta.hours"
)
max_chunk_delta_hours = options.get("profiling.flamegraph.query.max_delta.hours")
multiplier = options.get("profiling.flamegraph.query.multiplier")
initial_chunk_delta = timedelta(hours=initial_chunk_delta_hours)
max_chunk_delta = timedelta(hours=max_chunk_delta_hours)
transaction_profile_candidates: list[TransactionProfileCandidate] = []
profiler_metas: list[ProfilerMeta] = []
assert self.snuba_params.start is not None and self.snuba_params.end is not None
snuba_params = self.snuba_params.copy()
for chunk_start, chunk_end in split_datetime_range_exponential(
self.snuba_params.start,
self.snuba_params.end,
initial_chunk_delta,
max_chunk_delta,
multiplier,
reverse=True,
):
snuba_params.start = chunk_start
snuba_params.end = chunk_end
builder = self.get_transactions_based_candidate_query(
query=self.query, limit=max_profiles, snuba_params=snuba_params
)
results = builder.run_query(
Referrer.API_PROFILING_PROFILE_FLAMEGRAPH_TRANSACTION_CANDIDATES.value,
)
results = builder.process_results(results)
for row in results["data"]:
if row["profile.id"] is not None:
transaction_profile_candidates.append(
{
"project_id": row["project.id"],
"profile_id": row["profile.id"],
}
)
elif row["profiler.id"] is not None and row["thread.id"]:
profiler_metas.append(
ProfilerMeta(
project_id=row["project.id"],
profiler_id=row["profiler.id"],
thread_id=row["thread.id"],
start=row["precise.start_ts"],
end=row["precise.finish_ts"],
transaction_id=row["id"],
)
)
if len(transaction_profile_candidates) >= max_profiles:
break
max_continuous_profile_candidates = max(
max_profiles - len(transaction_profile_candidates), 0
)
continuous_profile_candidates: list[ContinuousProfileCandidate] = []
if max_continuous_profile_candidates > 0:
snuba_params.end = self.snuba_params.end
continuous_profile_candidates, _ = self.get_chunks_for_profilers(
profiler_metas, max_continuous_profile_candidates, snuba_params
)
return {
"transaction": transaction_profile_candidates,
"continuous": continuous_profile_candidates,
}
def get_transactions_based_candidate_query(
self,
query: str | None,
limit: int,
snuba_params: SnubaParams | None = None,
) -> DiscoverQueryBuilder:
builder = DiscoverQueryBuilder(
dataset=Dataset.Discover,
params={},
snuba_params=snuba_params or self.snuba_params,
selected_columns=[
"id",
"project.id",
"precise.start_ts",
"precise.finish_ts",
"profile.id",
"profiler.id",
"thread.id",
"timestamp",
],
query=query,
orderby=["-timestamp"],
limit=limit,
config=QueryBuilderConfig(
transform_alias_to_input_format=True,
),
)
builder.add_conditions(
[
Or(
conditions=[
Condition(builder.resolve_column("profile.id"), Op.IS_NOT_NULL),
And(
conditions=[
Condition(builder.resolve_column("profiler.id"), Op.IS_NOT_NULL),
Condition(
Function("has", [Column("contexts.key"), "trace.thread_id"]),
Op.EQ,
1,
),
],
),
],
),
],
)
return builder
def get_chunks_for_profilers(
self,
profiler_metas: list[ProfilerMeta],
limit: int,
snuba_params: SnubaParams | None = None,
) -> tuple[list[ContinuousProfileCandidate], float]:
total_duration = 0.0
if len(profiler_metas) == 0:
return [], total_duration
chunk_size = options.get("profiling.continuous-profiling.chunks-query.size")
queries = [
self._create_chunks_query(chunk, snuba_params)
for chunk in chunked(profiler_metas, chunk_size)
]
results = self._query_chunks_for_profilers(queries)
profiler_metas_by_profiler = defaultdict(list)
for profiler_meta in profiler_metas:
key = (profiler_meta.project_id, profiler_meta.profiler_id)
profiler_metas_by_profiler[key].append(profiler_meta)
continuous_profile_candidates: list[ContinuousProfileCandidate] = []
for result in results:
for row in result["data"]:
start = datetime.fromisoformat(row["start_timestamp"]).timestamp()
end = datetime.fromisoformat(row["end_timestamp"]).timestamp()
key = (row["project_id"], row["profiler_id"])
for profiler_meta in profiler_metas_by_profiler[key]:
if start > profiler_meta.end or end < profiler_meta.start:
continue
if len(continuous_profile_candidates) > limit:
break
candidate: ContinuousProfileCandidate = {
"project_id": profiler_meta.project_id,
"profiler_id": profiler_meta.profiler_id,
"chunk_id": row["chunk_id"],
"thread_id": profiler_meta.thread_id,
"start": str(int(profiler_meta.start * 1e9)),
"end": str(int(profiler_meta.end * 1e9)),
}
total_duration += profiler_meta.end - profiler_meta.start
if profiler_meta.transaction_id is not None:
candidate["transaction_id"] = profiler_meta.transaction_id
continuous_profile_candidates.append(candidate)
else:
# Will go to break if the inner loop breaks
continue
break
else:
# Will go to break if the inner loop breaks
continue
break
return continuous_profile_candidates, total_duration
def _create_chunks_query(
self, profiler_metas: list[ProfilerMeta], snuba_params: SnubaParams | None = None
) -> Query:
assert profiler_metas, "profiler_metas cannot be empty"
snuba_params = snuba_params or self.snuba_params
profiler_conditions = [profiler_meta.as_condition() for profiler_meta in profiler_metas]
if len(profiler_conditions) == 1:
profilers_condition = profiler_conditions[0]
else:
profilers_condition = Or(conditions=profiler_conditions)
project_condition = Condition(
Column("project_id"),
Op.IN,
list({profiler_meta.project_id for profiler_meta in profiler_metas}),
)
start_condition = Condition(
Column("start_timestamp"),
Op.LT,
resolve_datetime64(snuba_params.end),
)
end_condition = Condition(
Column("end_timestamp"), Op.GTE, resolve_datetime64(snuba_params.start)
)
return Query(
match=Storage(StorageKey.ProfileChunks.value),
select=[
Column("project_id"),
Column("profiler_id"),
Column("chunk_id"),
Column("start_timestamp"),
Column("end_timestamp"),
],
where=[
project_condition,
start_condition,
end_condition,
profilers_condition,
],
# Order by here follows that of the underlying table
# as a performance optimization
orderby=[
OrderBy(Column("project_id"), Direction.DESC),
OrderBy(Column("profiler_id"), Direction.DESC),
OrderBy(Column("start_timestamp"), Direction.DESC),
],
limit=Limit(options.get("profiling.continuous-profiling.chunks-set.size")),
)
def _query_chunks_for_profilers(self, queries: list[Query]) -> list[Mapping[str, Any]]:
"""This function is split out for mocking as we cannot write to the
profile chunks dataset in tests today"""
if self.snuba_params.organization is None:
raise ValueError("`organization` is required and cannot be `None`")
referrer = Referrer.API_PROFILING_PROFILE_FLAMEGRAPH_CHUNK_CANDIDATES.value
requests = [
Request(
dataset=Dataset.Profiles.value,
app_id="default",
query=query,
tenant_ids={
"referrer": referrer,
"organization_id": self.snuba_params.organization.id,
},
)
for query in queries
]
return bulk_snuba_queries(requests, referrer=referrer)
def get_profile_candidates_from_profiles(self) -> ProfileCandidates:
if self.snuba_params.organization is None:
raise ValueError("`organization` is required and cannot be `None`")
max_profiles = options.get("profiling.flamegraph.profile-set.size")
initial_chunk_delta_hours = options.get(
"profiling.flamegraph.query.initial_chunk_delta.hours"
)
max_chunk_delta_hours = options.get("profiling.flamegraph.query.max_delta.hours")
multiplier = options.get("profiling.flamegraph.query.multiplier")
initial_chunk_delta = timedelta(hours=initial_chunk_delta_hours)
max_chunk_delta = timedelta(hours=max_chunk_delta_hours)
referrer = Referrer.API_PROFILING_PROFILE_FLAMEGRAPH_PROFILE_CANDIDATES.value
transaction_profile_candidates: list[TransactionProfileCandidate] = []
profiler_metas: list[ProfilerMeta] = []
assert self.snuba_params.start is not None and self.snuba_params.end is not None
snuba_params = self.snuba_params.copy()
for chunk_start, chunk_end in split_datetime_range_exponential(
self.snuba_params.start,
self.snuba_params.end,
initial_chunk_delta,
max_chunk_delta,
multiplier,
reverse=True,
):
snuba_params.start = chunk_start
snuba_params.end = chunk_end
builder = self.get_transactions_based_candidate_query(
query=self.query, limit=max_profiles, snuba_params=snuba_params
)
results = builder.run_query(referrer)
results = builder.process_results(results)
for row in results["data"]:
if row["profile.id"] is not None:
transaction_profile_candidates.append(
{
"project_id": row["project.id"],
"profile_id": row["profile.id"],
}
)
elif row["profiler.id"] is not None and row["thread.id"]:
profiler_metas.append(
ProfilerMeta(
project_id=row["project.id"],
profiler_id=row["profiler.id"],
thread_id=row["thread.id"],
start=row["precise.start_ts"],
end=row["precise.finish_ts"],
transaction_id=row["id"],
)
)
if len(transaction_profile_candidates) + len(profiler_metas) >= max_profiles:
break
max_continuous_profile_candidates = max(
max_profiles - len(transaction_profile_candidates), 0
)
continuous_profile_candidates: list[ContinuousProfileCandidate] = []
continuous_duration = 0.0
# If there are continuous profiles attached to transactions, we prefer those as
# the active thread id gives us more user friendly flamegraphs than without.
if profiler_metas and max_continuous_profile_candidates > 0:
snuba_params.end = self.snuba_params.end
continuous_profile_candidates, continuous_duration = self.get_chunks_for_profilers(
profiler_metas, max_continuous_profile_candidates, snuba_params
)
seen_chunks = {
(candidate["profiler_id"], candidate["chunk_id"])
for candidate in continuous_profile_candidates
}
# If we still don't have enough continuous profile candidates + transaction profile candidates,
# we'll fall back to directly using the continuous profiling data
if len(continuous_profile_candidates) + len(transaction_profile_candidates) < max_profiles:
conditions = []
conditions.append(Condition(Column("project_id"), Op.IN, self.snuba_params.project_ids))
conditions.append(
Condition(
Column("start_timestamp"), Op.LT, resolve_datetime64(self.snuba_params.end)
)
)
conditions.append(
Condition(
Column("end_timestamp"), Op.GTE, resolve_datetime64(self.snuba_params.start)
)
)
environments = self.snuba_params.environment_names
if environments:
conditions.append(Condition(Column("environment"), Op.IN, environments))
continuous_profiles_query = Query(
match=Storage(StorageKey.ProfileChunks.value),
select=[
Column("project_id"),
Column("profiler_id"),
Column("chunk_id"),
Column("start_timestamp"),
Column("end_timestamp"),
],
where=conditions,
orderby=[OrderBy(Column("start_timestamp"), Direction.DESC)],
limit=Limit(max_profiles),
)
all_results = bulk_snuba_queries(
[
Request(
dataset=Dataset.Profiles.value,
app_id="default",
query=continuous_profiles_query,
tenant_ids={
"referrer": referrer,
"organization_id": self.snuba_params.organization.id,
},
),
],
referrer,
)
continuous_profile_results = all_results[0]
for row in continuous_profile_results["data"]:
# Make sure to dedupe profile chunks so we don't reuse chunks
if (row["profiler_id"], row["chunk_id"]) in seen_chunks:
continue
start_timestamp = datetime.fromisoformat(row["start_timestamp"]).timestamp()
end_timestamp = datetime.fromisoformat(row["end_timestamp"]).timestamp()
candidate: ContinuousProfileCandidate = {
"project_id": row["project_id"],
"profiler_id": row["profiler_id"],
"chunk_id": row["chunk_id"],
"start": str(int(start_timestamp * 1e9)),
"end": str(int(end_timestamp * 1e9)),
}
continuous_profile_candidates.append(candidate)
# can set max duration to negative to skip this check
if (
len(continuous_profile_candidates) + len(transaction_profile_candidates)
>= max_profiles
):
break
return {
"transaction": transaction_profile_candidates,
"continuous": continuous_profile_candidates,
}
def get_profile_candidates_from_spans(self) -> ProfileCandidates:
max_profiles = options.get("profiling.flamegraph.profile-set.size")
results = self.get_spans_based_candidates(query=self.query, limit=max_profiles)
transaction_profile_candidates: list[TransactionProfileCandidate] = [
{
"project_id": row["project.id"],
"profile_id": row["profile.id"],
}
for row in results["data"]
if row["profile.id"] is not None and row["profile.id"] != ""
]
max_continuous_profile_candidates = max(
max_profiles - len(transaction_profile_candidates), 0
)
continuous_profile_candidates, _ = self.get_chunks_for_profilers(
[
ProfilerMeta(
project_id=row["project.id"],
profiler_id=row["profiler.id"],
thread_id=row["thread.id"],
start=row["precise.start_ts"],
end=row["precise.finish_ts"],
)
for row in results["data"]
if row["profiler.id"] is not None and row["thread.id"]
],
max_continuous_profile_candidates,
)
return {
"transaction": transaction_profile_candidates,
"continuous": continuous_profile_candidates,
}
def get_spans_based_candidates(self, query: str | None, limit: int) -> EAPResponse:
# add constraints in order to fetch only spans with profiles
profiling_constraint = "(has:profile.id) or (has:profiler.id has:thread.id)"
if query is not None and len(query) > 0:
query = f"{query} and {profiling_constraint}"
else:
query = profiling_constraint
return Spans.run_table_query(
params=self.snuba_params,
query_string=query,
selected_columns=[
"id",
"project.id",
"precise.start_ts",
"precise.finish_ts",
"profile.id",
"profiler.id",
"thread.id",
"timestamp",
],
orderby=["-timestamp"],
offset=0,
limit=limit,
referrer=Referrer.API_TRACE_EXPLORER_TRACE_SPANS_CANDIDATES_FLAMEGRAPH.value,
sampling_mode=None,
config=SearchResolverConfig(
auto_fields=True,
),
)
def split_datetime_range_exponential(
start_datetime: datetime,
end_datetime: datetime,
initial_chunk_delta: timedelta,
max_delta: timedelta,
multiplier: int,
reverse: bool = False,
) -> Iterator[tuple[datetime, datetime]]:
"""
Splits a datetime range into exponentially increasing chunks, yielded by a generator.
The duration of each chunk increase `multiplier` times from the previous one until
it reaches the max_delta, at which point the chunk size remains constant.
Args:
start_datetime (datetime): The start of the datetime range.
end_datetime (datetime): The end of the datetime range.
initial_chunk_delta (timedelta): The duration of the first chunk.
max_delta (timedelta): The maximum duration for any chunk.
multiplier (int): The value by which the current delta is multiplied.
reverse (bool): If True, generate chunks in reverse order from end to start.
Yields:
tuple: A tuple representing a datetime chunk (start_of_chunk, end_of_chunk).
Raises:
TypeError: If args are not the correct datetime/timedelta objects.
ValueError: If datetimes are invalid, or deltas are not positive,
or initial_chunk_delta > max_delta.
"""
if not all(isinstance(dt, datetime) for dt in [start_datetime, end_datetime]):
raise TypeError("start_datetime and end_datetime must be datetime objects.")
if not all(isinstance(td, timedelta) for td in [initial_chunk_delta, max_delta]):
raise TypeError("initial_chunk_delta and max_delta must be timedelta objects.")
if start_datetime > end_datetime:
raise ValueError("start_datetime cannot be after end_datetime.")
if initial_chunk_delta.total_seconds() <= 0 or max_delta.total_seconds() <= 0:
raise ValueError("Time deltas must be positive durations.")
if initial_chunk_delta > max_delta:
raise ValueError("initial_chunk_delta cannot be greater than max_delta.")
if multiplier <= 0:
raise ValueError("multiplier must be a positive integer.")
if reverse:
# Generate chunks in reverse order (from end to start)
current_datetime = end_datetime
current_delta = initial_chunk_delta
while current_datetime > start_datetime:
chunk_start = current_datetime - current_delta
# Ensure the first chunk does not go past the start_datetime
if chunk_start < start_datetime:
chunk_start = start_datetime
yield (chunk_start, current_datetime)
# Prepare for the next iteration
current_datetime = chunk_start
# Multiply the delta for the next chunk, but cap it at max_delta
current_delta = min(current_delta * multiplier, max_delta)
else:
# Original forward logic
current_datetime = start_datetime
current_delta = initial_chunk_delta
while current_datetime < end_datetime:
chunk_end = current_datetime + current_delta
# Ensure the last chunk does not go past the end_datetime
if chunk_end > end_datetime:
chunk_end = end_datetime
yield (current_datetime, chunk_end)
# Prepare for the next iteration
current_datetime = chunk_end
# Double the delta for the next chunk, but cap it at max_delta
current_delta = min(current_delta * multiplier, max_delta)
| FlamegraphExecutor |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 22543,
"end": 22612
} | class ____(_OracleNumericCommon, sqltypes.Float):
pass
| _OracleFloat |
python | qdrant__qdrant-client | qdrant_client/http/api/indexes_api.py | {
"start": 4424,
"end": 5421
} | class ____(_IndexesApi):
def create_field_index(
self,
collection_name: str,
wait: bool = None,
ordering: WriteOrdering = None,
create_field_index: m.CreateFieldIndex = None,
) -> m.InlineResponse2005:
"""
Create index for field in collection
"""
return self._build_for_create_field_index(
collection_name=collection_name,
wait=wait,
ordering=ordering,
create_field_index=create_field_index,
)
def delete_field_index(
self,
collection_name: str,
field_name: str,
wait: bool = None,
ordering: WriteOrdering = None,
) -> m.InlineResponse2005:
"""
Delete field index for collection
"""
return self._build_for_delete_field_index(
collection_name=collection_name,
field_name=field_name,
wait=wait,
ordering=ordering,
)
| SyncIndexesApi |
python | streamlit__streamlit | lib/tests/streamlit/file_util_test.py | {
"start": 4968,
"end": 6820
} | class ____(unittest.TestCase):
def test_file_in_folder(self):
# Test with and without trailing slash
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/a/b/c/")
assert ret
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/a/b/c")
assert ret
def test_file_in_subfolder(self):
# Test with and without trailing slash
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/a")
assert ret
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/a/")
assert ret
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/a/b")
assert ret
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/a/b/")
assert ret
def test_file_not_in_folder(self):
# Test with and without trailing slash
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/d/e/f/")
assert not ret
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/d/e/f")
assert not ret
def test_rel_file_not_in_folder(self):
# Test with and without trailing slash
ret = file_util.file_is_in_folder_glob("foo.py", "/d/e/f/")
assert not ret
ret = file_util.file_is_in_folder_glob("foo.py", "/d/e/f")
assert not ret
def test_file_in_folder_glob(self):
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "**/c")
assert ret
def test_file_not_in_folder_glob(self):
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "**/f")
assert not ret
def test_rel_file_not_in_folder_glob(self):
ret = file_util.file_is_in_folder_glob("foo.py", "**/f")
assert not ret
def test_rel_file_in_folder_glob(self):
ret = file_util.file_is_in_folder_glob("foo.py", "")
assert ret
| FileIsInFolderTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-intercom/unit_tests/config_builder.py | {
"start": 122,
"end": 689
} | class ____:
def __init__(self) -> None:
self._config = {
"access_token": "fake_access_token",
"start_date": "2010-01-18T21:18:20Z",
}
def start_date(self, start_date: datetime) -> "ConfigBuilder":
self._config["start_date"] = start_date.strftime("%Y-%m-%dT%H:%M:%SZ")
return self
def access_token(self, refresh_token: str) -> "ConfigBuilder":
self._config["refresh_token"] = refresh_token
return self
def build(self) -> Mapping[str, Any]:
return self._config
| ConfigBuilder |
python | astropy__astropy | astropy/io/ascii/ipac.py | {
"start": 525,
"end": 754
} | class ____(Exception):
def __str__(self):
return "{}\nSee {}".format(
super().__str__(),
"https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html",
)
| IpacFormatErrorDBMS |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.