language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/jetmoe/modeling_jetmoe.py | {
"start": 22857,
"end": 24606
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: JetMoeConfig, layer_idx: Optional[int] = None):
super().__init__()
self.hidden_size = config.hidden_size
self.mlp = JetMoeMoE(config)
self.input_layernorm = JetMoeRMSNorm(config.hidden_size)
self.post_attention_layernorm = JetMoeRMSNorm(config.hidden_size)
self.self_attention = JetMoeAttention(config, layer_idx)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _, _ = self.self_attention(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| JetMoeDecoderLayer |
python | conda__conda | conda/env/env.py | {
"start": 10661,
"end": 11672
} | class ____(dict):
"""A ``dict`` subclass that parses the raw dependencies into a conda and pip list"""
def __init__(self, raw, *args, **kwargs):
super().__init__(*args, **kwargs)
self.raw = raw
self.parse()
def parse(self):
"""Parse the raw dependencies into a conda and pip list"""
if not self.raw:
return
self.update({"conda": []})
for line in self.raw:
if isinstance(line, dict):
self.update(line)
else:
self["conda"].append(str(MatchSpec(line)))
if "pip" in self:
if not self["pip"]:
del self["pip"]
if not any(MatchSpec(s).name == "pip" for s in self["conda"]):
self["conda"].append("pip")
# TODO only append when it's not already present
def add(self, package_name):
"""Add a package to the ``EnvironmentYaml``"""
self.raw.append(package_name)
self.parse()
| Dependencies |
python | tensorflow__tensorflow | tensorflow/compiler/tests/quantized_ops_test.py | {
"start": 1767,
"end": 4046
} | class ____(xla_test.XLATestCase):
def pack_uint8_r2_to_uint32(self, test_input):
num_rows, num_columns = test_input.get_shape().as_list()
num_output_columns = int(math.ceil(num_columns / 4.0))
padding_input = array_ops.pad(
math_ops.cast(test_input, dtype=dtypes.uint8),
constant_op.constant([[
0,
0,
], [0, num_output_columns * 4 - num_columns]]))
output = array_ops.zeros([num_rows, num_output_columns],
dtype=dtypes.uint32)
num_elements_per_pack = 4
shift_bits = 8
iota_r1 = math_ops.range(num_output_columns * num_elements_per_pack)
for p in range(num_elements_per_pack):
selected_index = math_ops.equal(
math_ops.mod(iota_r1, num_elements_per_pack), p)
gather_index = array_ops.boolean_mask(iota_r1, selected_index)
gathered_input = array_ops.gather(padding_input, gather_index, axis=1)
total_shift_bits = shift_bits * (num_elements_per_pack - p - 1)
left_shift_input = bitwise_ops.left_shift(
math_ops.cast(gathered_input, dtype=dtypes.uint32), total_shift_bits)
output = bitwise_ops.bitwise_or(output, left_shift_input)
return output
def testDequantizeQuint8(self):
num_rows = 100
num_columns = 3547
random_input = np.random.normal(128.0, 10.0, [num_rows, num_columns])
with self.session() as session:
with ops.device("CPU"):
test_input = ops.convert_to_tensor(random_input, dtype=dtypes.float32)
transposed_input = array_ops.transpose(test_input, [1, 0])
quantized_input = array_ops.quantize(transposed_input, 0.0, 255.0,
dtypes.quint8)
packed_input = self.pack_uint8_r2_to_uint32(quantized_input.output)
with self.test_scope():
transposed_quantized_output = xla.dequantize(packed_input, 0.0, 255.0,
"MIN_COMBINED", True)
quantized_output = array_ops.slice(transposed_quantized_output, [0, 0],
[num_rows, num_columns])
value = session.run(quantized_output)
self.assertAllClose(value, random_input, 1.0)
if __name__ == "__main__":
googletest.main()
| DequantizedOpsTest |
python | PrefectHQ__prefect | src/prefect/server/schemas/core.py | {
"start": 37831,
"end": 38504
} | class ____(PrefectBaseModel):
"""A representation of a work pool's storage configuration"""
model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")
bundle_upload_step: Optional[dict[str, Any]] = Field(
default=None,
description="The step to use for uploading bundles to storage.",
)
bundle_execution_step: Optional[dict[str, Any]] = Field(
default=None,
description="The step to use for executing bundles.",
)
default_result_storage_block_id: Optional[UUID] = Field(
default=None,
description="The block document ID of the default result storage block.",
)
| WorkPoolStorageConfiguration |
python | mlflow__mlflow | mlflow/telemetry/schemas.py | {
"start": 872,
"end": 1222
} | class ____(str, Enum):
MLFLOW_TRACING = "mlflow-tracing"
MLFLOW = "mlflow"
MLFLOW_SKINNY = "mlflow-skinny"
def get_source_sdk() -> SourceSDK:
if IS_TRACING_SDK_ONLY:
return SourceSDK.MLFLOW_TRACING
elif IS_MLFLOW_SKINNY:
return SourceSDK.MLFLOW_SKINNY
else:
return SourceSDK.MLFLOW
@dataclass
| SourceSDK |
python | spyder-ide__spyder | spyder/plugins/toolbar/container.py | {
"start": 1593,
"end": 17479
} | class ____(PluginMainContainer):
def __init__(self, name, plugin, parent=None):
super().__init__(name, plugin, parent=parent)
self._APPLICATION_TOOLBARS = OrderedDict()
self._ADDED_TOOLBARS = OrderedDict()
self._toolbarslist: list[ApplicationToolbar] = []
self._visible_toolbars: list[ApplicationToolbar] = []
self._ITEMS_QUEUE: Dict[str, List[ItemInfo]] = {}
# ---- Private Methods
# ------------------------------------------------------------------------
def _save_visible_toolbars(self):
"""Save the name of the visible toolbars in the options."""
toolbars = []
for toolbar in self._visible_toolbars:
toolbars.append(toolbar.objectName())
self.set_conf('last_visible_toolbars', toolbars)
def _set_visible_toolbars(self):
"""Set the current visible toolbars in an attribute."""
toolbars = []
for toolbar in self._toolbarslist:
if (
toolbar.toggleViewAction().isChecked()
and toolbar not in toolbars
):
toolbars.append(toolbar)
self._visible_toolbars = toolbars
@Slot()
def _show_toolbars(self):
"""Show/Hide toolbars."""
value = not self.get_conf("toolbars_visible")
self.set_conf("toolbars_visible", value)
if value:
self._save_visible_toolbars()
else:
self._set_visible_toolbars()
for toolbar in self._visible_toolbars:
toolbar.setVisible(value)
self.update_actions()
def _add_missing_toolbar_elements(self, toolbar, toolbar_id):
if toolbar_id in self._ITEMS_QUEUE:
pending_items = self._ITEMS_QUEUE.pop(toolbar_id)
for item, section, before, before_section in pending_items:
toolbar.add_item(item, section=section, before=before,
before_section=before_section)
# ---- PluginMainContainer API
# ------------------------------------------------------------------------
def setup(self):
self.show_toolbars_action = self.create_action(
ToolbarActions.ShowToolbars,
text=_("Show toolbars"),
triggered=self._show_toolbars
)
self.toolbars_menu = self.create_menu(
ToolbarMenus.ToolbarsMenu,
_("Toolbars"),
)
def update_actions(self):
visible_toolbars = self.get_conf("toolbars_visible")
if visible_toolbars:
text = _("Hide toolbars")
tip = _("Hide toolbars")
else:
text = _("Show toolbars")
tip = _("Show toolbars")
self.show_toolbars_action.setText(text)
self.show_toolbars_action.setToolTip(tip)
self.toolbars_menu.setEnabled(visible_toolbars)
# ---- Public API
# ------------------------------------------------------------------------
def create_application_toolbar(
self,
toolbar_id: str,
title: str
) -> ApplicationToolbar:
"""
Create an application toolbar and add it to the main window.
Parameters
----------
toolbar_id: str
The toolbar unique identifier string.
title: str
The localized toolbar title to be displayed.
Returns
-------
spyder.api.widgets.toolbar.ApplicationToolbar
The created application toolbar.
"""
if toolbar_id in self._APPLICATION_TOOLBARS:
raise SpyderAPIError(
'Toolbar with ID "{}" already added!'.format(toolbar_id)
)
toolbar = ApplicationToolbar(self, toolbar_id, title)
self._add_missing_toolbar_elements(toolbar, toolbar_id)
return toolbar
def add_application_toolbar(self, toolbar, mainwindow=None):
"""
Add toolbar to application toolbars.
Parameters
----------
toolbar: spyder.api.widgets.toolbars.ApplicationToolbar
The application toolbar to add to the `mainwindow`.
mainwindow: QMainWindow
The main application window.
"""
# Check toolbar class
if not isinstance(toolbar, ApplicationToolbar):
raise SpyderAPIError(
'Any toolbar must subclass ApplicationToolbar!'
)
# Check ID
toolbar_id = toolbar.ID
if toolbar_id is None:
raise SpyderAPIError(
f"Toolbar `{repr(toolbar)}` doesn't have an identifier!"
)
if toolbar_id in self._ADDED_TOOLBARS:
raise SpyderAPIError(
'Toolbar with ID "{}" already added!'.format(toolbar_id))
# Add toolbar to registry and add it to the app toolbars dict
TOOLBAR_REGISTRY.register_reference(
toolbar, toolbar_id, self.PLUGIN_NAME, self.CONTEXT_NAME
)
self._APPLICATION_TOOLBARS[toolbar_id] = toolbar
# TODO: Make the icon size adjustable in Preferences later on.
iconsize = 24
toolbar.setIconSize(QSize(iconsize, iconsize))
toolbar.setObjectName(toolbar_id)
self._ADDED_TOOLBARS[toolbar_id] = toolbar
self._toolbarslist.append(toolbar)
if mainwindow:
mainwindow.addToolBar(toolbar)
self._add_missing_toolbar_elements(toolbar, toolbar_id)
def remove_application_toolbar(self, toolbar_id: str, mainwindow=None):
"""
Remove toolbar from application toolbars.
Parameters
----------
toolbar: str
The application toolbar to remove from the `mainwindow`.
mainwindow: QMainWindow
The main application window.
"""
if toolbar_id not in self._ADDED_TOOLBARS:
raise SpyderAPIError(
'Toolbar with ID "{}" is not in the main window'.format(
toolbar_id))
toolbar = self._ADDED_TOOLBARS.pop(toolbar_id)
self._toolbarslist.remove(toolbar)
if mainwindow:
mainwindow.removeToolBar(toolbar)
def add_item_to_application_toolbar(
self,
item: ToolbarItem,
toolbar_id: Optional[str] = None,
section: Optional[str] = None,
before: Optional[str] = None,
before_section: Optional[str] = None,
omit_id: bool = False
):
"""
Add action or widget `item` to given application toolbar `section`.
Parameters
----------
item: SpyderAction or QWidget
The item to add to the `toolbar`.
toolbar_id: str or None
The application toolbar unique string identifier.
section: str or None
The section id in which to insert the `item` on the `toolbar`.
before: str or None
Make the item appear before another given item.
before_section: str or None
Make the item defined section appear before another given section
(the section must be already defined).
omit_id: bool
If True, then the toolbar will check if the item to add declares an
id, False otherwise. This flag exists only for items added on
Spyder 4 plugins. Default: False
"""
if toolbar_id not in self._APPLICATION_TOOLBARS:
pending_items = self._ITEMS_QUEUE.get(toolbar_id, [])
pending_items.append((item, section, before, before_section))
self._ITEMS_QUEUE[toolbar_id] = pending_items
else:
toolbar = self.get_application_toolbar(toolbar_id)
toolbar.add_item(item, section=section, before=before,
before_section=before_section, omit_id=omit_id)
def remove_item_from_application_toolbar(
self,
item_id: str,
toolbar_id: Optional[str] = None
):
"""
Remove action or widget from given application toolbar by id.
Parameters
----------
item: str
The item to remove from the `toolbar`.
toolbar_id: str or None
The application toolbar unique string identifier.
"""
if toolbar_id not in self._APPLICATION_TOOLBARS:
raise SpyderAPIError(
'{} is not a valid toolbar_id'.format(toolbar_id))
toolbar = self.get_application_toolbar(toolbar_id)
toolbar.remove_item(item_id)
def get_application_toolbar(self, toolbar_id: str) -> ApplicationToolbar:
"""
Return an application toolbar by toolbar_id.
Parameters
----------
toolbar_id: str
The toolbar unique string identifier.
Returns
-------
spyder.api.widgets.toolbars.ApplicationToolbar
The application toolbar.
"""
if toolbar_id not in self._APPLICATION_TOOLBARS:
raise SpyderAPIError(
'Application toolbar "{0}" not found! '
'Available toolbars are: {1}'.format(
toolbar_id,
list(self._APPLICATION_TOOLBARS.keys())
)
)
return self._APPLICATION_TOOLBARS[toolbar_id]
def get_application_toolbars(self) -> Dict[str, ApplicationToolbar]:
"""
Return all created application toolbars.
Returns
-------
dict
The dict of all the added application toolbars.
"""
return self._APPLICATION_TOOLBARS
def load_application_toolbars(self):
"""Load application toolbars at startup."""
app_toolbars = self.get_application_toolbars()
# Get internal and external toolbars
internal_toolbars = get_class_values(ApplicationToolbars)
external_toolbars = [
toolbar_id
for toolbar_id in app_toolbars.keys()
if toolbar_id not in internal_toolbars
]
# Default order for internal toolbars
internal_toolbars_order = [
ApplicationToolbars.File,
ApplicationToolbars.Run,
ApplicationToolbars.Debug,
ApplicationToolbars.Profile,
ApplicationToolbars.Main,
]
# Check we didn't leave out any internal toolbar from the order above
if DEV:
if (
(set(internal_toolbars) - set(internal_toolbars_order))
!= {ApplicationToolbars.WorkingDirectory}
):
missing_toolbars = (
set(internal_toolbars)
- set(internal_toolbars_order)
- {ApplicationToolbars.WorkingDirectory}
)
raise SpyderAPIError(
f"The internal toolbar(s) {missing_toolbars} are not "
f"listed in the ordering of toolbars that is set below. "
f"Please add them to fix this error"
)
# Reorganize toolbars only if this is the first time Spyder starts or
# new toolbars were added
last_toolbars = self.get_conf("last_toolbars")
if (
not last_toolbars
or set(last_toolbars) != set(app_toolbars.keys())
):
logger.debug("Reorganize application toolbars")
# We need to remove all toolbars first to organize them in the way
# we want
for toolbar in self._toolbarslist:
self._plugin.main.removeToolBar(toolbar)
# Add toolbars with the working directory to the right because it's
# not clear where it ends, so users can have a hard time finding a
# new toolbar in the interface if it's placed next to it.
toolbars_order = internal_toolbars_order + external_toolbars
for toolbar_id in (
toolbars_order
+ [ApplicationToolbars.WorkingDirectory]
):
toolbar = app_toolbars[toolbar_id]
self._plugin.main.addToolBar(toolbar)
toolbar.render()
else:
logger.debug("Render application toolbars")
for toolbar in self._toolbarslist:
toolbar.render()
def save_last_toolbars(self):
"""Save the last available toolbars when the app is closed."""
logger.debug("Saving current application toolbars")
toolbars = []
for toolbar in self._toolbarslist:
toolbars.append(toolbar.objectName())
self.set_conf('last_toolbars', toolbars)
def save_last_visible_toolbars(self):
"""Save the last visible toolbars in our preferences."""
if self.get_conf("toolbars_visible"):
self._set_visible_toolbars()
self._save_visible_toolbars()
def load_last_visible_toolbars(self):
"""Load the last visible toolbars saved in our config system."""
toolbars_names = self.get_conf('last_visible_toolbars')
toolbars_visible = self.get_conf("toolbars_visible")
# This is necessary to discard toolbars that were available in the last
# session but are not on this one.
visible_toolbars = []
for name in toolbars_names:
if name in self._APPLICATION_TOOLBARS:
visible_toolbars.append(self._APPLICATION_TOOLBARS[name])
# Update visible toolbars
self._visible_toolbars = visible_toolbars
# Show visible/hidden toolbars
for toolbar in self._toolbarslist:
if toolbar in self._visible_toolbars:
toolbar.setVisible(toolbars_visible)
toolbar.toggleViewAction().setChecked(toolbars_visible)
else:
toolbar.setVisible(False)
toolbar.toggleViewAction().setChecked(False)
self.update_actions()
def create_toolbars_menu(self):
"""
Populate the toolbars menu inside the view application menu.
"""
main_section = ToolbarsMenuSections.Main
secondary_section = ToolbarsMenuSections.Secondary
default_toolbars = get_class_values(ApplicationToolbars)
for toolbar_id, toolbar in self._ADDED_TOOLBARS.items():
if toolbar:
action = toolbar.toggleViewAction()
# This is necessary to show the same visible toolbars both in
# MainWindow and EditorMainWindow.
action.triggered.connect(self.save_last_visible_toolbars)
if not (PYSIDE2 or PYSIDE6):
# Modifying __class__ of a QObject created by C++ [1] seems
# to invalidate the corresponding Python object when PySide
# is used (changing __class__ of a QObject created in
# Python seems to work).
#
# [1] There are Qt functions such as
# QToolBar.toggleViewAction(), QToolBar.addAction(QString)
# and QMainWindow.addToolbar(QString), which return a
# pointer to an already existing QObject.
action.__class__ = QActionID
# Register action
id_ = f'toggle_view_{toolbar_id}'
action.action_id = id_
ACTION_REGISTRY.register_reference(
action,
id_,
self._plugin.NAME
)
# Add action to menu
section = (
main_section
if toolbar_id in default_toolbars
else secondary_section
)
self.add_item_to_menu(
action,
menu=self.toolbars_menu,
section=section,
)
| ToolbarContainer |
python | rushter__MLAlgorithms | mla/neuralnet/layers/basic.py | {
"start": 2560,
"end": 3127
} | class ____(Layer, PhaseMixin):
"""Randomly set a fraction of `p` inputs to 0 at each training update."""
def __init__(self, p=0.1):
self.p = p
self._mask = None
def forward_pass(self, X):
assert self.p > 0
if self.is_training:
self._mask = np.random.uniform(size=X.shape) > self.p
y = X * self._mask
else:
y = X * (1.0 - self.p)
return y
def backward_pass(self, delta):
return delta * self._mask
def shape(self, x_shape):
return x_shape
| Dropout |
python | networkx__networkx | networkx/readwrite/graphml.py | {
"start": 23828,
"end": 24268
} | class ____:
"""Wrapper for _IncrementalWriter providing an Element like interface.
This wrapper does not intend to be a complete implementation but rather to
deal with those calls used in GraphMLWriter.
"""
def __init__(self, xml, prettyprint):
self.xml = xml
self.prettyprint = prettyprint
def append(self, element):
self.xml.write(element, pretty_print=self.prettyprint)
| IncrementalElement |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_types.py | {
"start": 23980,
"end": 24577
} | class ____(_DateFixture, fixtures.TablesTest):
"""this particular suite is testing that datetime parameters get
coerced to dates, which tends to be something DBAPIs do.
"""
__requires__ = "date", "date_coerces_from_datetime"
__backend__ = True
datatype = Date
data = datetime.datetime(2012, 10, 15, 12, 57, 18)
compare = datetime.date(2012, 10, 15)
@testing.requires.datetime_implicit_bound
def test_select_direct(self, connection):
result = connection.scalar(select(literal(self.data)))
eq_(result, self.data)
| DateTimeCoercedToDateTimeTest |
python | ray-project__ray | python/ray/autoscaler/_private/command_runner.py | {
"start": 5303,
"end": 17075
} | class ____(CommandRunnerInterface):
def __init__(
self,
log_prefix,
node_id,
provider,
auth_config,
cluster_name,
process_runner,
use_internal_ip,
):
ssh_control_hash = hashlib.sha1(cluster_name.encode()).hexdigest()
ssh_user_hash = hashlib.sha1(getuser().encode()).hexdigest()
if sys.platform == "win32":
# Disable SSH control paths on Windows - currently using it cause socket errors
ssh_control_path = None
else:
ssh_control_path = "/tmp/ray_ssh_{}/{}".format(
ssh_user_hash[:HASH_MAX_LENGTH], ssh_control_hash[:HASH_MAX_LENGTH]
)
self.cluster_name = cluster_name
self.log_prefix = log_prefix
self.process_runner = process_runner
self.node_id = node_id
self.use_internal_ip = use_internal_ip
self.provider = provider
self.ssh_private_key = auth_config.get("ssh_private_key")
self.ssh_user = auth_config["ssh_user"]
self.ssh_control_path = ssh_control_path
self.ssh_ip = None
self.ssh_proxy_command = auth_config.get("ssh_proxy_command", None)
self.ssh_options = SSHOptions(
self.ssh_private_key,
self.ssh_control_path,
ProxyCommand=self.ssh_proxy_command,
)
def _get_node_ip(self):
if self.use_internal_ip:
return self.provider.internal_ip(self.node_id)
else:
return self.provider.external_ip(self.node_id)
def _wait_for_ip(self, deadline):
# if we have IP do not print waiting info
ip = self._get_node_ip()
if ip is not None:
cli_logger.labeled_value("Fetched IP", ip)
return ip
interval = AUTOSCALER_NODE_SSH_INTERVAL_S
with cli_logger.group("Waiting for IP"):
while time.time() < deadline and not self.provider.is_terminated(
self.node_id
):
ip = self._get_node_ip()
if ip is not None:
cli_logger.labeled_value("Received", ip)
return ip
cli_logger.print(
"Not yet available, retrying in {} seconds", cf.bold(str(interval))
)
time.sleep(interval)
return None
def _set_ssh_ip_if_required(self):
if self.ssh_ip is not None:
return
# We assume that this never changes.
# I think that's reasonable.
deadline = time.time() + AUTOSCALER_NODE_START_WAIT_S
with LogTimer(self.log_prefix + "Got IP"):
ip = self._wait_for_ip(deadline)
cli_logger.doassert(ip is not None, "Could not get node IP.") # todo: msg
assert ip is not None, "Unable to find IP of node"
self.ssh_ip = ip
# This should run before any SSH commands and therefore ensure that
# the ControlPath directory exists, allowing SSH to maintain
# persistent sessions later on.
if self.ssh_control_path is not None:
try:
os.makedirs(self.ssh_control_path, mode=0o700, exist_ok=True)
except OSError as e:
cli_logger.warning("{}", str(e)) # todo: msg
def _run_helper(
self,
final_cmd: List[str],
with_output: bool = False,
exit_on_fail: bool = False,
silent: bool = False,
):
"""Run a command that was already setup with SSH and `bash` settings.
Args:
final_cmd (List[str]):
Full command to run. Should include SSH options and other
processing that we do.
with_output (bool):
If `with_output` is `True`, command stdout will be captured and
returned.
exit_on_fail (bool):
If `exit_on_fail` is `True`, the process will exit
if the command fails (exits with a code other than 0).
silent: If true, the command output will be silenced.
Raises:
ProcessRunnerError: If using new log style and disabled
login shells.
click.ClickException: If using login shells.
"""
try:
# For now, if the output is needed we just skip the new logic.
# In the future we could update the new logic to support
# capturing output, but it is probably not needed.
if not with_output:
return run_cmd_redirected(
final_cmd,
process_runner=self.process_runner,
silent=silent,
use_login_shells=is_using_login_shells(),
)
else:
return self.process_runner.check_output(final_cmd)
except subprocess.CalledProcessError as e:
joined_cmd = " ".join(final_cmd)
if not is_using_login_shells():
raise ProcessRunnerError(
"Command failed",
"ssh_command_failed",
code=e.returncode,
command=joined_cmd,
)
if exit_on_fail:
raise click.ClickException(
"Command failed:\n\n {}\n".format(joined_cmd)
) from None
else:
fail_msg = "SSH command failed."
if is_output_redirected():
fail_msg += " See above for the output from the failure."
raise click.ClickException(fail_msg) from None
finally:
# Do our best to flush output to terminal.
# See https://github.com/ray-project/ray/pull/19473.
sys.stdout.flush()
sys.stderr.flush()
def run(
self,
cmd: Optional[str] = None,
timeout: int = 120,
exit_on_fail: bool = False,
port_forward: Optional[List[Tuple[int, int]]] = None,
with_output: bool = False,
environment_variables: Optional[Dict[str, object]] = None,
run_env: str = "auto", # Unused argument.
ssh_options_override_ssh_key: str = "",
shutdown_after_run: bool = False,
silent: bool = False,
) -> str:
if shutdown_after_run:
cmd += "; sudo shutdown -h now"
if ssh_options_override_ssh_key:
if self.ssh_proxy_command:
ssh_options = SSHOptions(
ssh_options_override_ssh_key, ProxyCommand=self.ssh_proxy_command
)
else:
ssh_options = SSHOptions(ssh_options_override_ssh_key)
else:
ssh_options = self.ssh_options
assert isinstance(
ssh_options, SSHOptions
), "ssh_options must be of type SSHOptions, got {}".format(type(ssh_options))
self._set_ssh_ip_if_required()
if is_using_login_shells():
ssh = ["ssh", "-tt"]
else:
ssh = ["ssh"]
if port_forward:
with cli_logger.group("Forwarding ports"):
if not isinstance(port_forward, list):
port_forward = [port_forward]
for local, remote in port_forward:
cli_logger.verbose(
"Forwarding port {} to port {} on localhost.",
cf.bold(local),
cf.bold(remote),
) # todo: msg
ssh += ["-L", "{}:localhost:{}".format(local, remote)]
final_cmd = (
ssh
+ ssh_options.to_ssh_options_list(timeout=timeout)
+ ["{}@{}".format(self.ssh_user, self.ssh_ip)]
)
if cmd:
if environment_variables:
cmd = _with_environment_variables(cmd, environment_variables)
if is_using_login_shells():
final_cmd += _with_interactive(cmd)
else:
final_cmd += [cmd]
else:
# We do this because `-o ControlMaster` causes the `-N` flag to
# still create an interactive shell in some ssh versions.
final_cmd.append("while true; do sleep 86400; done")
cli_logger.verbose("Running `{}`", cf.bold(cmd))
with cli_logger.indented():
cli_logger.very_verbose(
"Full command is `{}`", cf.bold(" ".join(final_cmd))
)
if cli_logger.verbosity > 0:
with cli_logger.indented():
return self._run_helper(
final_cmd, with_output, exit_on_fail, silent=silent
)
else:
return self._run_helper(final_cmd, with_output, exit_on_fail, silent=silent)
def _create_rsync_filter_args(self, options):
rsync_excludes = options.get("rsync_exclude") or []
rsync_filters = options.get("rsync_filter") or []
exclude_args = [
["--exclude", rsync_exclude] for rsync_exclude in rsync_excludes
]
filter_args = [
["--filter", "dir-merge,- {}".format(rsync_filter)]
for rsync_filter in rsync_filters
]
# Combine and flatten the two lists
return [arg for args_list in exclude_args + filter_args for arg in args_list]
def run_rsync_up(self, source, target, options=None):
self._set_ssh_ip_if_required()
options = options or {}
# on windows use scp -r instead of rsync
if sys.platform == "win32":
# Use scp as fallback for Windows
command = ["scp", "-r"]
command += self.ssh_options.to_ssh_options_list(timeout=120)
command += [source, "{}@{}:{}".format(self.ssh_user, self.ssh_ip, target)]
else:
command = ["rsync"]
command += [
"--rsh",
subprocess.list2cmdline(
["ssh"] + self.ssh_options.to_ssh_options_list(timeout=120)
),
]
command += ["-avz"]
command += self._create_rsync_filter_args(options=options)
command += [source, "{}@{}:{}".format(self.ssh_user, self.ssh_ip, target)]
cli_logger.verbose("Running `{}`", cf.bold(" ".join(command)))
self._run_helper(command, silent=is_rsync_silent())
def run_rsync_down(self, source, target, options=None):
self._set_ssh_ip_if_required()
# on Windows use scp -r instead of rsync
if sys.platform == "win32":
# Use scp as fallback for Windows
command = ["scp", "-r"]
command += self.ssh_options.to_ssh_options_list(timeout=120)
command += ["{}@{}:{}".format(self.ssh_user, self.ssh_ip, source), target]
else:
command = ["rsync"]
command += [
"--rsh",
subprocess.list2cmdline(
["ssh"] + self.ssh_options.to_ssh_options_list(timeout=120)
),
]
command += ["-avz"]
command += self._create_rsync_filter_args(options=options)
command += ["{}@{}:{}".format(self.ssh_user, self.ssh_ip, source), target]
cli_logger.verbose("Running `{}`", cf.bold(" ".join(command)))
self._run_helper(command, silent=is_rsync_silent())
def remote_shell_command_str(self):
if self.ssh_private_key:
return "ssh -o IdentitiesOnly=yes -i {} {}@{}\n".format(
self.ssh_private_key, self.ssh_user, self.ssh_ip
)
else:
return "ssh -o IdentitiesOnly=yes {}@{}\n".format(
self.ssh_user, self.ssh_ip
)
| SSHCommandRunner |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0155_custom_git_checkout_step.py | {
"start": 149,
"end": 863
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("projects", "0154_set_latest_build"),
]
operations = [
migrations.AddField(
model_name="historicalproject",
name="git_checkout_command",
field=models.JSONField(
blank=True, null=True, verbose_name="Custom command to execute before Git checkout"
),
),
migrations.AddField(
model_name="project",
name="git_checkout_command",
field=models.JSONField(
blank=True, null=True, verbose_name="Custom command to execute before Git checkout"
),
),
]
| Migration |
python | pytorch__pytorch | torch/_dynamo/types.py | {
"start": 2249,
"end": 2409
} | class ____:
code: types.CodeType
guard_manager: GuardFn
compile_id: CompileId
trace_annotation: str = "Unknown"
@dataclasses.dataclass
| GuardedCode |
python | keon__algorithms | tests/test_graph.py | {
"start": 10359,
"end": 10865
} | class ____(unittest.TestCase):
def test_find_all_paths(self):
graph = {'A': ['B', 'C'],
'B': ['C', 'D'],
'C': ['D', 'F'],
'D': ['C'],
'E': ['F'],
'F': ['C']}
paths = find_path.find_all_path(graph, 'A', 'F')
print(paths)
self.assertEqual(sorted(paths), sorted([
['A', 'C', 'F'],
['A', 'B', 'C', 'F'],
['A', 'B', 'D', 'C', 'F'],
]))
| TestFindPath |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/nonzero.py | {
"start": 154,
"end": 3035
} | class ____(Operator):
"""Operator for finding nonzero elements in a tensor."""
def __init__(self):
super().__init__("nonzero")
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.nonzero"
def can_produce(self, output_spec: Spec) -> bool:
"""Nonzero produces a tensor with shape (n_nonzero, n_dims).
We can deterministically synthesize inputs to match any 2D int64 output
shape (k, d) without data-dependent guards by constructing an input with
exactly k non-zero elements and d dimensions.
"""
return (
isinstance(output_spec, TensorSpec)
and output_spec.dtype in [torch.int64, torch.long]
and len(output_spec.size) == 2
)
def fuzz_inputs_specs(self, output_spec: Spec, num_inputs: int = 1) -> list[Spec]:
"""Generate input spec for nonzero operation.
The actual values will be synthesized in codegen to achieve the target size.
"""
if not isinstance(output_spec, TensorSpec):
raise ValueError("NonzeroOperator can only produce TensorSpec outputs")
# Provide a placeholder spec; codegen will ignore the actual input content
# and synthesize a tensor with desired nonzero count and dimensionality.
d = output_spec.size[1]
input_spec = TensorSpec(
size=tuple([1] * d) if d > 0 else (),
stride=tuple([1] * d) if d > 0 else (),
dtype=torch.bool,
)
return [input_spec]
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for nonzero using synthesized input to match target size.
No data-dependent conditionals/guards. Constructs an input with exactly
k = output_spec.size[0] non-zero elements and d = output_spec.size[1] dims,
then calls torch.nonzero on it.
"""
if len(input_names) != 1:
raise ValueError("NonzeroOperator requires exactly one input")
if not isinstance(output_spec, TensorSpec) or len(output_spec.size) != 2:
raise ValueError("NonzeroOperator requires 2D TensorSpec output")
k = output_spec.size[0]
d = output_spec.size[1]
# Construct concrete shape literal like (k, 1, 1, ...)
shape_elems = [str(k)] + ["1"] * max(0, d - 1)
shape_literal = (
"(" + ", ".join(shape_elems) + ("," if d == 1 else "") + ")"
if d > 0
else "()"
)
return (
f"_x_nz = torch.zeros({shape_literal}, dtype=torch.bool, device={input_names[0]}.device)\n"
f"_x_nz_flat = _x_nz.reshape(-1)\n"
f"_x_nz_flat[:{k}] = True\n"
f"{output_name} = torch.nonzero(_x_nz)"
)
| NonzeroOperator |
python | tensorflow__tensorflow | tensorflow/python/distribute/experimental/mirrored_strategy_test.py | {
"start": 20430,
"end": 22337
} | class ____(test_util.DTensorBaseTest):
def setUp(self):
super().setUp()
device_type = test_util.preferred_device_type()
if device_type != 'TPU':
test_util.reset_logical_devices(device_type, 2)
self.device_type = device_type
def test_explicit_device_list(self):
device_list = [f'/{self.device_type}:{i}' for i in range(2)]
strategy = mirrored_strategy.MirroredStrategy(devices=device_list)
mesh = strategy.mesh
self.assertEqual(mesh.num_local_devices(), 2)
self.assertEqual(mesh.shape(), [2,])
self.assertEqual(mesh.dim_names, ['batch'])
self.assertIn(
f'/job:localhost/replica:0/task:0/device:{self.device_type}:0',
mesh.local_devices()[0])
self.assertIn(
f'/job:localhost/replica:0/task:0/device:{self.device_type}:1',
mesh.local_devices()[1])
# Also make sure the host mesh works since it is required by dataset
self.assertIsNotNone(mesh.host_mesh())
def test_implicit_device_list(self):
strategy = mirrored_strategy.MirroredStrategy()
mesh = strategy.mesh
self.assertEqual(mesh.num_local_devices(), 2)
self.assertEqual(mesh.shape(), [2,])
self.assertIn(
f'/job:localhost/replica:0/task:0/device:{self.device_type}:0',
mesh.local_devices()[0])
self.assertIn(
f'/job:localhost/replica:0/task:0/device:{self.device_type}:1',
mesh.local_devices()[1])
# Also make sure the host mesh works since it is required by dataset
self.assertIsNotNone(mesh.host_mesh())
def test_mesh_with_device_list(self):
device_list = [f'/{self.device_type}:{i}' for i in range(2)]
mesh = mesh_util.create_mesh([('batch', 2)], devices=device_list)
with self.assertRaisesRegex(
ValueError, 'Mesh and devices can not be provided at the same time'):
_ = mirrored_strategy.MirroredStrategy(mesh=mesh, devices=device_list)
| StrategyCreationTest |
python | kamyu104__LeetCode-Solutions | Python/clone-binary-tree-with-random-pointer.py | {
"start": 272,
"end": 378
} | class ____(object):
def __init__(self, val=0, left=None, right=None, random=None):
pass
| NodeCopy |
python | paramiko__paramiko | paramiko/kex_group14.py | {
"start": 1044,
"end": 1731
} | class ____(KexGroup1):
# http://tools.ietf.org/html/rfc3526#section-3
P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa
G = 2
name = "diffie-hellman-group14-sha1"
hash_algo = sha1
| KexGroup14 |
python | tensorflow__tensorflow | tensorflow/python/eager/monitoring.py | {
"start": 9382,
"end": 10216
} | class ____(Metric):
"""A stateful class for updating a gauge-like string metric.
This class encapsulates a set of string values (or a single value for a
label-less metric). Each value is identified by a tuple of labels. The class
allows the user to set each value.
"""
__slots__ = []
def __init__(self, name, description, *labels):
"""Creates a new StringGauge.
Args:
name: name of the new metric.
description: description of the new metric.
*labels: The label list of the new metric.
"""
super(StringGauge, self).__init__('StringGauge', _string_gauge_methods,
len(labels), name, description, *labels)
def get_cell(self, *labels):
"""Retrieves the cell."""
return StringGaugeCell(super(StringGauge, self).get_cell(*labels))
| StringGauge |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1537139,
"end": 1537755
} | class ____(sgqlc.types.Type, Node):
"""Represents an 'unlocked' event on a given issue or pull request."""
__schema__ = github_schema
__field_names__ = ("actor", "created_at", "lockable")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
lockable = sgqlc.types.Field(sgqlc.types.non_null(Lockable), graphql_name="lockable")
"""Object that was unlocked."""
| UnlockedEvent |
python | kamyu104__LeetCode-Solutions | Python/largest-3-same-digit-number-in-string.py | {
"start": 38,
"end": 493
} | class ____(object):
def largestGoodInteger(self, num):
"""
:type num: str
:rtype: str
"""
result = ''
cnt = 0
for i, x in enumerate(num):
cnt += 1
if i+1 < len(num) and num[i] == num[i+1]:
continue
if cnt >= 3:
result = max(result, num[i])
cnt = 0
return result*3
# Time: O(n)
# Space: O(1)
# string
| Solution |
python | walkccc__LeetCode | solutions/1704. Determine if String Halves Are Alike/1704.py | {
"start": 0,
"end": 248
} | class ____:
def halvesAreAlike(self, s: str) -> bool:
VOWELS = 'aeiouAEIOU'
aVowelsCount = sum(c in VOWELS for c in s[:len(s) // 2])
bVowelsCount = sum(c in VOWELS for c in s[len(s) // 2:])
return aVowelsCount == bVowelsCount
| Solution |
python | realpython__materials | python-callable-instances/counter.py | {
"start": 0,
"end": 162
} | class ____:
def __init__(self):
self.count = 0
def increment(self):
self.count += 1
def __call__(self):
self.increment()
| Counter |
python | PrefectHQ__prefect | src/prefect/events/clients.py | {
"start": 13741,
"end": 14961
} | class ____(PrefectEventsClient):
"""A Prefect Events client that BOTH records all events sent to it for inspection
during tests AND sends them to a Prefect server."""
last: ClassVar["Optional[AssertingPassthroughEventsClient]"] = None
all: ClassVar[list["AssertingPassthroughEventsClient"]] = []
args: tuple[Any, ...]
kwargs: dict[str, Any]
events: list[Event]
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
AssertingPassthroughEventsClient.last = self
AssertingPassthroughEventsClient.all.append(self)
self.args = args
self.kwargs = kwargs
@classmethod
def reset(cls) -> None:
cls.last = None
cls.all = []
def pop_events(self) -> list[Event]:
events = self.events
self.events = []
return events
async def _emit(self, event: Event) -> None:
# actually send the event to the server
await super()._emit(event)
# record the event for inspection
self.events.append(event)
async def __aenter__(self) -> Self:
await super().__aenter__()
self.events = []
return self
| AssertingPassthroughEventsClient |
python | getsentry__sentry | src/sentry/api/serializers/models/organization_member/expand/projects.py | {
"start": 618,
"end": 3908
} | class ____(OrganizationMemberSerializer):
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.projects = {p.id: p for p in kwargs.pop("projects", [])}
self.project_ids = set(self.projects.keys())
super().__init__(*args, **kwargs)
def get_attrs(
self,
item_list: Sequence[OrganizationMember],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> MutableMapping[OrganizationMember, MutableMapping[str, Any]]:
attrs = super().get_attrs(item_list, user)
# Get all the OrganizationMemberTeam relations so we can map
# the member id to the list of team ids
#
# Note that we're intentionally only working with `team_id`
# to avoid having to fetch the team model as well.
member_teams = OrganizationMemberTeam.objects.filter(
organizationmember_id__in=[om.id for om in item_list],
team__status=TeamStatus.ACTIVE,
).values_list("team_id", "organizationmember_id", named=True)
# The set of team ids, this will be used to filter down the `ProjectTeam` below
team_ids = set()
# Mapping from member id to team ids they belong to.
#
# Previously, we were using a `select_related` when fetching the OrganizationMember.
# This resulted in django trying to set the attributes for the many to many relation
# which was slow.
team_ids_by_member_id = defaultdict(list)
# Be very careful here. We are intentionally only using `team_id` and `organizationmember_id`.
# This is to stop django from fetching these models. We don't even want django to do
# any kind of prefetching here.
for member_team in member_teams:
team_ids.add(member_team.team_id)
team_ids_by_member_id[member_team.organizationmember_id].append(member_team.team_id)
# Mapping from team id to projects that belong to the team.
#
# We require the caller to pass in the list of projects (not just ids to avoid an extra query)
# Make sure we only work with `team_id` and not the team object so django doesn't fetching it.
projects_by_team_id = defaultdict(list)
for project_team in ProjectTeam.objects.filter(
project_id__in=self.project_ids,
team_id__in=team_ids,
).values_list("team_id", "project_id", named=True):
projects_by_team_id[project_team.team_id].append(self.projects[project_team.project_id])
for org_member in item_list:
projects = set()
for team_id in team_ids_by_member_id[org_member.id]:
for project in projects_by_team_id[team_id]:
projects.add(project.slug)
projects_list = list(projects)
projects_list.sort()
attrs[org_member]["projects"] = projects_list
return attrs
def serialize(
self, obj: OrganizationMember, attrs: Mapping[str, Any], user: Any, **kwargs: Any
) -> OrganizationMemberWithProjectsResponse:
d = cast(OrganizationMemberWithProjectsResponse, super().serialize(obj, attrs, user))
d["projects"] = attrs.get("projects", [])
return d
| OrganizationMemberWithProjectsSerializer |
python | ray-project__ray | python/ray/_common/filters.py | {
"start": 112,
"end": 1962
} | class ____(logging.Filter):
TASK_LEVEL_LOG_KEYS = [
LogKey.TASK_ID.value,
LogKey.TASK_NAME.value,
LogKey.TASK_FUNCTION_NAME.value,
]
@classmethod
def get_ray_core_logging_context(cls) -> Dict[str, Any]:
"""
Get the ray core logging context as a dict.
Only use this function if you need include the attributes to the log record
yourself by bypassing the filter.
"""
if not ray.is_initialized():
# There is no additional context if ray is not initialized
return {}
runtime_context = ray.get_runtime_context()
ray_core_logging_context = {
LogKey.JOB_ID.value: runtime_context.get_job_id(),
LogKey.WORKER_ID.value: runtime_context.get_worker_id(),
LogKey.NODE_ID.value: runtime_context.get_node_id(),
}
if runtime_context.worker.mode == ray.WORKER_MODE:
ray_core_logging_context[
LogKey.ACTOR_ID.value
] = runtime_context.get_actor_id()
ray_core_logging_context[
LogKey.TASK_ID.value
] = runtime_context.get_task_id()
ray_core_logging_context[
LogKey.TASK_NAME.value
] = runtime_context.get_task_name()
ray_core_logging_context[
LogKey.TASK_FUNCTION_NAME.value
] = runtime_context.get_task_function_name()
ray_core_logging_context[
LogKey.ACTOR_NAME.value
] = runtime_context.get_actor_name()
return ray_core_logging_context
def filter(self, record):
context = self.get_ray_core_logging_context()
for key, value in context.items():
if value is not None:
setattr(record, key, value)
return True
| CoreContextFilter |
python | getsentry__sentry | tests/sentry_plugins/pushover/test_plugin.py | {
"start": 485,
"end": 5161
} | class ____(PluginTestCase):
@cached_property
def plugin(self) -> PushoverPlugin:
return PushoverPlugin()
def test_is_configured(self) -> None:
assert self.plugin.is_configured(self.project) is False
self.plugin.set_option("apikey", "abcdef", self.project)
assert self.plugin.is_configured(self.project) is False
self.plugin.set_option("userkey", "abcdef", self.project)
assert self.plugin.is_configured(self.project) is True
@responses.activate
def test_simple_notification(self) -> None:
responses.add("POST", "https://api.pushover.net/1/messages.json", body=SUCCESS)
self.plugin.set_option("userkey", "abcdef", self.project)
self.plugin.set_option("apikey", "ghijkl", self.project)
event = self.store_event(
data={"message": "Hello world", "level": "warning"}, project_id=self.project.id
)
assert event.group is not None
group = event.group
rule = Rule.objects.create(project=self.project, label="my rule")
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}):
self.plugin.notify(notification)
request = responses.calls[0].request
payload = parse_qs(request.body)
assert payload == {
"message": [f"{event.title}\n\nTags: level=warning"],
"title": ["Bar: Hello world"],
"url": [
f"http://example.com/organizations/baz/issues/{group.id}/?referrer=pushover_plugin"
],
"url_title": ["Issue Details"],
"priority": ["0"],
"user": ["abcdef"],
"token": ["ghijkl"],
"expire": ["90"],
"retry": ["30"],
}
@responses.activate
def test_emergency_notification(self) -> None:
responses.add("POST", "https://api.pushover.net/1/messages.json", body=SUCCESS)
self.plugin.set_option("userkey", "abcdef", self.project)
self.plugin.set_option("apikey", "ghijkl", self.project)
self.plugin.set_option("priority", "2", self.project)
self.plugin.set_option("expire", 90, self.project)
self.plugin.set_option("retry", 30, self.project)
event = self.store_event(
data={"message": "Hello world", "level": "warning"}, project_id=self.project.id
)
assert event.group is not None
group = event.group
rule = Rule.objects.create(project=self.project, label="my rule")
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}):
self.plugin.notify(notification)
request = responses.calls[0].request
payload = parse_qs(request.body)
assert payload == {
"message": [f"{event.title}\n\nTags: level=warning"],
"title": ["Bar: Hello world"],
"url": [
f"http://example.com/organizations/baz/issues/{group.id}/?referrer=pushover_plugin"
],
"url_title": ["Issue Details"],
"priority": ["2"],
"user": ["abcdef"],
"token": ["ghijkl"],
"expire": ["90"],
"retry": ["30"],
}
def test_no_secrets(self) -> None:
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.login_as(self.user)
self.plugin.set_option("userkey", "abcdef", self.project)
self.plugin.set_option("apikey", "abcdef", self.project)
url = reverse(
"sentry-api-0-project-plugin-details",
args=[self.org.slug, self.project.slug, "pushover"],
)
res = self.client.get(url)
config = orjson.loads(res.content)["config"]
userkey_config = [item for item in config if item["name"] == "userkey"][0]
apikey_config = [item for item in config if item["name"] == "apikey"][0]
assert userkey_config.get("type") == "secret"
assert userkey_config.get("value") is None
assert userkey_config.get("hasSavedValue") is True
assert userkey_config.get("prefix") == "abcd"
assert apikey_config.get("type") == "secret"
assert apikey_config.get("value") is None
assert apikey_config.get("hasSavedValue") is True
assert apikey_config.get("prefix") == "abcd"
| PushoverPluginTest |
python | huggingface__transformers | tests/models/smolvlm/test_processing_smolvlm.py | {
"start": 901,
"end": 25984
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = SmolVLMProcessor
videos_input_name = "pixel_values"
model_id = "HuggingFaceTB/SmolVLM2-256M-Video-Instruct"
@classmethod
def _setup_test_attributes(cls, processor):
cls.image1 = load_image(
url_to_local_path(
"https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
)
)
cls.image2 = load_image(
url_to_local_path(
url_to_local_path("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
)
)
cls.image3 = load_image(
url_to_local_path(
"https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg"
)
)
cls.bos_token = processor.tokenizer.bos_token
cls.image_token = processor.image_token
cls.video_token = processor.video_token
cls.fake_image_token = processor.fake_image_token
cls.global_img_token = processor.global_image_token
cls.bos_token_id = processor.tokenizer.convert_tokens_to_ids(cls.bos_token)
cls.image_token_id = processor.tokenizer.convert_tokens_to_ids(cls.image_token)
cls.fake_image_token_id = processor.tokenizer.convert_tokens_to_ids(cls.fake_image_token)
cls.global_img_tokens_id = processor.tokenizer(cls.global_img_token, add_special_tokens=False)["input_ids"]
cls.padding_token_id = processor.tokenizer.pad_token_id
cls.image_seq_len = processor.image_seq_len
@staticmethod
def prepare_processor_dict():
return {
"image_seq_len": 2,
"chat_template": "<|im_start|>{% for message in messages %}{{message['role'] | capitalize}}{% if message['content'][0]['type'] == 'image' %}{{':'}}{% else %}{{': '}}{% endif %}{% for line in message['content'] %}{% if line['type'] == 'text' %}{{line['text']}}{% elif line['type'] == 'image' %}{{ '<image>' }}{% endif %}{% endfor %}<end_of_utterance>\n{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
}
# Override as SmolVLM needs images/video to be an explicitly nested batch
def prepare_image_inputs(self, batch_size: int | None = None):
"""This function prepares a list of PIL images for testing"""
images = super().prepare_image_inputs(batch_size)
if isinstance(images, (list, tuple)):
images = [[image] for image in images]
return images
def prepare_video_inputs(self, batch_size: int | None = None):
"""This function prepares a list of numpy videos."""
video_input = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] * 8
if batch_size is None:
return [[video_input]]
return [[video_input]] * batch_size
def get_split_image_expected_tokens(self, processor, image_rows, image_cols):
text_split_images = []
for n_h in range(image_rows):
for n_w in range(image_cols):
text_split_images += (
[self.fake_image_token_id]
+ processor.tokenizer(f"<row_{n_h + 1}_col_{n_w + 1}>", add_special_tokens=False)["input_ids"]
+ [self.image_token_id] * self.image_seq_len
)
text_split_images += processor.tokenizer("\n", add_special_tokens=False)["input_ids"]
text_split_images = text_split_images[:-1] # remove last newline
# add double newline, as it gets its own token
text_split_images += processor.tokenizer("\n\n", add_special_tokens=False)["input_ids"]
text_split_images += (
[self.fake_image_token_id]
+ self.global_img_tokens_id
+ [self.image_token_id] * self.image_seq_len
+ [self.fake_image_token_id]
)
return text_split_images
def test_process_interleaved_images_prompts_no_image_splitting(self):
processor_components = self.prepare_components()
processor_components["tokenizer"] = self.get_component("tokenizer", padding_side="left")
processor_components["image_processor"] = self.get_component("image_processor", do_image_splitting=False)
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
# Test that a single image is processed correctly
inputs = processor(images=self.image1)
image1_expected_size = (512, 512)
self.assertEqual(np.array(inputs["pixel_values"]).shape, (1, 1, 3, *image1_expected_size))
self.assertEqual(np.array(inputs["pixel_attention_mask"]).shape, (1, 1, *image1_expected_size))
# fmt: on
# Test a single sample with image and text
image_str = "<image>"
text_str = "In this image, we see"
text = image_str + text_str
inputs = processor(text=text, images=self.image1)
# fmt: off
tokenized_sentence = processor.tokenizer(text_str, add_special_tokens=False)
expected_input_ids = [[self.fake_image_token_id] + self.global_img_tokens_id + [self.image_token_id] * self.image_seq_len + [self.fake_image_token_id] + tokenized_sentence["input_ids"]]
self.assertEqual(inputs["input_ids"], expected_input_ids)
self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids[0])])
self.assertEqual(np.array(inputs["pixel_values"]).shape, (1, 1, 3, *image1_expected_size))
self.assertEqual(np.array(inputs["pixel_attention_mask"]).shape, (1, 1, *image1_expected_size))
# fmt: on
# Test that batch is correctly processed
image_str = "<image>"
text_str_1 = "In this image, we see"
text_str_2 = "In this image, we see"
text = [
image_str + text_str_1,
image_str + image_str + text_str_2,
]
images = [[self.image1], [self.image2, self.image3]]
inputs = processor(text=text, images=images, padding=True)
# fmt: off
tokenized_sentence_1 = processor.tokenizer(text_str_1, add_special_tokens=False)
tokenized_sentence_2 = processor.tokenizer(text_str_2, add_special_tokens=False)
image_tokens = [self.fake_image_token_id] + self.global_img_tokens_id + [self.image_token_id] * self.image_seq_len + [self.fake_image_token_id]
expected_input_ids_1 = image_tokens + tokenized_sentence_1["input_ids"]
expected_input_ids_2 = 2 * image_tokens + tokenized_sentence_2["input_ids"]
# Pad the first input to match the second input
pad_len = len(expected_input_ids_2) - len(expected_input_ids_1)
padded_expected_input_ids_1 = [self.padding_token_id] * pad_len + expected_input_ids_1
self.assertEqual(
inputs["input_ids"], [padded_expected_input_ids_1, expected_input_ids_2]
)
self.assertEqual(
inputs["attention_mask"],
[[0] * pad_len + [1] * len(expected_input_ids_1), [1] * len(expected_input_ids_2)]
)
self.assertEqual(np.array(inputs['pixel_values']).shape, (2, 2, 3, 512, 512))
self.assertEqual(np.array(inputs['pixel_attention_mask']).shape, (2, 2, 512, 512))
# fmt: on
def test_process_interleaved_images_prompts_image_splitting(self):
processor_components = self.prepare_components()
processor_components["tokenizer"] = self.get_component("tokenizer", padding_side="left")
processor_components["image_processor"] = self.get_component("image_processor", do_image_splitting=True)
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
# Test that a single image is processed correctly
inputs = processor(images=self.image1)
self.assertEqual(np.array(inputs["pixel_values"]).shape, (1, 13, 3, 512, 512))
self.assertEqual(np.array(inputs["pixel_attention_mask"]).shape, (1, 13, 512, 512))
# fmt: on
self.maxDiff = None
# Test a single sample with image and text
image_str = "<image>"
text_str = "In this image, we see"
text = image_str + text_str
inputs = processor(text=text, images=self.image1)
# fmt: off
tokenized_sentence = processor.tokenizer(text_str, add_special_tokens=False)
split_image1_tokens = self.get_split_image_expected_tokens(processor, 3, 4)
expected_input_ids_1 = [split_image1_tokens + tokenized_sentence["input_ids"]]
self.assertEqual(inputs["input_ids"], expected_input_ids_1)
self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids_1[0])])
self.assertEqual(np.array(inputs["pixel_values"]).shape, (1, 13, 3, 512, 512))
self.assertEqual(np.array(inputs["pixel_attention_mask"]).shape, (1, 13, 512, 512))
# fmt: on
# Test that batch is correctly processed
image_str = "<image>"
text_str_1 = "In this image, we see"
text_str_2 = "bla, bla"
text = [
image_str + text_str_1,
text_str_2 + image_str + image_str,
]
images = [[self.image1], [self.image2, self.image3]]
inputs = processor(text=text, images=images, padding=True)
# fmt: off
tokenized_sentence_1 = processor.tokenizer(text_str_1, add_special_tokens=False)
tokenized_sentence_2 = processor.tokenizer(text_str_2, add_special_tokens=False)
split_image1_tokens = self.get_split_image_expected_tokens(processor, 3, 4)
split_image2_tokens = self.get_split_image_expected_tokens(processor, 4, 4)
split_image3_tokens = self.get_split_image_expected_tokens(processor, 3, 4)
expected_input_ids_1 = split_image1_tokens + tokenized_sentence_1["input_ids"]
expected_input_ids_2 = tokenized_sentence_2["input_ids"] + split_image2_tokens + split_image3_tokens
# Pad the first input to match the second input
pad_len = len(expected_input_ids_2) - len(expected_input_ids_1)
padded_expected_input_ids_1 = [self.padding_token_id] * pad_len + expected_input_ids_1
self.assertEqual(
inputs["input_ids"], [padded_expected_input_ids_1, expected_input_ids_2]
)
self.assertEqual(
inputs["attention_mask"],
[[0] * pad_len + [1] * len(expected_input_ids_1), [1] * len(expected_input_ids_2)]
)
self.assertEqual(np.array(inputs['pixel_values']).shape, (2, 30, 3, 512, 512))
self.assertEqual(np.array(inputs['pixel_attention_mask']).shape, (2, 30, 512, 512))
# fmt: on
def test_add_special_tokens_processor(self):
processor = self.get_processor()
image_str = "<image>"
text_str = "In this image, we see"
text = text_str + image_str
# fmt: off
inputs = processor(text=text, images=self.image1, add_special_tokens=False)
tokenized_sentence = processor.tokenizer(text_str, add_special_tokens=False)
split_image1_tokens = self.get_split_image_expected_tokens(processor, 3, 4)
expected_input_ids = [tokenized_sentence["input_ids"] + split_image1_tokens]
self.assertEqual(inputs["input_ids"], expected_input_ids)
inputs = processor(text=text, images=self.image1)
expected_input_ids = [tokenized_sentence["input_ids"] + split_image1_tokens]
self.assertEqual(inputs["input_ids"], expected_input_ids)
# fmt: on
@unittest.skip(reason="from @molbap @zucchini-nlp, passing non-nested images is error-prone and not recommended")
def test_non_nested_images_with_batched_text(self):
processor = self.get_processor()
processor.image_processor.do_image_splitting = False
image_str = "<image>"
text_str_1 = "In this image, we see"
text_str_2 = "In this image, we see"
text = [
image_str + text_str_1,
image_str + image_str + text_str_2,
]
images = [[self.image1], [self.image2, self.image3]]
inputs = processor(text=text, images=images, padding=True)
self.assertEqual(np.array(inputs["pixel_values"]).shape, (2, 2, 3, 512, 512))
self.assertEqual(np.array(inputs["pixel_attention_mask"]).shape, (2, 2, 512, 512))
# Copied from tests.models.idefics2.test_processing_idefics2.Idefics2ProcessorTest.test_process_interleaved_images_prompts_image_error
def test_process_interleaved_images_prompts_image_error(self):
processor = self.get_processor()
text = [
"This is a test sentence.",
"In this other sentence we try some good things",
]
images = [[self.image1], [self.image2]]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
images = [[self.image1], []]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
text = [
"This is a test sentence.<image>",
"In this other sentence we try some good things<image>",
]
images = [[self.image1], [self.image2, self.image3]]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
images = [[], [self.image2]]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
images = [self.image1, self.image2, self.image3]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
images = [self.image1]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
text = [
"This is a test sentence.",
"In this other sentence we try some good things<image>",
]
images = [[self.image1], []]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
images = [self.image1, self.image2]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
def test_apply_chat_template(self):
# Message contains content which a mix of lists with images and image urls and string
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "What do these images show?"},
{"type": "image"},
{"type": "image"},
],
},
{
"role": "assistant",
"content": [
{
"type": "text",
"text": "The first image shows the statue of Liberty in New York. The second image picture depicts Idefix, the dog of Obelix in Asterix and Obelix.",
}
],
},
{"role": "user", "content": [{"type": "text", "text": "And who is that?"}]},
]
processor = self.get_processor()
# Make short sequence length to test that the fake tokens are added correctly
rendered = processor.apply_chat_template(messages, add_generation_prompt=True)
expected_rendered = (
"<|im_start|>User: What do these images show?<image><image><end_of_utterance>\n"
"Assistant: The first image shows the statue of Liberty in New York. The second image picture depicts Idefix, the dog of Obelix in Asterix and Obelix.<end_of_utterance>\n"
"User: And who is that?<end_of_utterance>\n"
"Assistant:"
)
self.assertEqual(rendered, expected_rendered)
@require_av
@require_torch
def test_apply_chat_template_video_frame_sampling(self):
# overridden because SmolVLM has special preprocessing for videos
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
messages = [
[
{
"role": "user",
"content": [
{
"type": "video",
"url": url_to_local_path(
"https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/tiny_video.mp4"
),
},
{"type": "text", "text": "What is shown in this video?"},
],
},
]
]
num_frames = 3
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
num_frames=num_frames,
return_tensors="pt",
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 1)
# SmolVLM doesn't sample `num_frames` exactly, by uses other sampling method
self.assertEqual(len(out_dict_with_video[self.videos_input_name][0]), 1)
# Load with `fps` arg
fps = 10
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
fps=fps,
return_tensors="pt",
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 1)
# SmolVLM doesn't sample 1 frame per second exactly, by uses other sampling method
self.assertEqual(len(out_dict_with_video[self.videos_input_name][0]), 4)
# NOTE: the last assert checks are removed
# Loading video as a list of frames (i.e. images) is not supported in SmolVLM
@require_torch
@require_vision
def test_unstructured_kwargs_batched(self):
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
image_processor = self.get_component("image_processor")
video_processor = self.get_component("video_processor")
tokenizer = self.get_component("tokenizer")
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(
tokenizer=tokenizer, image_processor=image_processor, video_processor=video_processor, **processor_kwargs
)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(batch_size=2, modalities="image")
image_input = self.prepare_image_inputs(batch_size=2)
inputs = processor(
text=input_str,
images=image_input,
return_tensors="pt",
padding="max_length",
max_length=76,
truncation=True,
max_image_size={"longest_edge": 300},
)
self.assertEqual(inputs["pixel_values"].shape[2], 3)
self.assertEqual(inputs["pixel_values"].shape[3], 300)
self.assertEqual(len(inputs["input_ids"][0]), 76)
@require_torch
@require_vision
def test_unstructured_kwargs_batched_video(self):
if "video_processor" not in self.processor_class.get_attributes():
self.skipTest(f"video_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(batch_size=2, modalities="video")
video_input = self.prepare_video_inputs(batch_size=2)
inputs = processor(
text=input_str,
videos=video_input,
return_tensors="pt",
do_rescale=True,
rescale_factor=-1.0,
padding="max_length",
max_length=172,
)
self.assertLessEqual(inputs[self.videos_input_name][0].mean(), 0)
self.assertEqual(len(inputs["input_ids"][0]), 172)
@require_torch
@require_vision
def test_text_only_inference(self):
"""Test that the processor works correctly with text-only input."""
processor_components = self.prepare_components()
processor_components["tokenizer"] = self.get_component("tokenizer", padding_side="left")
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
text = "This is a simple text without images."
inputs = processor(text=text)
tokenized_sentence = processor.tokenizer(text, add_special_tokens=False)
expected_input_ids = [tokenized_sentence["input_ids"]]
self.assertEqual(inputs["input_ids"], expected_input_ids)
self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids[0])])
self.assertTrue("pixel_values" not in inputs)
self.assertTrue("pixel_attention_mask" not in inputs)
# Test batch of texts without image tokens
texts = ["First text.", "Second piece of text."]
batch_inputs = processor(text=texts, padding=True)
tokenized_1 = processor.tokenizer(texts[0], add_special_tokens=False)
tokenized_2 = processor.tokenizer(texts[1], add_special_tokens=False)
expected_1 = tokenized_1["input_ids"]
expected_2 = tokenized_2["input_ids"]
# Pad the shorter sequence
pad_len = len(expected_2) - len(expected_1)
if pad_len > 0:
padded_expected_1 = [self.padding_token_id] * pad_len + expected_1
expected_attention_1 = [0] * pad_len + [1] * len(expected_1)
self.assertEqual(batch_inputs["input_ids"], [padded_expected_1, expected_2])
self.assertEqual(batch_inputs["attention_mask"], [expected_attention_1, [1] * len(expected_2)])
else:
pad_len = -pad_len
padded_expected_2 = [self.padding_token_id] * pad_len + expected_2
expected_attention_2 = [0] * pad_len + [1] * len(expected_2)
self.assertEqual(batch_inputs["input_ids"], [expected_1, padded_expected_2])
self.assertEqual(batch_inputs["attention_mask"], [[1] * len(expected_1), expected_attention_2])
@require_torch
@require_vision
def test_missing_images_error(self):
"""Test that appropriate error is raised when images are referenced but not provided."""
processor = self.get_processor()
# Test single text with image token but no image
text = "Let me show you this image: <image> What do you think?"
with self.assertRaises(ValueError) as context:
processor(text=text)
self.assertTrue("tokens in the text but no images/videos were passed" in str(context.exception))
# Test batch with image tokens but no images
texts = [
"First text with <image> token.",
"Second text <image> with token.",
]
with self.assertRaises(ValueError) as context:
processor(text=texts)
self.assertTrue("tokens in the text but no images/videos were passed" in str(context.exception))
# Test with None as Images
with self.assertRaises(ValueError) as context:
processor(text=text, images=None)
self.assertTrue("tokens in the text but no images/videos were passed" in str(context.exception))
with self.assertRaises(ValueError) as context:
processor(text=texts, images=None)
self.assertTrue("tokens in the text but no images/videos were passed" in str(context.exception))
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = self.get_processor()
input_str = self.prepare_text_inputs(batch_size=2, modalities="image")
image_input = self.prepare_image_inputs(batch_size=2)
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=None,
padding=True,
)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=True,
padding=True,
max_length=20,
)
@unittest.skip(
"SmolVLM cannot accept list of decoded video frames, because it needs to know video fps and duration"
)
def test_apply_chat_template_decoded_video_0(self):
pass
| SmolVLMProcessorTest |
python | huggingface__transformers | tests/models/blip_2/test_modeling_blip_2.py | {
"start": 27099,
"end": 30410
} | class ____:
def __init__(
self,
parent,
vision_kwargs=None,
qformer_kwargs=None,
text_kwargs=None,
is_training=True,
num_query_tokens=10,
image_token_index=4,
):
if vision_kwargs is None:
vision_kwargs = {}
if qformer_kwargs is None:
qformer_kwargs = {}
if text_kwargs is None:
text_kwargs = {}
self.parent = parent
self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs)
self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs)
self.text_model_tester = Blip2TextModelTester(parent, **text_kwargs)
self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test
self.seq_length = self.text_model_tester.seq_length # need seq_length for common tests
self.encoder_seq_length = self.text_model_tester.encoder_seq_length
self.is_training = is_training
self.num_query_tokens = num_query_tokens
self.image_token_index = image_token_index
def prepare_config_and_inputs(self):
_, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
(
_,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
) = self.text_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, input_ids, attention_mask, pixel_values, decoder_input_ids, decoder_attention_mask, lm_labels
def get_config(self):
return Blip2Config(
vision_config=self.vision_model_tester.get_config(),
qformer_config=self.qformer_model_tester.get_config(),
text_config=self.text_model_tester.get_config(),
num_query_tokens=self.num_query_tokens,
image_token_index=self.image_token_index,
)
def create_and_check_for_conditional_generation(
self, config, input_ids, attention_mask, pixel_values, decoder_input_ids, decoder_attention_mask, labels
):
model = Blip2ForConditionalGeneration(config).to(torch_device).eval()
with torch.no_grad():
result = model(pixel_values, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask)
self.parent.assertEqual(
result.logits.shape,
(
self.vision_model_tester.batch_size,
self.text_model_tester.seq_length,
self.text_model_tester.vocab_size,
),
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
pixel_values,
decoder_input_ids,
decoder_attention_mask,
labels,
) = config_and_inputs
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
}
return config, inputs_dict
@require_torch
| Blip2ModelTester |
python | pytorch__pytorch | test/mobile/model_test/nn_ops.py | {
"start": 1277,
"end": 2874
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.input1d = torch.randn(1, 16, 50)
self.module1d = nn.ModuleList(
[
nn.MaxPool1d(3, stride=2),
nn.AvgPool1d(3, stride=2),
nn.LPPool1d(2, 3, stride=2),
nn.AdaptiveMaxPool1d(3),
nn.AdaptiveAvgPool1d(3),
]
)
self.input2d = torch.randn(1, 16, 30, 10)
self.module2d = nn.ModuleList(
[
nn.MaxPool2d((3, 2), stride=(2, 1)),
nn.AvgPool2d((3, 2), stride=(2, 1)),
nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5)),
nn.LPPool2d(2, 3, stride=(2, 1)),
nn.AdaptiveMaxPool2d((5, 7)),
nn.AdaptiveAvgPool2d(7),
]
)
self.input3d = torch.randn(1, 16, 20, 4, 4)
self.module3d = nn.ModuleList(
[
nn.MaxPool3d(2),
nn.AvgPool3d(2),
nn.FractionalMaxPool3d(2, output_ratio=(0.5, 0.5, 0.5)),
nn.AdaptiveMaxPool3d((5, 7, 9)),
nn.AdaptiveAvgPool3d((5, 7, 9)),
]
)
# TODO max_unpool
def forward(self):
return len(
(
[module(self.input1d) for i, module in enumerate(self.module1d)],
[module(self.input2d) for i, module in enumerate(self.module2d)],
[module(self.input3d) for i, module in enumerate(self.module3d)],
)
)
| NNPoolingModule |
python | jazzband__django-simple-history | simple_history/tests/tests/test_models.py | {
"start": 67974,
"end": 68927
} | class ____(TestCase):
def setUp(self):
pre_create_historical_record.connect(
add_dynamic_history_ip_address,
sender=HistoricalPollWithHistoricalIPAddress,
dispatch_uid="add_dynamic_history_ip_address",
)
def tearDown(self):
pre_create_historical_record.disconnect(
add_dynamic_history_ip_address,
sender=HistoricalPollWithHistoricalIPAddress,
dispatch_uid="add_dynamic_history_ip_address",
)
def test_signal_is_able_to_retrieve_request_from_context(self):
data = {"question": "Will it blend?", "pub_date": "2018-10-30"}
self.client.post(reverse("pollip-add"), data=data)
polls = PollWithHistoricalIPAddress.objects.all()
self.assertEqual(1, polls.count())
poll_history = polls[0].history.first()
self.assertEqual("127.0.0.1", poll_history.ip_address)
| ExtraFieldsDynamicIPAddressTestCase |
python | dask__dask | dask/dataframe/dask_expr/diagnostics/_analyze_plugin.py | {
"start": 2806,
"end": 4061
} | class ____(WorkerPlugin):
idempotent: ClassVar[bool] = True
name: ClassVar[str] = "analyze"
_statistics: defaultdict[str, Statistics]
_worker: Worker | None
def __init__(self) -> None:
self._worker = None
self._statistics = defaultdict(Statistics)
def setup(self, worker: Worker) -> None:
self._digests = defaultdict(lambda: defaultdict(lambda: defaultdict(Digest))) # type: ignore[var-annotated]
self._worker = worker
self._worker.handlers["analyze_get_statistics"] = self.get_statistics
def add(self, id: str, expr: str, metric: str, value: float):
self._statistics[id].add(expr, metric, value)
def get_statistics(self, id: str) -> Statistics:
return self._statistics.pop(id)
def get_worker_plugin() -> _AnalyzeWorkerPlugin:
from distributed import get_worker
try:
worker = get_worker()
except ValueError as e:
raise RuntimeError(
"``.analyze()`` requires Dask's distributed scheduler"
) from e
try:
return worker.plugins["analyze"]
except KeyError as e:
raise RuntimeError(
f"The worker {worker.address} does not have an Analyze plugin."
) from e
| _AnalyzeWorkerPlugin |
python | getsentry__sentry | src/sentry/integrations/slack/threads/activity_notifications.py | {
"start": 5423,
"end": 6364
} | class ____(_ExternalIssueCreatedActivity):
"""
Override class for Github, as the provider name that we want to display should be GitHub, not "Github"
"""
def get_formatted_provider_name(self) -> str:
return "GitHub"
_activity_classes = {
"asana": _AsanaExternalIssueCreatedActivity,
"github": _GithubExternalIssueCreatedActivity,
}
def _external_issue_activity_factory(activity: Activity) -> _ExternalIssueCreatedActivity:
"""
Returns the correct ExternalIssueCreatedActivity class based on the provider.
All classes have the same interface, the method for one is simply modified for its use case.
"""
base_activity = _ExternalIssueCreatedActivity(activity=activity)
provider = base_activity.get_provider()
ActivityClass = _activity_classes.get(provider, None)
return ActivityClass(activity=activity) if ActivityClass else base_activity
| _GithubExternalIssueCreatedActivity |
python | huggingface__transformers | src/transformers/models/audioflamingo3/configuration_audioflamingo3.py | {
"start": 821,
"end": 5553
} | class ____(PretrainedConfig):
r"""
This is the configuration class to store the configuration of an [`AudioFlamingo3Encoder`]. It is used to instantiate an
AudioFlamingo3 audio encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the audio encoder of the AudioFlamingo3
architecture.
e.g. [nvidia/audio-flamingo-3-hf](https://huggingface.co/nvidia/audio-flamingo-3-hf)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_mel_bins (`int`, *optional*, defaults to 128):
Number of mel features used per input features. Should correspond to the value used in the
`AudioFlamingo3Processor` class.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of encoder layers.
num_attention_heads (`int`, *optional*, defaults to 20):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 5120):
Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
activation_function (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_size (`int`, *optional*, defaults to 1280):
Dimensionality of the layers.
dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_embedding (`bool`, *optional*, defaults to `False`):
Scale embeddings by dividing by sqrt(hidden_size).
max_source_positions (`int`, *optional*, defaults to 1500):
The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
Example:
```python
>>> from transformers import AudioFlamingo3EncoderConfig, AudioFlamingo3Encoder
>>> # Initializing an AudioFlamingo3EncoderConfig
>>> configuration = AudioFlamingo3EncoderConfig()
>>> # Initializing an AudioFlamingo3Encoder (with random weights)
>>> model = AudioFlamingo3Encoder(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "audioflamingo3_encoder"
attribute_map = {
"d_model": "hidden_size",
"encoder_layers": "num_hidden_layers",
"encoder_attention_heads": "num_attention_heads",
"encoder_ffn_dim": "intermediate_size",
"encoder_layerdrop": "layerdrop",
}
def __init__(
self,
num_mel_bins=128,
num_hidden_layers=32,
num_attention_heads=20,
intermediate_size=5120,
layerdrop=0.0,
activation_function="gelu",
hidden_size=1280,
dropout=0.0,
attention_dropout=0.0,
activation_dropout=0.0,
initializer_range=0.02,
scale_embedding=False,
max_source_positions=1500,
**kwargs,
):
super().__init__(**kwargs)
self.num_mel_bins = num_mel_bins
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.initializer_range = initializer_range
self.layerdrop = layerdrop
self.num_hidden_layers = num_hidden_layers
self.scale_embedding = scale_embedding
self.max_source_positions = max_source_positions
| AudioFlamingo3EncoderConfig |
python | numpy__numpy | numpy/f2py/tests/test_string.py | {
"start": 979,
"end": 2938
} | class ____(util.F2PyTest):
sources = [util.getpath("tests", "src", "string", "fixed_string.f90")]
@staticmethod
def _sint(s, start=0, end=None):
"""Return the content of a string buffer as integer value.
For example:
_sint('1234') -> 4321
_sint('123A') -> 17321
"""
if isinstance(s, np.ndarray):
s = s.tobytes()
elif isinstance(s, str):
s = s.encode()
assert isinstance(s, bytes)
if end is None:
end = len(s)
i = 0
for j in range(start, min(end, len(s))):
i += s[j] * 10**j
return i
def _get_input(self, intent="in"):
if intent in ["in"]:
yield ""
yield "1"
yield "1234"
yield "12345"
yield b""
yield b"\0"
yield b"1"
yield b"\01"
yield b"1\0"
yield b"1234"
yield b"12345"
yield np.ndarray((), np.bytes_, buffer=b"") # array(b'', dtype='|S0')
yield np.array(b"") # array(b'', dtype='|S1')
yield np.array(b"\0")
yield np.array(b"1")
yield np.array(b"1\0")
yield np.array(b"\01")
yield np.array(b"1234")
yield np.array(b"123\0")
yield np.array(b"12345")
def test_intent_in(self):
for s in self._get_input():
r = self.module.test_in_bytes4(s)
# also checks that s is not changed inplace
expected = self._sint(s, end=4)
assert r == expected, s
def test_intent_inout(self):
for s in self._get_input(intent="inout"):
rest = self._sint(s, start=4)
r = self.module.test_inout_bytes4(s)
expected = self._sint(s, end=4)
assert r == expected
# check that the rest of input string is preserved
assert rest == self._sint(s, start=4)
| TestFixedString |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_column09.py | {
"start": 315,
"end": 1210
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_column09.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [47400832, 61387136]
data = [
[1, 2, 3, 4, 5],
[1, 2, 3, 2, 1],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
chart.add_series(
{"categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$B$1:$B$5"}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | mlflow__mlflow | dev/clint/src/clint/rules/assign_before_append.py | {
"start": 48,
"end": 2027
} | class ____(Rule):
def _message(self) -> str:
return (
"Avoid unnecessary assignment before appending to a list. "
"Use a list comprehension instead."
)
@staticmethod
def check(node: ast.For, prev_stmt: ast.stmt | None) -> bool:
"""
Returns True if the for loop contains exactly two statements:
an assignment followed by appending that variable to a list, AND
the loop is immediately preceded by an empty list initialization.
Examples that should be flagged:
---
items = []
for x in data:
item = transform(x)
items.append(item)
---
"""
# Match: for loop with exactly 2 statements in body
match node:
case ast.For(body=[stmt1, stmt2]):
pass
case _:
return False
# Match stmt1: simple assignment (item = x)
match stmt1:
case ast.Assign(targets=[ast.Name(id=assigned_var)]):
pass
case _:
return False
# Match stmt2: list.append(item)
match stmt2:
case ast.Expr(
value=ast.Call(
func=ast.Attribute(value=ast.Name(id=list_name), attr="append"),
args=[ast.Name(id=appended_var)],
)
):
# Check if the appended variable is the same as the assigned variable
if appended_var != assigned_var:
return False
case _:
return False
# Only flag if prev_stmt is empty list initialization for the same list
match prev_stmt:
case ast.Assign(
targets=[ast.Name(id=prev_list_name)],
value=ast.List(elts=[]),
) if prev_list_name == list_name:
return True
case _:
return False
| AssignBeforeAppend |
python | pandas-dev__pandas | pandas/tests/io/formats/test_printing.py | {
"start": 1186,
"end": 1361
} | class ____(Mapping):
def __getitem__(self, key):
return 4
def __iter__(self):
return iter(["a", "b"])
def __len__(self):
return 2
| MyMapping |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/pipes/utils.py | {
"start": 4637,
"end": 7515
} | class ____(PipesMessageReader):
"""Message reader that reads messages by tailing a specified file.
Args:
path (str): The path of the file to which messages will be written. The file will be deleted
on close of the pipes session.
include_stdio_in_messages (bool): Whether to include stdout/stderr logs in the messages produced by the message writer in the external process.
cleanup_file (bool): Whether to delete the file on close of the pipes session.
"""
def __init__(
self, path: str, include_stdio_in_messages: bool = False, cleanup_file: bool = True
):
self._path = check.str_param(path, "path")
self._include_stdio_in_messages = check.bool_param(
include_stdio_in_messages, "include_stdio_in_messages"
)
self._cleanup_file = cleanup_file
def on_launched(self, params: PipesLaunchedData) -> None: # pyright: ignore[reportIncompatibleMethodOverride]
self.launched_payload = params
@contextmanager
def read_messages(
self,
handler: "PipesMessageHandler",
) -> Iterator[PipesParams]:
"""Set up a thread to read streaming messages from the external process by tailing the
target file.
Args:
handler (PipesMessageHandler): object to process incoming messages
Yields:
PipesParams: A dict of parameters that specifies where a pipes process should write
pipes protocol messages.
"""
is_session_closed = Event()
thread = None
try:
open(self._path, "w").close() # create file
thread = Thread(
target=self._reader_thread,
args=(handler, is_session_closed),
daemon=True,
)
thread.start()
yield {
PipesDefaultMessageWriter.FILE_PATH_KEY: self._path,
PipesDefaultMessageWriter.INCLUDE_STDIO_IN_MESSAGES_KEY: self._include_stdio_in_messages,
}
finally:
is_session_closed.set()
if thread:
thread.join()
if os.path.exists(self._path) and self._cleanup_file:
os.remove(self._path)
def _reader_thread(self, handler: "PipesMessageHandler", is_resource_complete: Event) -> None:
try:
for line in tail_file(self._path, lambda: is_resource_complete.is_set()):
message = json.loads(line)
handler.handle_message(message)
except:
handler.report_pipes_framework_exception(
f"{self.__class__.__name__} reader thread",
sys.exc_info(),
)
raise
def no_messages_debug_text(self) -> str:
return f"Attempted to read messages from file {self._path}."
@public
| PipesFileMessageReader |
python | huggingface__transformers | tests/models/mamba2/test_modeling_mamba2.py | {
"start": 1408,
"end": 2480
} | class ____(ConfigTester):
def _create_config(self, hidden_size: int, num_heads: int, expand: int, head_dim: int):
_input_dict = self.inputs_dict.copy()
_input_dict["hidden_size"] = hidden_size
_input_dict["num_heads"] = num_heads
_input_dict["expand"] = expand
_input_dict["head_dim"] = head_dim
return self.config_class(**_input_dict)
def test_hidden_size_compatibility(self):
self._create_config(hidden_size=2, num_heads=2, expand=2, head_dim=2)
self._create_config(hidden_size=4, num_heads=4, expand=2, head_dim=2)
self._create_config(hidden_size=2, num_heads=4, expand=4, head_dim=2)
with self.parent.assertRaises(ValueError):
self._create_config(hidden_size=2, num_heads=4, expand=2, head_dim=4)
with self.parent.assertRaises(ValueError):
self._create_config(hidden_size=4, num_heads=2, expand=4, head_dim=2)
def run_common_tests(self):
self.test_hidden_size_compatibility()
return super().run_common_tests()
| Mamba2ConfigTester |
python | sympy__sympy | sympy/physics/quantum/operator.py | {
"start": 5712,
"end": 6774
} | class ____(Operator):
"""A Hermitian operator that satisfies H == Dagger(H).
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
operator. For time-dependent operators, this will include the time.
Examples
========
>>> from sympy.physics.quantum import Dagger, HermitianOperator
>>> H = HermitianOperator('H')
>>> Dagger(H)
H
"""
is_hermitian = True
def _eval_inverse(self):
if isinstance(self, UnitaryOperator):
return self
else:
return Operator._eval_inverse(self)
def _eval_power(self, exp):
if isinstance(self, UnitaryOperator):
# so all eigenvalues of self are 1 or -1
if exp.is_even:
from sympy.core.singleton import S
return S.One # is identity, see Issue 24153.
elif exp.is_odd:
return self
# No simplification in all other cases
return Operator._eval_power(self, exp)
| HermitianOperator |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 38274,
"end": 42313
} | class ____(Operation):
def __init__(self, axis=None, *, name=None):
super().__init__(name=name)
# np.average() does not support axis as tuple as declared by the
# docstring, it only supports int or None.
self.axis = axis
def call(self, x, weights=None):
return backend.numpy.average(x, weights=weights, axis=self.axis)
def compute_output_spec(self, x, weights=None):
dtypes_to_resolve = [getattr(x, "dtype", type(x)), float]
if weights is not None:
shape_match = shape_equal(x.shape, weights.shape, allow_none=True)
if self.axis is not None:
shape_match_on_axis = shape_equal(
[x.shape[self.axis]], weights.shape, allow_none=True
)
dtypes_to_resolve.append(getattr(weights, "dtype", type(weights)))
dtype = dtypes.result_type(*dtypes_to_resolve)
if self.axis is None:
if weights is None or shape_match:
return KerasTensor([], dtype=dtype)
else:
raise ValueError(
"`weights` must have the same shape as `x` when "
f"`axis=None`, but received `weights.shape={weights.shape}`"
f" and `x.shape={x.shape}`."
)
if weights is None or shape_match_on_axis or shape_match:
return KerasTensor(
reduce_shape(x.shape, axis=[self.axis]), dtype=dtype
)
else:
# `weights` can either be a 1D array of length `x.shape[axis]` or
# of the same shape as `x`.
raise ValueError(
"`weights` must have the same size as `x` at "
f"`axis={self.axis}` but received "
f"`weights.shape={weights.shape}` while x.shape at "
f"`{self.axis}` is `{x.shape[self.axis]}`."
)
@keras_export(["keras.ops.average", "keras.ops.numpy.average"])
def average(x, axis=None, weights=None):
"""Compute the weighted average along the specified axis.
Args:
x: Input tensor.
axis: Integer along which to average `x`. The default, `axis=None`,
will average over all of the elements of the input tensor. If axis
is negative it counts from the last to the first axis.
weights: Tensor of weights associated with the values in `x`. Each
value in `x` contributes to the average according to its
associated weight. The weights array can either be 1-D (in which
case its length must be the size of a along the given axis) or of
the same shape as `x`. If `weights=None` (default), then all data
in `x` are assumed to have a weight equal to one.
The 1-D calculation is: `avg = sum(a * weights) / sum(weights)`.
The only constraint on weights is that `sum(weights)` must not be 0.
Returns:
Return the average along the specified axis.
Examples:
>>> data = keras.ops.arange(1, 5)
>>> data
array([1, 2, 3, 4], dtype=int32)
>>> keras.ops.average(data)
array(2.5, dtype=float32)
>>> keras.ops.average(
... keras.ops.arange(1, 11),
... weights=keras.ops.arange(10, 0, -1)
... )
array(4., dtype=float32)
>>> data = keras.ops.arange(6).reshape((3, 2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]], dtype=int32)
>>> keras.ops.average(
... data,
... axis=1,
... weights=keras.ops.array([1./4, 3./4])
... )
array([0.75, 2.75, 4.75], dtype=float32)
>>> keras.ops.average(
... data,
... weights=keras.ops.array([1./4, 3./4])
... )
Traceback (most recent call last):
...
ValueError: Axis must be specified when shapes of a and weights differ.
"""
if any_symbolic_tensors((x,)):
return Average(axis=axis).symbolic_call(x, weights=weights)
return backend.numpy.average(x, axis=axis, weights=weights)
| Average |
python | zarr-developers__zarr-python | src/zarr/core/attributes.py | {
"start": 284,
"end": 1687
} | class ____(MutableMapping[str, JSON]):
def __init__(self, obj: AnyArray | Group) -> None:
# key=".zattrs", read_only=False, cache=True, synchronizer=None
self._obj = obj
def __getitem__(self, key: str) -> JSON:
return self._obj.metadata.attributes[key]
def __setitem__(self, key: str, value: JSON) -> None:
new_attrs = dict(self._obj.metadata.attributes)
new_attrs[key] = value
self._obj = self._obj.update_attributes(new_attrs)
def __delitem__(self, key: str) -> None:
new_attrs = dict(self._obj.metadata.attributes)
del new_attrs[key]
self.put(new_attrs)
def __iter__(self) -> Iterator[str]:
return iter(self._obj.metadata.attributes)
def __len__(self) -> int:
return len(self._obj.metadata.attributes)
def put(self, d: dict[str, JSON]) -> None:
"""
Overwrite all attributes with the values from `d`.
Equivalent to the following pseudo-code, but performed atomically.
```python
attrs = {"a": 1, "b": 2}
attrs.clear()
attrs.update({"a": "3", "c": 4})
print(attrs)
#> {'a': '3', 'c': 4}
```
"""
self._obj.metadata.attributes.clear()
self._obj = self._obj.update_attributes(d)
def asdict(self) -> dict[str, JSON]:
return dict(self._obj.metadata.attributes)
| Attributes |
python | mlflow__mlflow | tests/models/test_cli.py | {
"start": 6294,
"end": 35136
} | class ____:
model_uri: str
model_registry_uri: str
input_json_path: Path
input_csv_path: Path
output_json_path: Path
x: np.ndarray
sk_model: sklearn.base.BaseEstimator
@pytest.fixture
def predict_test_setup(
iris_data: tuple[np.ndarray, np.ndarray],
sk_model: sklearn.neighbors.KNeighborsClassifier,
tmp_path: Path,
) -> PredictTestData:
with mlflow.start_run() as active_run:
mlflow.sklearn.log_model(sk_model, name="model", registered_model_name="impredicting")
model_uri = f"runs:/{active_run.info.run_id}/model"
model_registry_uri = "models:/impredicting/None"
input_json_path = tmp_path / "input.json"
input_csv_path = tmp_path / "input.csv"
output_json_path = tmp_path / "output.json"
x, _ = iris_data
with open(input_json_path, "w") as f:
json.dump({"dataframe_split": pd.DataFrame(x).to_dict(orient="split")}, f)
pd.DataFrame(x).to_csv(input_csv_path, index=False)
return PredictTestData(
model_uri=model_uri,
model_registry_uri=model_registry_uri,
input_json_path=input_json_path,
input_csv_path=input_csv_path,
output_json_path=output_json_path,
x=x,
sk_model=sk_model,
)
def test_predict_with_model_registry_uri(predict_test_setup: PredictTestData) -> None:
setup = predict_test_setup
subprocess.check_call(
[
sys.executable,
"-m",
"mlflow",
"models",
"predict",
"-m",
setup.model_registry_uri,
"-i",
setup.input_json_path,
"-o",
setup.output_json_path,
"--env-manager",
"local",
],
env=env_with_tracking_uri(),
)
actual = pd.read_json(setup.output_json_path, orient="records")
actual = actual[actual.columns[0]].values
expected = setup.sk_model.predict(setup.x)
assert all(expected == actual)
def test_predict_with_conda_and_install_mlflow(predict_test_setup: PredictTestData) -> None:
setup = predict_test_setup
subprocess.check_call(
[
sys.executable,
"-m",
"mlflow",
"models",
"predict",
"-m",
setup.model_uri,
"-i",
setup.input_json_path,
"-o",
setup.output_json_path,
*extra_options,
],
env=env_with_tracking_uri(),
)
actual = pd.read_json(setup.output_json_path, orient="records")
actual = actual[actual.columns[0]].values
expected = setup.sk_model.predict(setup.x)
assert all(expected == actual)
def test_predict_explicit_json_format_default_orient(predict_test_setup: PredictTestData) -> None:
setup = predict_test_setup
subprocess.check_call(
[
sys.executable,
"-m",
"mlflow",
"models",
"predict",
"-m",
setup.model_uri,
"-i",
setup.input_json_path,
"-o",
setup.output_json_path,
"-t",
"json",
*extra_options,
],
env=env_with_tracking_uri(),
)
actual = pd.read_json(setup.output_json_path, orient="records")
actual = actual[actual.columns[0]].values
expected = setup.sk_model.predict(setup.x)
assert all(expected == actual)
def test_predict_explicit_json_format_split_orient(predict_test_setup: PredictTestData) -> None:
# Note: This test has the same command as the previous one but tests orient==split
# The comment in original code mentions this should be split orient
setup = predict_test_setup
subprocess.check_call(
[
sys.executable,
"-m",
"mlflow",
"models",
"predict",
"-m",
setup.model_uri,
"-i",
setup.input_json_path,
"-o",
setup.output_json_path,
"-t",
"json",
*extra_options,
],
env=env_with_tracking_uri(),
)
actual = pd.read_json(setup.output_json_path, orient="records")
actual = actual[actual.columns[0]].values
expected = setup.sk_model.predict(setup.x)
assert all(expected == actual)
def test_predict_stdin_stdout(predict_test_setup: PredictTestData) -> None:
setup = predict_test_setup
stdout = subprocess.check_output(
[
sys.executable,
"-m",
"mlflow",
"models",
"predict",
"-m",
setup.model_uri,
"-t",
"json",
*extra_options,
],
input=setup.input_json_path.read_text(),
env=env_with_tracking_uri(),
text=True,
)
predictions = re.search(r"{\"predictions\": .*}", stdout).group(0)
actual = pd.read_json(StringIO(predictions), orient="records")
actual = actual[actual.columns[0]].values
expected = setup.sk_model.predict(setup.x)
assert all(expected == actual)
# NB: We do not test orient=records here because records may loose column ordering.
# orient == records is tested in other test with simpler model.
def test_predict_csv_format(predict_test_setup: PredictTestData) -> None:
setup = predict_test_setup
subprocess.check_call(
[
sys.executable,
"-m",
"mlflow",
"models",
"predict",
"-m",
setup.model_uri,
"-i",
setup.input_csv_path,
"-o",
setup.output_json_path,
"-t",
"csv",
*extra_options,
],
env=env_with_tracking_uri(),
)
actual = pd.read_json(setup.output_json_path, orient="records")
actual = actual[actual.columns[0]].values
expected = setup.sk_model.predict(setup.x)
assert all(expected == actual)
def test_predict_check_content_type(iris_data, sk_model, tmp_path):
with mlflow.start_run():
mlflow.sklearn.log_model(sk_model, name="model", registered_model_name="impredicting")
model_registry_uri = "models:/impredicting/None"
input_json_path = tmp_path / "input.json"
input_csv_path = tmp_path / "input.csv"
output_json_path = tmp_path / "output.json"
x, _ = iris_data
with input_json_path.open("w") as f:
json.dump({"dataframe_split": pd.DataFrame(x).to_dict(orient="split")}, f)
pd.DataFrame(x).to_csv(input_csv_path, index=False)
# Throw errors for invalid content_type
prc = subprocess.run(
[
sys.executable,
"-m",
"mlflow",
"models",
"predict",
"-m",
model_registry_uri,
"-i",
input_json_path,
"-o",
output_json_path,
"-t",
"invalid",
"--env-manager",
"local",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env_with_tracking_uri(),
check=False,
)
assert prc.returncode != 0
assert "Content type must be one of json or csv." in prc.stderr.decode("utf-8")
def test_predict_check_input_path(iris_data, sk_model, tmp_path):
with mlflow.start_run():
mlflow.sklearn.log_model(sk_model, name="model", registered_model_name="impredicting")
model_registry_uri = "models:/impredicting/None"
input_json_path = tmp_path / "input with space.json"
input_csv_path = tmp_path / "input.csv"
output_json_path = tmp_path / "output.json"
x, _ = iris_data
with input_json_path.open("w") as f:
json.dump({"dataframe_split": pd.DataFrame(x).to_dict(orient="split")}, f)
pd.DataFrame(x).to_csv(input_csv_path, index=False)
# Valid input path with space
prc = subprocess.run(
[
sys.executable,
"-m",
"mlflow",
"models",
"predict",
"-m",
model_registry_uri,
"-i",
f"{input_json_path}",
"-o",
output_json_path,
"--env-manager",
"local",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env_with_tracking_uri(),
check=False,
text=True,
)
assert prc.returncode == 0
# Throw errors for invalid input_path
prc = subprocess.run(
[
sys.executable,
"-m",
"mlflow",
"models",
"predict",
"-m",
model_registry_uri,
"-i",
f'{input_json_path}"; echo ThisIsABug! "',
"-o",
output_json_path,
"--env-manager",
"local",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env_with_tracking_uri(),
check=False,
text=True,
)
assert prc.returncode != 0
assert "ThisIsABug!" not in prc.stdout
assert "FileNotFoundError" in prc.stderr
prc = subprocess.run(
[
sys.executable,
"-m",
"mlflow",
"models",
"predict",
"-m",
model_registry_uri,
"-i",
f'{input_csv_path}"; echo ThisIsABug! "',
"-o",
output_json_path,
"-t",
"csv",
"--env-manager",
"local",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env_with_tracking_uri(),
check=False,
text=True,
)
assert prc.returncode != 0
assert "ThisIsABug!" not in prc.stdout
assert "FileNotFoundError" in prc.stderr
def test_predict_check_output_path(iris_data, sk_model, tmp_path):
with mlflow.start_run():
mlflow.sklearn.log_model(sk_model, name="model", registered_model_name="impredicting")
model_registry_uri = "models:/impredicting/None"
input_json_path = tmp_path / "input.json"
input_csv_path = tmp_path / "input.csv"
output_json_path = tmp_path / "output.json"
x, _ = iris_data
with input_json_path.open("w") as f:
json.dump({"dataframe_split": pd.DataFrame(x).to_dict(orient="split")}, f)
pd.DataFrame(x).to_csv(input_csv_path, index=False)
prc = subprocess.run(
[
sys.executable,
"-m",
"mlflow",
"models",
"predict",
"-m",
model_registry_uri,
"-i",
input_json_path,
"-o",
f'{output_json_path}"; echo ThisIsABug! "',
"--env-manager",
"local",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env_with_tracking_uri(),
check=False,
text=True,
)
assert prc.returncode == 0
assert "ThisIsABug!" not in prc.stdout
def test_prepare_env_passes(sk_model):
if no_conda:
pytest.skip("This test requires conda.")
with TempDir(chdr=True):
with mlflow.start_run() as active_run:
mlflow.sklearn.log_model(sk_model, name="model")
model_uri = f"runs:/{active_run.info.run_id}/model"
# With conda
subprocess.run(
[
sys.executable,
"-m",
"mlflow",
"models",
"prepare-env",
"-m",
model_uri,
],
env=env_with_tracking_uri(),
check=True,
)
# Should be idempotent
subprocess.run(
[
sys.executable,
"-m",
"mlflow",
"models",
"prepare-env",
"-m",
model_uri,
],
env=env_with_tracking_uri(),
check=True,
)
def test_prepare_env_fails(sk_model):
if no_conda:
pytest.skip("This test requires conda.")
with TempDir(chdr=True):
with mlflow.start_run() as active_run:
mlflow.sklearn.log_model(
sk_model, name="model", pip_requirements=["does-not-exist-dep==abc"]
)
model_uri = f"runs:/{active_run.info.run_id}/model"
# With conda - should fail due to bad conda environment.
prc = subprocess.run(
[
sys.executable,
"-m",
"mlflow",
"models",
"prepare-env",
"-m",
model_uri,
],
env=env_with_tracking_uri(),
check=False,
)
assert prc.returncode != 0
@pytest.mark.parametrize("enable_mlserver", [True, False])
def test_generate_dockerfile(sk_model, enable_mlserver, tmp_path):
with mlflow.start_run() as active_run:
if enable_mlserver:
mlflow.sklearn.log_model(
sk_model, name="model", extra_pip_requirements=["/opt/mlflow", PROTOBUF_REQUIREMENT]
)
else:
mlflow.sklearn.log_model(sk_model, name="model")
model_uri = f"runs:/{active_run.info.run_id}/model"
extra_args = ["--install-mlflow"]
if enable_mlserver:
extra_args.append("--enable-mlserver")
output_directory = tmp_path.joinpath("output_directory")
pyfunc_generate_dockerfile(
output_directory,
model_uri,
extra_args=extra_args,
env=env_with_tracking_uri(),
)
assert output_directory.is_dir()
assert output_directory.joinpath("Dockerfile").exists()
assert output_directory.joinpath("model_dir").is_dir()
# Assert file is not empty
assert output_directory.joinpath("Dockerfile").stat().st_size != 0
@pytest.mark.parametrize("enable_mlserver", [True, False])
def test_build_docker(iris_data, sk_model, enable_mlserver):
with mlflow.start_run() as active_run:
if enable_mlserver:
mlflow.sklearn.log_model(
sk_model, name="model", extra_pip_requirements=["/opt/mlflow", PROTOBUF_REQUIREMENT]
)
else:
mlflow.sklearn.log_model(sk_model, name="model", extra_pip_requirements=["/opt/mlflow"])
model_uri = f"runs:/{active_run.info.run_id}/model"
x, _ = iris_data
df = pd.DataFrame(x)
extra_args = ["--install-mlflow"]
if enable_mlserver:
extra_args.append("--enable-mlserver")
image_name = pyfunc_build_image(
model_uri,
extra_args=extra_args,
env=env_with_tracking_uri(),
)
host_port = get_safe_port()
scoring_proc = pyfunc_serve_from_docker_image(image_name, host_port)
_validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model, enable_mlserver)
def test_build_docker_virtualenv(iris_data, sk_model):
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(
sk_model, name="model", extra_pip_requirements=["/opt/mlflow"]
)
x, _ = iris_data
df = pd.DataFrame(iris_data[0])
extra_args = ["--install-mlflow", "--env-manager", "virtualenv"]
image_name = pyfunc_build_image(
model_info.model_uri,
extra_args=extra_args,
env=env_with_tracking_uri(),
)
host_port = get_safe_port()
scoring_proc = pyfunc_serve_from_docker_image(image_name, host_port)
_validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model)
@pytest.mark.parametrize("enable_mlserver", [True, False])
def test_build_docker_with_env_override(iris_data, sk_model, enable_mlserver):
with mlflow.start_run() as active_run:
if enable_mlserver:
mlflow.sklearn.log_model(
sk_model, name="model", extra_pip_requirements=["/opt/mlflow", PROTOBUF_REQUIREMENT]
)
else:
mlflow.sklearn.log_model(sk_model, name="model", extra_pip_requirements=["/opt/mlflow"])
model_uri = f"runs:/{active_run.info.run_id}/model"
x, _ = iris_data
df = pd.DataFrame(x)
extra_args = ["--install-mlflow"]
if enable_mlserver:
extra_args.append("--enable-mlserver")
image_name = pyfunc_build_image(
model_uri,
extra_args=extra_args,
env=env_with_tracking_uri(),
)
host_port = get_safe_port()
scoring_proc = pyfunc_serve_from_docker_image_with_env_override(image_name, host_port)
_validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model, enable_mlserver)
def test_build_docker_without_model_uri(iris_data, sk_model, tmp_path):
model_path = tmp_path.joinpath("model")
mlflow.sklearn.save_model(sk_model, model_path, extra_pip_requirements=["/opt/mlflow"])
image_name = pyfunc_build_image(model_uri=None)
host_port = get_safe_port()
scoring_proc = pyfunc_serve_from_docker_image_with_env_override(
image_name,
host_port,
extra_docker_run_options=["-v", f"{model_path}:/opt/ml/model"],
)
x = iris_data[0]
df = pd.DataFrame(x)
_validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model)
def _validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model, enable_mlserver=False):
with RestEndpoint(proc=scoring_proc, port=host_port, validate_version=False) as endpoint:
for content_type in [CONTENT_TYPE_JSON, CONTENT_TYPE_CSV]:
scoring_response = endpoint.invoke(df, content_type)
assert scoring_response.status_code == 200, (
f"Failed to serve prediction, got response {scoring_response.text}"
)
np.testing.assert_array_equal(
np.array(json.loads(scoring_response.text)["predictions"]), sk_model.predict(x)
)
# Try examples of bad input, verify we get a non-200 status code
for content_type in [CONTENT_TYPE_JSON, CONTENT_TYPE_CSV, CONTENT_TYPE_JSON]:
scoring_response = endpoint.invoke(data="", content_type=content_type)
expected_status_code = 500 if enable_mlserver else 400
assert scoring_response.status_code == expected_status_code, (
f"Expected server failure with error code {expected_status_code}, "
f"got response with status code {scoring_response.status_code} "
f"and body {scoring_response.text}"
)
if enable_mlserver:
# MLServer returns a different set of errors.
# Skip these assertions until this issue gets tackled:
# https://github.com/SeldonIO/MLServer/issues/360)
continue
scoring_response_dict = json.loads(scoring_response.content)
assert "error_code" in scoring_response_dict
assert scoring_response_dict["error_code"] == ErrorCode.Name(BAD_REQUEST)
assert "message" in scoring_response_dict
def test_env_manager_warning_for_use_of_conda(monkeypatch):
with mock.patch("mlflow.models.cli.get_flavor_backend") as mock_get_flavor_backend:
with pytest.warns(UserWarning, match=r"Use of conda is discouraged"):
CliRunner().invoke(
models_cli.serve,
["--model-uri", "model", "--env-manager", "conda"],
catch_exceptions=False,
)
with warnings.catch_warnings():
warnings.simplefilter("error")
monkeypatch.setenv(MLFLOW_DISABLE_ENV_MANAGER_CONDA_WARNING.name, "TRUE")
CliRunner().invoke(
models_cli.serve,
["--model-uri", "model", "--env-manager", "conda"],
catch_exceptions=False,
)
assert mock_get_flavor_backend.call_count == 2
def test_env_manager_unsupported_value():
with pytest.raises(MlflowException, match=r"Invalid value for `env_manager`"):
CliRunner().invoke(
models_cli.serve,
["--model-uri", "model", "--env-manager", "abc"],
catch_exceptions=False,
)
def test_host_invalid_value():
class MyModel(mlflow.pyfunc.PythonModel):
def predict(self, context, model_input):
return model_input
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="test_model", python_model=MyModel(), registered_model_name="model"
)
with mock.patch(
"mlflow.models.cli.get_flavor_backend",
return_value=PyFuncBackend({}, env_manager=_EnvManager.VIRTUALENV),
):
with pytest.raises(ShellCommandException, match=r"Non-zero exit code: 1"):
CliRunner().invoke(
models_cli.serve,
["--model-uri", model_info.model_uri, "--host", "localhost & echo BUG"],
catch_exceptions=False,
)
def test_change_conda_env_root_location(tmp_path, sk_model):
def _test_model(env_root_path, model_path, sklearn_ver):
env_root_path.mkdir(exist_ok=True)
mlflow.sklearn.save_model(
sk_model, str(model_path), pip_requirements=[f"scikit-learn=={sklearn_ver}"]
)
env = get_flavor_backend(
str(model_path),
env_manager=_EnvManager.CONDA,
install_mlflow=False,
env_root_dir=str(env_root_path),
).prepare_env(model_uri=str(model_path))
conda_env_name = _get_conda_env_name(
str(model_path / "conda.yaml"), env_root_dir=env_root_path
)
env_path = env_root_path / "conda_envs" / conda_env_name
assert env_path.exists()
python_exec_path = str(env_path / "bin" / "python")
# Test execution of command under the correct activated python env.
env.execute(
command=f"python -c \"import sys; assert sys.executable == '{python_exec_path}'; "
f"import sklearn; assert sklearn.__version__ == '{sklearn_ver}'\"",
)
# Cleanup model path and Conda environment to prevent out of space failures on CI
shutil.rmtree(model_path)
shutil.rmtree(env_path)
env_root1_path = tmp_path / "root1"
env_root2_path = tmp_path / "root2"
# Test with model1_path
model1_path = tmp_path / "model1"
_test_model(env_root1_path, model1_path, "1.4.0")
_test_model(env_root2_path, model1_path, "1.4.0")
# Test with model2_path
model2_path = tmp_path / "model2"
_test_model(env_root1_path, model2_path, "1.4.2")
@pytest.mark.parametrize(
("input_schema", "output_schema", "params_schema"),
[(True, False, False), (False, True, False), (False, False, True)],
)
def test_signature_enforcement_with_model_serving(input_schema, output_schema, params_schema):
class MyModel(mlflow.pyfunc.PythonModel):
def predict(self, context, model_input, params=None):
return ["test"]
input_data = ["test_input"] if input_schema else None
output_data = ["test_output"] if output_schema else None
params = {"test": "test"} if params_schema else None
signature = mlflow.models.infer_signature(
model_input=input_data, model_output=output_data, params=params
)
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="test_model", python_model=MyModel(), signature=signature
)
inference_payload = json.dumps({"inputs": ["test"]})
# Serve and score the model
scoring_result = pyfunc_serve_and_score_model(
model_uri=model_info.model_uri,
data=inference_payload,
content_type=CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
scoring_result.raise_for_status()
# Assert the prediction result
assert json.loads(scoring_result.content)["predictions"] == ["test"]
def assert_base_model_reqs():
"""
Helper function for testing model requirements. Asserts that the
contents of requirements.txt and conda.yaml are as expected, then
returns their filepaths so mutations can be performed.
"""
import cloudpickle
class MyModel(mlflow.pyfunc.PythonModel):
def predict(self, context, model_input, params=None):
return ["test"]
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(name="model", python_model=MyModel())
resolved_uri = ModelsArtifactRepository.get_underlying_uri(model_info.model_uri)
local_paths = get_model_requirements_files(resolved_uri)
requirements_txt_file = local_paths.requirements
conda_env_file = local_paths.conda
reqs = _get_requirements_from_file(requirements_txt_file)
assert Requirement(f"mlflow=={mlflow.__version__}") in reqs
assert Requirement(f"cloudpickle=={cloudpickle.__version__}") in reqs
reqs = _get_requirements_from_file(conda_env_file)
assert Requirement(f"mlflow=={mlflow.__version__}") in reqs
assert Requirement(f"cloudpickle=={cloudpickle.__version__}") in reqs
return model_info.model_uri
def test_update_requirements_cli_adds_reqs_successfully():
import cloudpickle
model_uri = assert_base_model_reqs()
CliRunner().invoke(
models_cli.update_pip_requirements,
["-m", f"{model_uri}", "add", "mlflow>=2.9, !=2.9.0", "coolpackage[extra]==8.8.8"],
catch_exceptions=False,
)
resolved_uri = ModelsArtifactRepository.get_underlying_uri(model_uri)
local_paths = get_model_requirements_files(resolved_uri)
# the tool should overwrite mlflow, add coolpackage, and leave cloudpickle alone
reqs = _get_requirements_from_file(local_paths.requirements)
assert Requirement("mlflow!=2.9.0,>=2.9") in reqs
assert Requirement("coolpackage[extra]==8.8.8") in reqs
assert Requirement(f"cloudpickle=={cloudpickle.__version__}") in reqs
reqs = _get_requirements_from_file(local_paths.conda)
assert Requirement("mlflow!=2.9.0,>=2.9") in reqs
assert Requirement("coolpackage[extra]==8.8.8") in reqs
assert Requirement(f"cloudpickle=={cloudpickle.__version__}") in reqs
def test_update_requirements_cli_removes_reqs_successfully():
import cloudpickle
model_uri = assert_base_model_reqs()
CliRunner().invoke(
models_cli.update_pip_requirements,
["-m", f"{model_uri}", "remove", "mlflow"],
catch_exceptions=False,
)
resolved_uri = ModelsArtifactRepository.get_underlying_uri(model_uri)
local_paths = get_model_requirements_files(resolved_uri)
# the tool should remove mlflow and leave cloudpickle alone
reqs = _get_requirements_from_file(local_paths.requirements)
assert reqs == [Requirement(f"cloudpickle=={cloudpickle.__version__}")]
reqs = _get_requirements_from_file(local_paths.conda)
assert reqs == [Requirement(f"cloudpickle=={cloudpickle.__version__}")]
def test_update_requirements_cli_throws_on_incompatible_input():
model_uri = assert_base_model_reqs()
with pytest.raises(
MlflowException, match="The specified requirements versions are incompatible"
):
CliRunner().invoke(
models_cli.update_pip_requirements,
["-m", f"{model_uri}", "add", "mlflow<2.6", "mlflow>2.7"],
catch_exceptions=False,
)
def test_update_model_requirements_add():
import cloudpickle
model_uri = assert_base_model_reqs()
update_model_requirements(
model_uri, "add", ["mlflow>=2.9, !=2.9.0", "coolpackage[extra]==8.8.8"]
)
resolved_uri = ModelsArtifactRepository.get_underlying_uri(model_uri)
local_paths = get_model_requirements_files(resolved_uri)
# the tool should overwrite mlflow, add coolpackage, and leave cloudpickle alone
reqs = _get_requirements_from_file(local_paths.requirements)
assert Requirement("mlflow!=2.9.0,>=2.9") in reqs
assert Requirement("coolpackage[extra]==8.8.8") in reqs
assert Requirement(f"cloudpickle=={cloudpickle.__version__}") in reqs
reqs = _get_requirements_from_file(local_paths.conda)
assert Requirement("mlflow!=2.9.0,>=2.9") in reqs
assert Requirement("coolpackage[extra]==8.8.8") in reqs
assert Requirement(f"cloudpickle=={cloudpickle.__version__}") in reqs
def test_update_model_requirements_remove():
import cloudpickle
model_uri = assert_base_model_reqs()
update_model_requirements(model_uri, "remove", ["mlflow"])
resolved_uri = ModelsArtifactRepository.get_underlying_uri(model_uri)
local_paths = get_model_requirements_files(resolved_uri)
# the tool should remove mlflow and leave cloudpickle alone
reqs = _get_requirements_from_file(local_paths.requirements)
assert reqs == [Requirement(f"cloudpickle=={cloudpickle.__version__}")]
reqs = _get_requirements_from_file(local_paths.conda)
assert reqs == [Requirement(f"cloudpickle=={cloudpickle.__version__}")]
| PredictTestData |
python | google__jax | jax/_src/export/serialization_generated.py | {
"start": 1715,
"end": 6221
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = PyTreeDef()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsPyTreeDef(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# PyTreeDef
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# PyTreeDef
def Kind(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# PyTreeDef
def Children(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
obj = PyTreeDef()
obj.Init(self._tab.Bytes, x)
return obj
return None
# PyTreeDef
def ChildrenLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# PyTreeDef
def ChildrenIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
# PyTreeDef
def ChildrenNames(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# PyTreeDef
def ChildrenNamesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# PyTreeDef
def ChildrenNamesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
return o == 0
# PyTreeDef
def CustomName(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# PyTreeDef
def CustomAuxdata(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# PyTreeDef
def CustomAuxdataAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int8Flags, o)
return 0
# PyTreeDef
def CustomAuxdataLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.VectorLen(o)
return 0
# PyTreeDef
def CustomAuxdataIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
return o == 0
def PyTreeDefStart(builder):
builder.StartObject(5)
def PyTreeDefAddKind(builder, kind):
builder.PrependInt8Slot(0, kind, 0)
def PyTreeDefAddChildren(builder, children):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(children), 0)
def PyTreeDefStartChildrenVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def PyTreeDefAddChildrenNames(builder, childrenNames):
builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(childrenNames), 0)
def PyTreeDefStartChildrenNamesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def PyTreeDefAddCustomName(builder, customName):
builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(customName), 0)
def PyTreeDefAddCustomAuxdata(builder, customAuxdata):
builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(customAuxdata), 0)
def PyTreeDefStartCustomAuxdataVector(builder, numElems):
return builder.StartVector(1, numElems, 1)
def PyTreeDefEnd(builder):
return builder.EndObject()
| PyTreeDef |
python | pytorch__pytorch | torch/nn/modules/normalization.py | {
"start": 8254,
"end": 11769
} | class ____(Module):
r"""Applies Group Normalization over a mini-batch of inputs.
This layer implements the operation as described in
the paper `Group Normalization <https://arxiv.org/abs/1803.08494>`__
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The input channels are separated into :attr:`num_groups` groups, each containing
``num_channels / num_groups`` channels. :attr:`num_channels` must be divisible by
:attr:`num_groups`. The mean and standard-deviation are calculated
separately over each group. :math:`\gamma` and :math:`\beta` are learnable
per-channel affine transform parameter vectors of size :attr:`num_channels` if
:attr:`affine` is ``True``.
The variance is calculated via the biased estimator, equivalent to
`torch.var(input, correction=0)`.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
num_groups (int): number of groups to separate the channels into
num_channels (int): number of channels expected in input
eps: a value added to the denominator for numerical stability. Default: 1e-5
affine: a boolean value that when set to ``True``, this module
has learnable per-channel affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, C, *)` where :math:`C=\text{num\_channels}`
- Output: :math:`(N, C, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 6, 10, 10)
>>> # Separate 6 channels into 3 groups
>>> m = nn.GroupNorm(3, 6)
>>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
>>> m = nn.GroupNorm(6, 6)
>>> # Put all 6 channels into a single group (equivalent with LayerNorm)
>>> m = nn.GroupNorm(1, 6)
>>> # Activating the module
>>> output = m(input)
"""
__constants__ = ["num_groups", "num_channels", "eps", "affine"]
num_groups: int
num_channels: int
eps: float
affine: bool
def __init__(
self,
num_groups: int,
num_channels: int,
eps: float = 1e-5,
affine: bool = True,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
if num_channels % num_groups != 0:
raise ValueError(
f"num_channels ({num_channels}) must be divisible by num_groups ({num_groups})"
)
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(torch.empty(num_channels, **factory_kwargs))
self.bias = Parameter(torch.empty(num_channels, **factory_kwargs))
else:
self.register_parameter("weight", None)
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self) -> None:
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input: Tensor) -> Tensor:
return F.group_norm(input, self.num_groups, self.weight, self.bias, self.eps)
def extra_repr(self) -> str:
return "{num_groups}, {num_channels}, eps={eps}, affine={affine}".format(
**self.__dict__
)
| GroupNorm |
python | huggingface__transformers | src/transformers/models/splinter/modeling_splinter.py | {
"start": 6845,
"end": 7557
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.align.modeling_align.AlignTextAttention with AlignText->Splinter
| SplinterSelfOutput |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-maps/llama_index/readers/maps/base.py | {
"start": 277,
"end": 4594
} | class ____(BaseReader):
"""
OpenMap Reader.
Get the map Features from the overpass api(osm) for the given location/area
Args:
localarea(str) - Area or location you are searching for
tag_values(str) - filter for the give area
search_tag(str) - Tag that you are looking for
if you not sure about the search_tag and tag_values visit https://taginfo.openstreetmap.org/tags
remove_keys(list) - list of keys that need to be removed from the response
by default following keys will be removed ['nodes','geometry','members']
"""
def __init__(self) -> None:
"""Initialize with parameters."""
super().__init__()
@staticmethod
def _get_user() -> str:
# choose from all lowercase letter
letters = string.ascii_lowercase
return "".join(random.choice(letters) for i in range(10))
@staticmethod
def _get_latlon(locarea: str, user_agent: str) -> tuple:
try:
from geopy.geocoders import Nominatim
except ImportError:
raise ImportError("install geopy using `pip3 install geopy`")
geolocator = Nominatim(user_agent=user_agent)
location = geolocator.geocode(locarea)
return (location.latitude, location.longitude) if location else (None, None)
def load_data(
self,
localarea: str,
search_tag: Optional[str] = "amenity",
remove_keys: Optional[List] = ["nodes", "geometry", "members"],
tag_only: Optional[bool] = True,
tag_values: Optional[List] = [""],
local_area_buffer: Optional[int] = 2000,
) -> List[Document]:
"""
This loader will bring you the all the node values from the open street maps for the given location.
Args:
localarea(str) - Area or location you are searching for
search_tag(str) - Tag that you are looking for
if you not sure about the search_tag and tag_values visit https://taginfo.openstreetmap.org/tags
remove_keys(list) - list of keys that need to be removed from the response
by default it those keys will be removed ['nodes','geometry','members']
tag_only(bool) - if True it return the nodes which has tags if False returns all the nodes
tag_values(str) - filter for the give area
local_area_buffer(int) - range that you wish to cover (Default 2000(2km))
"""
try:
from osmxtract import location, overpass
from osmxtract.errors import OverpassBadRequest
except ImportError:
raise ImportError("install osmxtract using `pip3 install osmxtract`")
null_list = ["", "null", "none", None]
extra_info = {}
local_area = localarea
if local_area.lower().strip() in null_list:
raise Exception("The Area should not be null")
user = self._get_user()
lat, lon = self._get_latlon(local_area, user)
try:
bounds = location.from_buffer(lat, lon, buffer_size=int(local_area_buffer))
except TypeError:
raise TypeError("Please give valid location name or check for spelling")
# overpass query generation and execution
tag_values = [str(i).lower().strip() for i in tag_values]
query = overpass.ql_query(
bounds, tag=search_tag.lower(), values=tag_values, timeout=500
)
extra_info["overpass_query"] = query
try:
response = overpass.request(query)
except OverpassBadRequest:
raise TypeError(
f"Error while executing the Query {query} please check the Args"
)
res = response["elements"]
_meta = response.copy()
del _meta["elements"]
extra_info["overpass_meta"] = str(_meta)
extra_info["lat"] = lat
extra_info["lon"] = lon
# filtering for only the tag values
filtered = [i for i in res if "tags" in i] if tag_only else res
for key in remove_keys:
[i.pop(key, None) for i in filtered]
if filtered:
return Document(text=str(filtered), extra_info=extra_info)
else:
return Document(text=str(res), extra_info=extra_info)
| OpenMap |
python | langchain-ai__langchain | libs/core/tests/unit_tests/output_parsers/test_openai_tools.py | {
"start": 22683,
"end": 42438
} | class ____(BaseModel):
"""record names of all people mentioned."""
names: list[str] = Field(..., description="all names mentioned")
person: Person = Field(..., description="info about the main subject")
# Expected to change when we support more granular pydantic streaming.
EXPECTED_STREAMED_PYDANTIC = [
NameCollector(
names=["suzy", "jermaine", "alex"],
person=Person(age=39, hair_color="brown", job="c"),
),
NameCollector(
names=["suzy", "jermaine", "alex"],
person=Person(age=39, hair_color="brown", job="concie"),
),
NameCollector(
names=["suzy", "jermaine", "alex"],
person=Person(age=39, hair_color="brown", job="concierge"),
),
]
def test_partial_pydantic_output_parser() -> None:
for use_tool_calls in [False, True]:
input_iter = _get_iter(use_tool_calls=use_tool_calls)
chain = input_iter | PydanticToolsParser(
tools=[NameCollector], first_tool_only=True
)
actual = list(chain.stream(None))
assert actual == EXPECTED_STREAMED_PYDANTIC
async def test_partial_pydantic_output_parser_async() -> None:
for use_tool_calls in [False, True]:
input_iter = _get_aiter(use_tool_calls=use_tool_calls)
chain = input_iter | PydanticToolsParser(
tools=[NameCollector], first_tool_only=True
)
actual = [p async for p in chain.astream(None)]
assert actual == EXPECTED_STREAMED_PYDANTIC
def test_parse_with_different_pydantic_2_v1() -> None:
"""Test with pydantic.v1.BaseModel from pydantic 2."""
class Forecast(pydantic.v1.BaseModel):
temperature: int
forecast: str
# Can't get pydantic to work here due to the odd typing of tryig to support
# both v1 and v2 in the same codebase.
parser = PydanticToolsParser(tools=[Forecast])
message = AIMessage(
content="",
tool_calls=[
{
"id": "call_OwL7f5PE",
"name": "Forecast",
"args": {"temperature": 20, "forecast": "Sunny"},
}
],
)
generation = ChatGeneration(
message=message,
)
assert parser.parse_result([generation]) == [
Forecast(
temperature=20,
forecast="Sunny",
)
]
def test_parse_with_different_pydantic_2_proper() -> None:
"""Test with pydantic.BaseModel from pydantic 2."""
class Forecast(BaseModel):
temperature: int
forecast: str
# Can't get pydantic to work here due to the odd typing of tryig to support
# both v1 and v2 in the same codebase.
parser = PydanticToolsParser(tools=[Forecast])
message = AIMessage(
content="",
tool_calls=[
{
"id": "call_OwL7f5PE",
"name": "Forecast",
"args": {"temperature": 20, "forecast": "Sunny"},
}
],
)
generation = ChatGeneration(
message=message,
)
assert parser.parse_result([generation]) == [
Forecast(
temperature=20,
forecast="Sunny",
)
]
def test_max_tokens_error(caplog: Any) -> None:
parser = PydanticToolsParser(tools=[NameCollector], first_tool_only=True)
message = AIMessage(
content="",
tool_calls=[
{
"id": "call_OwL7f5PE",
"name": "NameCollector",
"args": {"names": ["suz", "jerm"]},
}
],
response_metadata={"stop_reason": "max_tokens"},
)
with pytest.raises(ValidationError):
_ = parser.invoke(message)
assert any(
"`max_tokens` stop reason" in msg and record.levelname == "ERROR"
for record, msg in zip(caplog.records, caplog.messages, strict=False)
)
def test_pydantic_tools_parser_with_mixed_pydantic_versions() -> None:
"""Test PydanticToolsParser with both Pydantic v1 and v2 models."""
# For Python 3.14+ compatibility, use create_model for Pydantic v1
if sys.version_info >= (3, 14):
WeatherV1 = pydantic.v1.create_model( # noqa: N806
"WeatherV1",
__doc__="Weather information using Pydantic v1.",
temperature=(int, ...),
conditions=(str, ...),
)
else:
class WeatherV1(pydantic.v1.BaseModel):
"""Weather information using Pydantic v1."""
temperature: int
conditions: str
class LocationV2(BaseModel):
"""Location information using Pydantic v2."""
city: str
country: str
# Test with Pydantic v1 model
parser_v1 = PydanticToolsParser(tools=[WeatherV1])
message_v1 = AIMessage(
content="",
tool_calls=[
{
"id": "call_weather",
"name": "WeatherV1",
"args": {"temperature": 25, "conditions": "sunny"},
}
],
)
generation_v1 = ChatGeneration(message=message_v1)
result_v1 = parser_v1.parse_result([generation_v1])
assert len(result_v1) == 1
assert isinstance(result_v1[0], WeatherV1)
assert result_v1[0].temperature == 25 # type: ignore[attr-defined,unused-ignore]
assert result_v1[0].conditions == "sunny" # type: ignore[attr-defined,unused-ignore]
# Test with Pydantic v2 model
parser_v2 = PydanticToolsParser(tools=[LocationV2])
message_v2 = AIMessage(
content="",
tool_calls=[
{
"id": "call_location",
"name": "LocationV2",
"args": {"city": "Paris", "country": "France"},
}
],
)
generation_v2 = ChatGeneration(message=message_v2)
result_v2 = parser_v2.parse_result([generation_v2])
assert len(result_v2) == 1
assert isinstance(result_v2[0], LocationV2)
assert result_v2[0].city == "Paris"
assert result_v2[0].country == "France"
# Test with both v1 and v2 models
parser_mixed = PydanticToolsParser(tools=[WeatherV1, LocationV2])
message_mixed = AIMessage(
content="",
tool_calls=[
{
"id": "call_weather",
"name": "WeatherV1",
"args": {"temperature": 20, "conditions": "cloudy"},
},
{
"id": "call_location",
"name": "LocationV2",
"args": {"city": "London", "country": "UK"},
},
],
)
generation_mixed = ChatGeneration(message=message_mixed)
result_mixed = parser_mixed.parse_result([generation_mixed])
assert len(result_mixed) == 2
assert isinstance(result_mixed[0], WeatherV1)
assert result_mixed[0].temperature == 20 # type: ignore[attr-defined,unused-ignore]
assert isinstance(result_mixed[1], LocationV2)
assert result_mixed[1].city == "London"
def test_pydantic_tools_parser_with_custom_title() -> None:
"""Test PydanticToolsParser with Pydantic v2 model using custom title."""
class CustomTitleTool(BaseModel):
"""Tool with custom title in model config."""
model_config = {"title": "MyCustomToolName"}
value: int
description: str
# Test with custom title - tool should be callable by custom name
parser = PydanticToolsParser(tools=[CustomTitleTool])
message = AIMessage(
content="",
tool_calls=[
{
"id": "call_custom",
"name": "MyCustomToolName",
"args": {"value": 42, "description": "test"},
}
],
)
generation = ChatGeneration(message=message)
result = parser.parse_result([generation])
assert len(result) == 1
assert isinstance(result[0], CustomTitleTool)
assert result[0].value == 42
assert result[0].description == "test"
def test_pydantic_tools_parser_name_dict_fallback() -> None:
"""Test that name_dict properly falls back to __name__ when title is None."""
class ToolWithoutTitle(BaseModel):
"""Tool without explicit title."""
data: str
# Ensure model_config doesn't have a title or it's None
# (This is the default behavior)
parser = PydanticToolsParser(tools=[ToolWithoutTitle])
message = AIMessage(
content="",
tool_calls=[
{
"id": "call_no_title",
"name": "ToolWithoutTitle",
"args": {"data": "test_data"},
}
],
)
generation = ChatGeneration(message=message)
result = parser.parse_result([generation])
assert len(result) == 1
assert isinstance(result[0], ToolWithoutTitle)
assert result[0].data == "test_data"
def test_pydantic_tools_parser_with_nested_models() -> None:
"""Test PydanticToolsParser with nested Pydantic v1 and v2 models."""
# Nested v1 models
if sys.version_info >= (3, 14):
AddressV1 = pydantic.v1.create_model( # noqa: N806
"AddressV1",
__doc__="Address using Pydantic v1.",
street=(str, ...),
city=(str, ...),
zip_code=(str, ...),
)
PersonV1 = pydantic.v1.create_model( # noqa: N806
"PersonV1",
__doc__="Person with nested address using Pydantic v1.",
name=(str, ...),
age=(int, ...),
address=(AddressV1, ...),
)
else:
class AddressV1(pydantic.v1.BaseModel):
"""Address using Pydantic v1."""
street: str
city: str
zip_code: str
class PersonV1(pydantic.v1.BaseModel):
"""Person with nested address using Pydantic v1."""
name: str
age: int
address: AddressV1
# Nested v2 models
class CoordinatesV2(BaseModel):
"""Coordinates using Pydantic v2."""
latitude: float
longitude: float
class LocationV2(BaseModel):
"""Location with nested coordinates using Pydantic v2."""
name: str
coordinates: CoordinatesV2
# Test with nested Pydantic v1 model
parser_v1 = PydanticToolsParser(tools=[PersonV1])
message_v1 = AIMessage(
content="",
tool_calls=[
{
"id": "call_person",
"name": "PersonV1",
"args": {
"name": "Alice",
"age": 30,
"address": {
"street": "123 Main St",
"city": "Springfield",
"zip_code": "12345",
},
},
}
],
)
generation_v1 = ChatGeneration(message=message_v1)
result_v1 = parser_v1.parse_result([generation_v1])
assert len(result_v1) == 1
assert isinstance(result_v1[0], PersonV1)
assert result_v1[0].name == "Alice" # type: ignore[attr-defined,unused-ignore]
assert result_v1[0].age == 30 # type: ignore[attr-defined,unused-ignore]
assert isinstance(result_v1[0].address, AddressV1) # type: ignore[attr-defined,unused-ignore]
assert result_v1[0].address.street == "123 Main St" # type: ignore[attr-defined,unused-ignore]
assert result_v1[0].address.city == "Springfield" # type: ignore[attr-defined,unused-ignore]
# Test with nested Pydantic v2 model
parser_v2 = PydanticToolsParser(tools=[LocationV2])
message_v2 = AIMessage(
content="",
tool_calls=[
{
"id": "call_location",
"name": "LocationV2",
"args": {
"name": "Eiffel Tower",
"coordinates": {"latitude": 48.8584, "longitude": 2.2945},
},
}
],
)
generation_v2 = ChatGeneration(message=message_v2)
result_v2 = parser_v2.parse_result([generation_v2])
assert len(result_v2) == 1
assert isinstance(result_v2[0], LocationV2)
assert result_v2[0].name == "Eiffel Tower"
assert isinstance(result_v2[0].coordinates, CoordinatesV2)
assert result_v2[0].coordinates.latitude == 48.8584
assert result_v2[0].coordinates.longitude == 2.2945
# Test with both nested models in one message
parser_mixed = PydanticToolsParser(tools=[PersonV1, LocationV2])
message_mixed = AIMessage(
content="",
tool_calls=[
{
"id": "call_person",
"name": "PersonV1",
"args": {
"name": "Bob",
"age": 25,
"address": {
"street": "456 Oak Ave",
"city": "Portland",
"zip_code": "97201",
},
},
},
{
"id": "call_location",
"name": "LocationV2",
"args": {
"name": "Golden Gate Bridge",
"coordinates": {"latitude": 37.8199, "longitude": -122.4783},
},
},
],
)
generation_mixed = ChatGeneration(message=message_mixed)
result_mixed = parser_mixed.parse_result([generation_mixed])
assert len(result_mixed) == 2
assert isinstance(result_mixed[0], PersonV1)
assert result_mixed[0].name == "Bob" # type: ignore[attr-defined,unused-ignore]
assert result_mixed[0].address.city == "Portland" # type: ignore[attr-defined,unused-ignore]
assert isinstance(result_mixed[1], LocationV2)
assert result_mixed[1].name == "Golden Gate Bridge"
assert result_mixed[1].coordinates.latitude == 37.8199
def test_pydantic_tools_parser_with_optional_fields() -> None:
"""Test PydanticToolsParser with optional fields in v1 and v2 models."""
if sys.version_info >= (3, 14):
ProductV1 = pydantic.v1.create_model( # noqa: N806
"ProductV1",
__doc__="Product with optional fields using Pydantic v1.",
name=(str, ...),
price=(float, ...),
description=(str | None, None),
stock=(int, 0),
)
else:
class ProductV1(pydantic.v1.BaseModel):
"""Product with optional fields using Pydantic v1."""
name: str
price: float
description: str | None = None
stock: int = 0
# v2 model with optional fields
class UserV2(BaseModel):
"""User with optional fields using Pydantic v2."""
username: str
email: str
bio: str | None = None
age: int | None = None
# Test v1 with all fields provided
parser_v1_full = PydanticToolsParser(tools=[ProductV1])
message_v1_full = AIMessage(
content="",
tool_calls=[
{
"id": "call_product_full",
"name": "ProductV1",
"args": {
"name": "Laptop",
"price": 999.99,
"description": "High-end laptop",
"stock": 50,
},
}
],
)
generation_v1_full = ChatGeneration(message=message_v1_full)
result_v1_full = parser_v1_full.parse_result([generation_v1_full])
assert len(result_v1_full) == 1
assert isinstance(result_v1_full[0], ProductV1)
assert result_v1_full[0].name == "Laptop" # type: ignore[attr-defined,unused-ignore]
assert result_v1_full[0].price == 999.99 # type: ignore[attr-defined,unused-ignore]
assert result_v1_full[0].description == "High-end laptop" # type: ignore[attr-defined,unused-ignore]
assert result_v1_full[0].stock == 50 # type: ignore[attr-defined,unused-ignore]
# Test v1 with only required fields
parser_v1_minimal = PydanticToolsParser(tools=[ProductV1])
message_v1_minimal = AIMessage(
content="",
tool_calls=[
{
"id": "call_product_minimal",
"name": "ProductV1",
"args": {"name": "Mouse", "price": 29.99},
}
],
)
generation_v1_minimal = ChatGeneration(message=message_v1_minimal)
result_v1_minimal = parser_v1_minimal.parse_result([generation_v1_minimal])
assert len(result_v1_minimal) == 1
assert isinstance(result_v1_minimal[0], ProductV1)
assert result_v1_minimal[0].name == "Mouse" # type: ignore[attr-defined,unused-ignore]
assert result_v1_minimal[0].price == 29.99 # type: ignore[attr-defined,unused-ignore]
assert result_v1_minimal[0].description is None # type: ignore[attr-defined,unused-ignore]
assert result_v1_minimal[0].stock == 0 # type: ignore[attr-defined,unused-ignore]
# Test v2 with all fields provided
parser_v2_full = PydanticToolsParser(tools=[UserV2])
message_v2_full = AIMessage(
content="",
tool_calls=[
{
"id": "call_user_full",
"name": "UserV2",
"args": {
"username": "john_doe",
"email": "john@example.com",
"bio": "Software developer",
"age": 28,
},
}
],
)
generation_v2_full = ChatGeneration(message=message_v2_full)
result_v2_full = parser_v2_full.parse_result([generation_v2_full])
assert len(result_v2_full) == 1
assert isinstance(result_v2_full[0], UserV2)
assert result_v2_full[0].username == "john_doe"
assert result_v2_full[0].email == "john@example.com"
assert result_v2_full[0].bio == "Software developer"
assert result_v2_full[0].age == 28
# Test v2 with only required fields
parser_v2_minimal = PydanticToolsParser(tools=[UserV2])
message_v2_minimal = AIMessage(
content="",
tool_calls=[
{
"id": "call_user_minimal",
"name": "UserV2",
"args": {"username": "jane_smith", "email": "jane@example.com"},
}
],
)
generation_v2_minimal = ChatGeneration(message=message_v2_minimal)
result_v2_minimal = parser_v2_minimal.parse_result([generation_v2_minimal])
assert len(result_v2_minimal) == 1
assert isinstance(result_v2_minimal[0], UserV2)
assert result_v2_minimal[0].username == "jane_smith"
assert result_v2_minimal[0].email == "jane@example.com"
assert result_v2_minimal[0].bio is None
assert result_v2_minimal[0].age is None
# Test mixed v1 and v2 with partial optional fields
parser_mixed = PydanticToolsParser(tools=[ProductV1, UserV2])
message_mixed = AIMessage(
content="",
tool_calls=[
{
"id": "call_product",
"name": "ProductV1",
"args": {"name": "Keyboard", "price": 79.99, "stock": 100},
},
{
"id": "call_user",
"name": "UserV2",
"args": {
"username": "alice",
"email": "alice@example.com",
"age": 35,
},
},
],
)
generation_mixed = ChatGeneration(message=message_mixed)
result_mixed = parser_mixed.parse_result([generation_mixed])
assert len(result_mixed) == 2
assert isinstance(result_mixed[0], ProductV1)
assert result_mixed[0].name == "Keyboard" # type: ignore[attr-defined,unused-ignore]
assert result_mixed[0].description is None # type: ignore[attr-defined,unused-ignore]
assert result_mixed[0].stock == 100 # type: ignore[attr-defined,unused-ignore]
assert isinstance(result_mixed[1], UserV2)
assert result_mixed[1].username == "alice"
assert result_mixed[1].bio is None
assert result_mixed[1].age == 35
| NameCollector |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 529077,
"end": 529400
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field(ProjectV2SortBy, graphql_name="node")
| ProjectV2SortByEdge |
python | kamyu104__LeetCode-Solutions | Python/minimum-deletions-to-make-array-divisible.py | {
"start": 68,
"end": 585
} | class ____(object):
def minOperations(self, nums, numsDivide):
"""
:type nums: List[int]
:type numsDivide: List[int]
:rtype: int
"""
def gcd(a, b): # Time: O(log(min(a, b)))
while b:
a, b = b, a%b
return a
g = reduce(gcd, numsDivide)
mn = float("inf")
for x in nums:
if g%x == 0:
mn = min(mn, x)
return sum(x < mn for x in nums) if mn != float("inf") else -1
| Solution |
python | zarr-developers__zarr-python | src/zarr/api/asynchronous.py | {
"start": 3242,
"end": 49204
} | class ____(TypedDict):
shape: NotRequired[tuple[int, ...]]
chunks: NotRequired[tuple[int, ...]]
dtype: NotRequired[np.dtype[np.generic]]
order: NotRequired[Literal["C", "F"]]
filters: NotRequired[tuple[Numcodec, ...] | None]
compressor: NotRequired[CompressorLikev2]
codecs: NotRequired[tuple[Codec, ...]]
def _like_args(a: ArrayLike) -> _LikeArgs:
"""Set default values for shape and chunks if they are not present in the array-like object"""
new: _LikeArgs = {}
shape, chunks = _get_shape_chunks(a)
if shape is not None:
new["shape"] = shape
if chunks is not None:
new["chunks"] = chunks
if hasattr(a, "dtype"):
new["dtype"] = a.dtype
if isinstance(a, AsyncArray | Array):
if isinstance(a.metadata, ArrayV2Metadata):
new["order"] = a.order
new["compressor"] = a.metadata.compressor
new["filters"] = a.metadata.filters
else:
# TODO: Remove type: ignore statement when type inference improves.
# mypy cannot correctly infer the type of a.metadata here for some reason.
new["codecs"] = a.metadata.codecs
else:
# TODO: set default values compressor/codecs
# to do this, we may need to evaluate if this is a v2 or v3 array
# new["compressor"] = "default"
pass
return new
def _handle_zarr_version_or_format(
*, zarr_version: ZarrFormat | None, zarr_format: ZarrFormat | None
) -> ZarrFormat | None:
"""Handle the deprecated zarr_version kwarg and return zarr_format"""
if zarr_format is not None and zarr_version is not None and zarr_format != zarr_version:
raise ValueError(
f"zarr_format {zarr_format} does not match zarr_version {zarr_version}, please only set one"
)
if zarr_version is not None:
warnings.warn(
"zarr_version is deprecated, use zarr_format", ZarrDeprecationWarning, stacklevel=2
)
return zarr_version
return zarr_format
async def consolidate_metadata(
store: StoreLike,
path: str | None = None,
zarr_format: ZarrFormat | None = None,
) -> AsyncGroup:
"""
Consolidate the metadata of all nodes in a hierarchy.
Upon completion, the metadata of the root node in the Zarr hierarchy will be
updated to include all the metadata of child nodes. For Stores that do
not support consolidated metadata, this operation raises a ``TypeError``.
Parameters
----------
store : StoreLike
The store-like object whose metadata you wish to consolidate. See the
[storage documentation in the user guide][user-guide-store-like]
for a description of all valid StoreLike values.
path : str, optional
A path to a group in the store to consolidate at. Only children
below that group will be consolidated.
By default, the root node is used so all the metadata in the
store is consolidated.
zarr_format : {2, 3, None}, optional
The zarr format of the hierarchy. By default the zarr format
is inferred.
Returns
-------
group: AsyncGroup
The group, with the ``consolidated_metadata`` field set to include
the metadata of each child node. If the Store doesn't support
consolidated metadata, this function raises a `TypeError`.
See ``Store.supports_consolidated_metadata``.
"""
store_path = await make_store_path(store, path=path)
if not store_path.store.supports_consolidated_metadata:
store_name = type(store_path.store).__name__
raise TypeError(
f"The Zarr Store in use ({store_name}) doesn't support consolidated metadata",
)
group = await AsyncGroup.open(store_path, zarr_format=zarr_format, use_consolidated=False)
group.store_path.store._check_writable()
members_metadata = {
k: v.metadata
async for k, v in group.members(max_depth=None, use_consolidated_for_children=False)
}
# While consolidating, we want to be explicit about when child groups
# are empty by inserting an empty dict for consolidated_metadata.metadata
for k, v in members_metadata.items():
if isinstance(v, GroupMetadata) and v.consolidated_metadata is None:
v = dataclasses.replace(v, consolidated_metadata=ConsolidatedMetadata(metadata={}))
members_metadata[k] = v
if any(m.zarr_format == 3 for m in members_metadata.values()):
warnings.warn(
"Consolidated metadata is currently not part in the Zarr format 3 specification. It "
"may not be supported by other zarr implementations and may change in the future.",
category=ZarrUserWarning,
stacklevel=1,
)
ConsolidatedMetadata._flat_to_nested(members_metadata)
consolidated_metadata = ConsolidatedMetadata(metadata=members_metadata)
metadata = dataclasses.replace(group.metadata, consolidated_metadata=consolidated_metadata)
group = dataclasses.replace(
group,
metadata=metadata,
)
await group._save_metadata()
return group
async def copy(*args: Any, **kwargs: Any) -> tuple[int, int, int]:
"""
Not implemented.
"""
raise NotImplementedError
async def copy_all(*args: Any, **kwargs: Any) -> tuple[int, int, int]:
"""
Not implemented.
"""
raise NotImplementedError
async def copy_store(*args: Any, **kwargs: Any) -> tuple[int, int, int]:
"""
Not implemented.
"""
raise NotImplementedError
async def load(
*,
store: StoreLike,
path: str | None = None,
zarr_format: ZarrFormat | None = None,
zarr_version: ZarrFormat | None = None,
) -> NDArrayLikeOrScalar | dict[str, NDArrayLikeOrScalar]:
"""Load data from an array or group into memory.
Parameters
----------
store : StoreLike
StoreLike object to open. See the
[storage documentation in the user guide][user-guide-store-like]
for a description of all valid StoreLike values.
path : str or None, optional
The path within the store from which to load.
Returns
-------
out
If the path contains an array, out will be a numpy array. If the path contains
a group, out will be a dict-like object where keys are array names and values
are numpy arrays.
See Also
--------
save
Notes
-----
If loading data from a group of arrays, data will not be immediately loaded into
memory. Rather, arrays will be loaded into memory as they are requested.
"""
zarr_format = _handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format)
obj = await open(store=store, path=path, zarr_format=zarr_format)
if isinstance(obj, AsyncArray):
return await obj.getitem(slice(None))
else:
raise NotImplementedError("loading groups not yet supported")
async def open(
*,
store: StoreLike | None = None,
mode: AccessModeLiteral | None = None,
zarr_version: ZarrFormat | None = None, # deprecated
zarr_format: ZarrFormat | None = None,
path: str | None = None,
storage_options: dict[str, Any] | None = None,
**kwargs: Any, # TODO: type kwargs as valid args to open_array
) -> AnyAsyncArray | AsyncGroup:
"""Convenience function to open a group or array using file-mode-like semantics.
Parameters
----------
store : StoreLike or None, default=None
StoreLike object to open. See the
[storage documentation in the user guide][user-guide-store-like]
for a description of all valid StoreLike values.
mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist); 'a' means read/write (create if doesn't
exist); 'w' means create (overwrite if exists); 'w-' means create
(fail if exists).
If the store is read-only, the default is 'r'; otherwise, it is 'a'.
zarr_format : {2, 3, None}, optional
The zarr format to use when saving.
path : str or None, optional
The path within the store to open.
storage_options : dict
If using an fsspec URL to create the store, these will be passed to
the backend implementation. Ignored otherwise.
**kwargs
Additional parameters are passed through to [`zarr.creation.open_array`][] or
[`open_group`][zarr.api.asynchronous.open_group].
Returns
-------
z : array or group
Return type depends on what exists in the given store.
"""
zarr_format = _handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format)
if mode is None:
if isinstance(store, (Store, StorePath)) and store.read_only:
mode = "r"
else:
mode = "a"
store_path = await make_store_path(store, mode=mode, path=path, storage_options=storage_options)
# TODO: the mode check below seems wrong!
if "shape" not in kwargs and mode in {"a", "r", "r+", "w"}:
try:
metadata_dict = await get_array_metadata(store_path, zarr_format=zarr_format)
# TODO: remove this cast when we fix typing for array metadata dicts
_metadata_dict = cast("ArrayMetadataDict", metadata_dict)
# for v2, the above would already have raised an exception if not an array
zarr_format = _metadata_dict["zarr_format"]
is_v3_array = zarr_format == 3 and _metadata_dict.get("node_type") == "array"
if is_v3_array or zarr_format == 2:
return AsyncArray(
store_path=store_path, metadata=_metadata_dict, config=kwargs.get("config")
)
except (AssertionError, FileNotFoundError, NodeTypeValidationError):
pass
return await open_group(store=store_path, zarr_format=zarr_format, mode=mode, **kwargs)
try:
return await open_array(store=store_path, zarr_format=zarr_format, mode=mode, **kwargs)
except (KeyError, NodeTypeValidationError):
# KeyError for a missing key
# NodeTypeValidationError for failing to parse node metadata as an array when it's
# actually a group
return await open_group(store=store_path, zarr_format=zarr_format, mode=mode, **kwargs)
async def open_consolidated(
*args: Any, use_consolidated: Literal[True] = True, **kwargs: Any
) -> AsyncGroup:
"""
Alias for [`open_group`][zarr.api.asynchronous.open_group] with ``use_consolidated=True``.
"""
if use_consolidated is not True:
raise TypeError(
"'use_consolidated' must be 'True' in 'open_consolidated'. Use 'open' with "
"'use_consolidated=False' to bypass consolidated metadata."
)
return await open_group(*args, use_consolidated=use_consolidated, **kwargs)
async def save(
store: StoreLike,
*args: NDArrayLike,
zarr_version: ZarrFormat | None = None, # deprecated
zarr_format: ZarrFormat | None = None,
path: str | None = None,
**kwargs: Any, # TODO: type kwargs as valid args to save
) -> None:
"""Convenience function to save an array or group of arrays to the local file system.
Parameters
----------
store : StoreLike
StoreLike object to open. See the
[storage documentation in the user guide][user-guide-store-like]
for a description of all valid StoreLike values.
*args : ndarray
NumPy arrays with data to save.
zarr_format : {2, 3, None}, optional
The zarr format to use when saving.
path : str or None, optional
The path within the group where the arrays will be saved.
**kwargs
NumPy arrays with data to save.
"""
zarr_format = _handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format)
if len(args) == 0 and len(kwargs) == 0:
raise ValueError("at least one array must be provided")
if len(args) == 1 and len(kwargs) == 0:
await save_array(store, args[0], zarr_format=zarr_format, path=path)
else:
await save_group(store, *args, zarr_format=zarr_format, path=path, **kwargs)
async def save_array(
store: StoreLike,
arr: NDArrayLike,
*,
zarr_version: ZarrFormat | None = None, # deprecated
zarr_format: ZarrFormat | None = None,
path: str | None = None,
storage_options: dict[str, Any] | None = None,
**kwargs: Any, # TODO: type kwargs as valid args to create
) -> None:
"""Convenience function to save a NumPy array to the local file system, following a
similar API to the NumPy save() function.
Parameters
----------
store : StoreLike
StoreLike object to open. See the
[storage documentation in the user guide][user-guide-store-like]
for a description of all valid StoreLike values.
arr : ndarray
NumPy array with data to save.
zarr_format : {2, 3, None}, optional
The zarr format to use when saving. The default is ``None``, which will
use the default Zarr format defined in the global configuration object.
path : str or None, optional
The path within the store where the array will be saved.
storage_options : dict
If using an fsspec URL to create the store, these will be passed to
the backend implementation. Ignored otherwise.
**kwargs
Passed through to [`create`][zarr.api.asynchronous.create], e.g., compressor.
"""
zarr_format = (
_handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format)
or _default_zarr_format()
)
if not isinstance(arr, NDArrayLike):
raise TypeError("arr argument must be numpy or other NDArrayLike array")
mode = kwargs.pop("mode", "a")
store_path = await make_store_path(store, path=path, mode=mode, storage_options=storage_options)
if np.isscalar(arr):
arr = np.array(arr)
shape = arr.shape
chunks = getattr(arr, "chunks", None) # for array-likes with chunks attribute
overwrite = kwargs.pop("overwrite", None) or _infer_overwrite(mode)
zarr_dtype = get_data_type_from_native_dtype(arr.dtype)
new = await AsyncArray._create(
store_path,
zarr_format=zarr_format,
shape=shape,
dtype=zarr_dtype,
chunks=chunks,
overwrite=overwrite,
**kwargs,
)
await new.setitem(slice(None), arr)
async def save_group(
store: StoreLike,
*args: NDArrayLike,
zarr_version: ZarrFormat | None = None, # deprecated
zarr_format: ZarrFormat | None = None,
path: str | None = None,
storage_options: dict[str, Any] | None = None,
**kwargs: NDArrayLike,
) -> None:
"""Convenience function to save several NumPy arrays to the local file system, following a
similar API to the NumPy savez()/savez_compressed() functions.
Parameters
----------
store : StoreLike
StoreLike object to open. See the
[storage documentation in the user guide][user-guide-store-like]
for a description of all valid StoreLike values.
*args : ndarray
NumPy arrays with data to save.
zarr_format : {2, 3, None}, optional
The zarr format to use when saving.
path : str or None, optional
Path within the store where the group will be saved.
storage_options : dict
If using an fsspec URL to create the store, these will be passed to
the backend implementation. Ignored otherwise.
**kwargs
NumPy arrays with data to save.
"""
store_path = await make_store_path(store, path=path, mode="w", storage_options=storage_options)
zarr_format = (
_handle_zarr_version_or_format(
zarr_version=zarr_version,
zarr_format=zarr_format,
)
or _default_zarr_format()
)
for arg in args:
if not isinstance(arg, NDArrayLike):
raise TypeError(
"All arguments must be numpy or other NDArrayLike arrays (except store, path, storage_options, and zarr_format)"
)
for k, v in kwargs.items():
if not isinstance(v, NDArrayLike):
raise TypeError(f"Keyword argument '{k}' must be a numpy or other NDArrayLike array")
if len(args) == 0 and len(kwargs) == 0:
raise ValueError("at least one array must be provided")
aws = []
for i, arr in enumerate(args):
aws.append(
save_array(
store_path,
arr,
zarr_format=zarr_format,
path=f"arr_{i}",
storage_options=storage_options,
)
)
for k, arr in kwargs.items():
aws.append(save_array(store_path, arr, zarr_format=zarr_format, path=k))
await asyncio.gather(*aws)
@deprecated("Use AsyncGroup.tree instead.", category=ZarrDeprecationWarning)
async def tree(grp: AsyncGroup, expand: bool | None = None, level: int | None = None) -> Any:
"""Provide a rich display of the hierarchy.
!!! warning "Deprecated"
`zarr.tree()` is deprecated since v3.0.0 and will be removed in a future release.
Use `group.tree()` instead.
Parameters
----------
grp : Group
Zarr or h5py group.
expand : bool, optional
Only relevant for HTML representation. If True, tree will be fully expanded.
level : int, optional
Maximum depth to descend into hierarchy.
Returns
-------
TreeRepr
A pretty-printable object displaying the hierarchy.
"""
return await grp.tree(expand=expand, level=level)
async def array(data: npt.ArrayLike | AnyArray, **kwargs: Any) -> AnyAsyncArray:
"""Create an array filled with `data`.
Parameters
----------
data : array_like
The data to fill the array with.
**kwargs
Passed through to [`create`][zarr.api.asynchronous.create].
Returns
-------
array : array
The new array.
"""
if isinstance(data, Array):
return await from_array(data=data, **kwargs)
# ensure data is array-like
if not hasattr(data, "shape") or not hasattr(data, "dtype"):
data = np.asanyarray(data)
# setup dtype
kw_dtype = kwargs.get("dtype")
if kw_dtype is None and hasattr(data, "dtype"):
kwargs["dtype"] = data.dtype
else:
kwargs["dtype"] = kw_dtype
# setup shape and chunks
data_shape, data_chunks = _get_shape_chunks(data)
kwargs["shape"] = data_shape
kw_chunks = kwargs.get("chunks")
if kw_chunks is None:
kwargs["chunks"] = data_chunks
else:
kwargs["chunks"] = kw_chunks
read_only = kwargs.pop("read_only", False)
if read_only:
raise ValueError("read_only=True is no longer supported when creating new arrays")
# instantiate array
z = await create(**kwargs)
# fill with data
await z.setitem(Ellipsis, data)
return z
async def group(
    *,  # Note: this is a change from v2
    store: StoreLike | None = None,
    overwrite: bool = False,
    chunk_store: StoreLike | None = None,  # not used
    cache_attrs: bool | None = None,  # not used, default changed
    synchronizer: Any | None = None,  # not used
    path: str | None = None,
    zarr_version: ZarrFormat | None = None,  # deprecated
    zarr_format: ZarrFormat | None = None,
    meta_array: Any | None = None,  # not used
    attributes: dict[str, JSON] | None = None,
    storage_options: dict[str, Any] | None = None,
) -> AsyncGroup:
    """Create a group.

    Thin convenience wrapper around
    [`open_group`][zarr.api.asynchronous.open_group]: ``overwrite=True``
    translates to mode ``"w"`` (truncate), anything else to ``"a"``
    (open-or-create).

    Parameters
    ----------
    store : StoreLike or None, default=None
        StoreLike object to open. See the
        [storage documentation in the user guide][user-guide-store-like]
        for a description of all valid StoreLike values.
    overwrite : bool, optional
        If True, delete any pre-existing data in `store` at `path` before
        creating the group.
    chunk_store : StoreLike or None, default=None
        Separate storage for chunks. Not implemented.
    cache_attrs : bool, optional
        If True (default), user attributes will be cached for attribute read
        operations. If False, user attributes are reloaded from the store prior
        to all attribute read operations.
    synchronizer : object, optional
        Array synchronizer.
    path : str, optional
        Group path within store.
    meta_array : array-like, optional
        An array instance to use for determining arrays to create and return
        to users. Use `numpy.empty(())` by default.
    zarr_format : {2, 3, None}, optional
        The zarr format to use when saving.
    storage_options : dict
        If using an fsspec URL to create the store, these will be passed to
        the backend implementation. Ignored otherwise.

    Returns
    -------
    g : group
        The new group.
    """
    # `overwrite` maps directly onto the persistence mode used by open_group.
    mode: AccessModeLiteral = "w" if overwrite else "a"
    return await open_group(
        store=store,
        mode=mode,
        chunk_store=chunk_store,
        cache_attrs=cache_attrs,
        synchronizer=synchronizer,
        path=path,
        zarr_version=zarr_version,
        zarr_format=zarr_format,
        meta_array=meta_array,
        attributes=attributes,
        storage_options=storage_options,
    )
async def create_group(
    *,
    store: StoreLike,
    path: str | None = None,
    overwrite: bool = False,
    zarr_format: ZarrFormat | None = None,
    attributes: dict[str, Any] | None = None,
    storage_options: dict[str, Any] | None = None,
) -> AsyncGroup:
    """Create a group.

    Parameters
    ----------
    store : StoreLike
        StoreLike object to open. See the
        [storage documentation in the user guide][user-guide-store-like]
        for a description of all valid StoreLike values.
    path : str, optional
        Group path within store.
    overwrite : bool, optional
        If True, pre-existing data at ``path`` will be deleted before
        creating the group.
    zarr_format : {2, 3, None}, optional
        The zarr format to use when saving.
        If no ``zarr_format`` is provided, the default format will be used.
        This default can be changed by modifying the value of ``default_zarr_format``
        in [`zarr.config`][zarr.config].
    storage_options : dict
        If using an fsspec URL to create the store, these will be passed to
        the backend implementation. Ignored otherwise.

    Returns
    -------
    AsyncGroup
        The new group.
    """
    # Fall back to the configured default format when none was requested.
    chosen_format = zarr_format if zarr_format is not None else _default_zarr_format()
    # Mode "a": open if present, create otherwise; `overwrite` below controls
    # whether pre-existing data is removed first.
    store_path = await make_store_path(store, path=path, mode="a", storage_options=storage_options)
    return await AsyncGroup.from_store(
        store=store_path,
        zarr_format=chosen_format,
        overwrite=overwrite,
        attributes=attributes,
    )
async def open_group(
    store: StoreLike | None = None,
    *,  # Note: this is a change from v2
    mode: AccessModeLiteral = "a",
    cache_attrs: bool | None = None,  # not used, default changed
    synchronizer: Any = None,  # not used
    path: str | None = None,
    chunk_store: StoreLike | None = None,  # not used
    storage_options: dict[str, Any] | None = None,
    zarr_version: ZarrFormat | None = None,  # deprecated
    zarr_format: ZarrFormat | None = None,
    meta_array: Any | None = None,  # not used
    attributes: dict[str, JSON] | None = None,
    use_consolidated: bool | str | None = None,
) -> AsyncGroup:
    """Open a group using file-mode-like semantics.

    Parameters
    ----------
    store : StoreLike or None, default=None
        StoreLike object to open. See the
        [storage documentation in the user guide][user-guide-store-like]
        for a description of all valid StoreLike values.
    mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
        Persistence mode: 'r' means read only (must exist); 'r+' means
        read/write (must exist); 'a' means read/write (create if doesn't
        exist); 'w' means create (overwrite if exists); 'w-' means create
        (fail if exists).
    cache_attrs : bool, optional
        If True (default), user attributes will be cached for attribute read
        operations. If False, user attributes are reloaded from the store prior
        to all attribute read operations.
    synchronizer : object, optional
        Array synchronizer.
    path : str, optional
        Group path within store.
    chunk_store : StoreLike or None, default=None
        Separate storage for chunks. See the
        [storage documentation in the user guide][user-guide-store-like]
        for a description of all valid StoreLike values.
    storage_options : dict
        If using an fsspec URL to create the store, these will be passed to
        the backend implementation. Ignored otherwise.
    meta_array : array-like, optional
        An array instance to use for determining arrays to create and return
        to users. Use `numpy.empty(())` by default.
    attributes : dict
        A dictionary of JSON-serializable values with user-defined attributes.
    use_consolidated : bool or str, default None
        Whether to use consolidated metadata.

        By default, consolidated metadata is used if it's present in the
        store (in the ``zarr.json`` for Zarr format 3 and in the ``.zmetadata`` file
        for Zarr format 2).

        To explicitly require consolidated metadata, set ``use_consolidated=True``,
        which will raise an exception if consolidated metadata is not found.

        To explicitly *not* use consolidated metadata, set ``use_consolidated=False``,
        which will fall back to using the regular, non consolidated metadata.

        Zarr format 2 allowed configuring the key storing the consolidated metadata
        (``.zmetadata`` by default). Specify the custom key as ``use_consolidated``
        to load consolidated metadata from a non-default key.

    Returns
    -------
    g : group
        The new group.
    """
    # Fold the deprecated ``zarr_version`` argument into ``zarr_format``.
    zarr_format = _handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format)

    # Legacy v2 keyword arguments are accepted for API compatibility but have
    # no effect here: warn rather than silently ignore them.
    if cache_attrs is not None:
        warnings.warn("cache_attrs is not yet implemented", ZarrRuntimeWarning, stacklevel=2)
    if synchronizer is not None:
        warnings.warn("synchronizer is not yet implemented", ZarrRuntimeWarning, stacklevel=2)
    if meta_array is not None:
        warnings.warn("meta_array is not yet implemented", ZarrRuntimeWarning, stacklevel=2)
    if chunk_store is not None:
        warnings.warn("chunk_store is not yet implemented", ZarrRuntimeWarning, stacklevel=2)

    store_path = await make_store_path(store, mode=mode, storage_options=storage_options, path=path)
    if attributes is None:
        attributes = {}

    try:
        # Modes that permit reading first try to open an existing group.
        if mode in _READ_MODES:
            return await AsyncGroup.open(
                store_path, zarr_format=zarr_format, use_consolidated=use_consolidated
            )
    except (KeyError, FileNotFoundError):
        # No group at this location; fall through to creation (if the mode allows).
        pass
    if mode in _CREATE_MODES:
        # Creation modes differ only in whether pre-existing data is removed.
        overwrite = _infer_overwrite(mode)
        _zarr_format = zarr_format or _default_zarr_format()
        return await AsyncGroup.from_store(
            store_path,
            zarr_format=_zarr_format,
            overwrite=overwrite,
            attributes=attributes,
        )
    msg = f"No group found in store {store!r} at path {store_path.path!r}"
    raise GroupNotFoundError(msg)
async def create(
    shape: tuple[int, ...] | int,
    *,  # Note: this is a change from v2
    chunks: tuple[int, ...] | int | bool | None = None,
    dtype: ZDTypeLike | None = None,
    compressor: CompressorLike = "auto",
    fill_value: Any | None = DEFAULT_FILL_VALUE,
    order: MemoryOrder | None = None,
    store: StoreLike | None = None,
    synchronizer: Any | None = None,
    overwrite: bool = False,
    path: PathLike | None = None,
    chunk_store: StoreLike | None = None,
    filters: Iterable[dict[str, JSON] | Numcodec] | None = None,
    cache_metadata: bool | None = None,
    cache_attrs: bool | None = None,
    read_only: bool | None = None,
    object_codec: Codec | None = None,  # TODO: type has changed
    dimension_separator: Literal[".", "/"] | None = None,
    write_empty_chunks: bool | None = None,
    zarr_version: ZarrFormat | None = None,  # deprecated
    zarr_format: ZarrFormat | None = None,
    meta_array: Any | None = None,  # TODO: need type
    attributes: dict[str, JSON] | None = None,
    # v3 only
    chunk_shape: tuple[int, ...] | int | None = None,
    chunk_key_encoding: (
        ChunkKeyEncoding
        | tuple[Literal["default"], Literal[".", "/"]]
        | tuple[Literal["v2"], Literal[".", "/"]]
        | None
    ) = None,
    codecs: Iterable[Codec | dict[str, JSON]] | None = None,
    dimension_names: DimensionNames = None,
    storage_options: dict[str, Any] | None = None,
    config: ArrayConfigLike | None = None,
    **kwargs: Any,
) -> AnyAsyncArray:
    """Create an array.

    Parameters
    ----------
    shape : int or tuple of ints
        Array shape.
    chunks : int or tuple of ints, optional
        Chunk shape. If True, will be guessed from ``shape`` and ``dtype``. If
        False, will be set to ``shape``, i.e., single chunk for the whole array.
        If an int, the chunk size in each dimension will be given by the value
        of ``chunks``. Default is True.
    dtype : str or dtype, optional
        NumPy dtype.
    compressor : Codec, optional
        Primary compressor to compress chunk data.
        Zarr format 2 only. Zarr format 3 arrays should use ``codecs`` instead.

        If neither ``compressor`` nor ``filters`` are provided, the default compressor
        [`zarr.codecs.ZstdCodec`][] is used.

        If ``compressor`` is set to ``None``, no compression is used.
    fill_value : Any, optional
        Fill value for the array.
    order : {'C', 'F'}, optional
        Deprecated in favor of the ``config`` keyword argument.
        Pass ``{'order': <value>}`` to ``create`` instead of using this parameter.
        Memory layout to be used within each chunk.
        If not specified, the ``array.order`` parameter in the global config will be used.
    store : StoreLike or None, default=None
        StoreLike object to open. See the
        [storage documentation in the user guide][user-guide-store-like]
        for a description of all valid StoreLike values.
    synchronizer : object, optional
        Array synchronizer.
    overwrite : bool, optional
        If True, delete all pre-existing data in ``store`` at ``path`` before
        creating the array.
    path : str, optional
        Path under which array is stored.
    chunk_store : StoreLike or None, default=None
        Separate storage for chunks. If not provided, ``store`` will be used
        for storage of both chunks and metadata.
    filters : Iterable[Codec] | Literal["auto"], optional
        Iterable of filters to apply to each chunk of the array, in order, before serializing that
        chunk to bytes.

        For Zarr format 3, a "filter" is a codec that takes an array and returns an array,
        and these values must be instances of [`zarr.abc.codec.ArrayArrayCodec`][], or a
        dict representations of [`zarr.abc.codec.ArrayArrayCodec`][].

        For Zarr format 2, a "filter" can be any numcodecs codec; you should ensure that the
        the order if your filters is consistent with the behavior of each filter.

        The default value of ``"auto"`` instructs Zarr to use a default used based on the data
        type of the array and the Zarr format specified. For all data types in Zarr V3, and most
        data types in Zarr V2, the default filters are empty. The only cases where default filters
        are not empty is when the Zarr format is 2, and the data type is a variable-length data type like
        [`zarr.dtype.VariableLengthUTF8`][] or [`zarr.dtype.VariableLengthUTF8`][]. In these cases,
        the default filters contains a single element which is a codec specific to that particular data type.

        To create an array with no filters, provide an empty iterable or the value ``None``.
    cache_metadata : bool, optional
        If True, array configuration metadata will be cached for the
        lifetime of the object. If False, array metadata will be reloaded
        prior to all data access and modification operations (may incur
        overhead depending on storage and data access pattern).
    cache_attrs : bool, optional
        If True (default), user attributes will be cached for attribute read
        operations. If False, user attributes are reloaded from the store prior
        to all attribute read operations.
    read_only : bool, optional
        True if array should be protected against modification.
    object_codec : Codec, optional
        A codec to encode object arrays, only needed if dtype=object.
    dimension_separator : {'.', '/'}, optional
        Separator placed between the dimensions of a chunk.
        Zarr format 2 only. Zarr format 3 arrays should use ``chunk_key_encoding`` instead.
    write_empty_chunks : bool, optional
        Deprecated in favor of the ``config`` keyword argument.
        Pass ``{'write_empty_chunks': <value>}`` to ``create`` instead of using this parameter.
        If True, all chunks will be stored regardless of their
        contents. If False, each chunk is compared to the array's fill value
        prior to storing. If a chunk is uniformly equal to the fill value, then
        that chunk is not be stored, and the store entry for that chunk's key
        is deleted.
    zarr_format : {2, 3, None}, optional
        The Zarr format to use when creating an array. The default is ``None``,
        which instructs Zarr to choose the default Zarr format value defined in the
        runtime configuration.
    meta_array : array-like, optional
        Not implemented.
    attributes : dict[str, JSON], optional
        A dictionary of user attributes to store with the array.
    chunk_shape : int or tuple of ints, optional
        The shape of the Array's chunks (default is None).
        Zarr format 3 only. Zarr format 2 arrays should use `chunks` instead.
    chunk_key_encoding : ChunkKeyEncoding, optional
        A specification of how the chunk keys are represented in storage.
        Zarr format 3 only. Zarr format 2 arrays should use `dimension_separator` instead.
        Default is ``("default", "/")``.
    codecs : Sequence of Codecs or dicts, optional
        An iterable of Codec or dict serializations of Codecs. Zarr V3 only.

        The elements of ``codecs`` specify the transformation from array values to stored bytes.
        Zarr format 3 only. Zarr format 2 arrays should use ``filters`` and ``compressor`` instead.

        If no codecs are provided, default codecs will be used based on the data type of the array.
        For most data types, the default codecs are the tuple ``(BytesCodec(), ZstdCodec())``;
        data types that require a special [`zarr.abc.codec.ArrayBytesCodec`][], like variable-length strings or bytes,
        will use the [`zarr.abc.codec.ArrayBytesCodec`][] required for the data type instead of [`zarr.codecs.BytesCodec`][].
    dimension_names : Iterable[str | None] | None = None
        An iterable of dimension names. Zarr format 3 only.
    storage_options : dict
        If using an fsspec URL to create the store, these will be passed to
        the backend implementation. Ignored otherwise.
    config : ArrayConfigLike, optional
        Runtime configuration of the array. If provided, will override the
        default values from `zarr.config.array`.

    Returns
    -------
    z : array
        The array.
    """
    # Fold the deprecated ``zarr_version`` argument into ``zarr_format`` and
    # fall back to the configured default when neither was provided.
    zarr_format = (
        _handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format)
        or _default_zarr_format()
    )

    # Legacy v2 keyword arguments are accepted for API compatibility but have
    # no effect here: warn rather than silently ignore them.
    if synchronizer is not None:
        warnings.warn("synchronizer is not yet implemented", ZarrRuntimeWarning, stacklevel=2)
    if chunk_store is not None:
        warnings.warn("chunk_store is not yet implemented", ZarrRuntimeWarning, stacklevel=2)
    if cache_metadata is not None:
        warnings.warn("cache_metadata is not yet implemented", ZarrRuntimeWarning, stacklevel=2)
    if cache_attrs is not None:
        warnings.warn("cache_attrs is not yet implemented", ZarrRuntimeWarning, stacklevel=2)
    if object_codec is not None:
        warnings.warn("object_codec is not yet implemented", ZarrRuntimeWarning, stacklevel=2)
    if read_only is not None:
        warnings.warn("read_only is not yet implemented", ZarrRuntimeWarning, stacklevel=2)
    if meta_array is not None:
        warnings.warn("meta_array is not yet implemented", ZarrRuntimeWarning, stacklevel=2)
    if write_empty_chunks is not None:
        _warn_write_empty_chunks_kwarg()

    # ``mode`` is only accepted via **kwargs (backwards compatibility); default
    # to append-or-create semantics.
    mode = kwargs.pop("mode", None)
    if mode is None:
        mode = "a"
    store_path = await make_store_path(store, path=path, mode=mode, storage_options=storage_options)

    config_parsed = parse_array_config(config)
    if write_empty_chunks is not None:
        # The deprecated keyword takes precedence over the value in ``config``.
        if config is not None:
            msg = (
                "Both write_empty_chunks and config keyword arguments are set. "
                "This is redundant. When both are set, write_empty_chunks will be used instead "
                "of the value in config."
            )
            warnings.warn(ZarrUserWarning(msg), stacklevel=1)
        config_parsed = dataclasses.replace(config_parsed, write_empty_chunks=write_empty_chunks)

    return await AsyncArray._create(
        store_path,
        shape=shape,
        chunks=chunks,
        dtype=dtype,
        compressor=compressor,
        fill_value=fill_value,
        overwrite=overwrite,
        filters=filters,
        dimension_separator=dimension_separator,
        order=order,
        zarr_format=zarr_format,
        chunk_shape=chunk_shape,
        chunk_key_encoding=chunk_key_encoding,
        codecs=codecs,
        dimension_names=dimension_names,
        attributes=attributes,
        config=config_parsed,
        **kwargs,
    )
async def empty(shape: tuple[int, ...], **kwargs: Any) -> AnyAsyncArray:
    """Create an empty array of the given shape.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the empty array.
    **kwargs
        Keyword arguments passed to [`create`][zarr.api.asynchronous.create].

    Returns
    -------
    Array
        The new array.

    Notes
    -----
    The contents of an empty Zarr array are not defined. On attempting to
    retrieve data from an empty Zarr array, any values may be returned,
    and these are not guaranteed to be stable from one access to the next.
    """
    return await create(shape=shape, **kwargs)
async def empty_like(a: ArrayLike, **kwargs: Any) -> AnyAsyncArray:
    """Create an empty array with the same properties as `a`.

    Parameters
    ----------
    a : array-like
        Template array whose properties (shape, dtype, chunks, ...) are copied.
    **kwargs
        Keyword arguments passed to [`create`][zarr.api.asynchronous.create];
        these override anything derived from `a`.

    Returns
    -------
    Array
        The new array.

    Notes
    -----
    The contents of an empty Zarr array are not defined. On attempting to
    retrieve data from an empty Zarr array, any values may be returned,
    and these are not guaranteed to be stable from one access to the next.
    """
    merged = _like_args(a) | kwargs
    if isinstance(a, (AsyncArray, Array)):
        # Propagate the template's fill value unless the caller chose one.
        merged.setdefault("fill_value", a.metadata.fill_value)
    return await empty(**merged)  # type: ignore[arg-type]
# TODO: add type annotations for fill_value and kwargs
async def full(shape: tuple[int, ...], fill_value: Any, **kwargs: Any) -> AnyAsyncArray:
    """Create an array whose uninitialized portions read back as `fill_value`.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the array.
    fill_value : scalar
        Value returned for any chunk that has not been written.
    **kwargs
        Keyword arguments passed to [`create`][zarr.api.asynchronous.create].

    Returns
    -------
    Array
        The new array.
    """
    return await create(shape=shape, fill_value=fill_value, **kwargs)
# TODO: add type annotations for kwargs
async def full_like(a: ArrayLike, **kwargs: Any) -> AnyAsyncArray:
    """Create a filled array with the same properties as `a`.

    Parameters
    ----------
    a : array-like
        Template array whose properties are copied.
    **kwargs
        Keyword arguments passed to [`zarr.api.asynchronous.create`][];
        these override anything derived from `a`.

    Returns
    -------
    Array
        The new array.
    """
    merged = _like_args(a) | kwargs
    if isinstance(a, (AsyncArray, Array)):
        # Propagate the template's fill value unless the caller chose one.
        merged.setdefault("fill_value", a.metadata.fill_value)
    return await full(**merged)  # type: ignore[arg-type]
async def ones(shape: tuple[int, ...], **kwargs: Any) -> AnyAsyncArray:
    """Create an array whose uninitialized portions read back as one.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the array.
    **kwargs
        Keyword arguments passed to [`zarr.api.asynchronous.create`][].

    Returns
    -------
    Array
        The new array.
    """
    return await create(shape=shape, fill_value=1, **kwargs)
async def ones_like(a: ArrayLike, **kwargs: Any) -> AnyAsyncArray:
    """Create an array of ones with the same properties as `a`.

    Parameters
    ----------
    a : array-like
        Template array whose properties are copied.
    **kwargs
        Keyword arguments passed to [`zarr.api.asynchronous.create`][].

    Returns
    -------
    Array
        The new array.
    """
    merged = _like_args(a) | kwargs
    return await ones(**merged)  # type: ignore[arg-type]
async def open_array(
    *,  # note: this is a change from v2
    store: StoreLike | None = None,
    zarr_version: ZarrFormat | None = None,  # deprecated
    zarr_format: ZarrFormat | None = None,
    path: PathLike = "",
    storage_options: dict[str, Any] | None = None,
    **kwargs: Any,  # TODO: type kwargs as valid args to save
) -> AnyAsyncArray:
    """Open an array using file-mode-like semantics.

    An existing array at `path` is opened when possible; otherwise, if the
    mode permits writing, a new array is created from ``**kwargs``.

    Parameters
    ----------
    store : StoreLike
        StoreLike object to open. See the
        [storage documentation in the user guide][user-guide-store-like]
        for a description of all valid StoreLike values.
    zarr_version : {2, 3, None}, optional
        The zarr format to use when saving. Deprecated in favor of zarr_format.
    zarr_format : {2, 3, None}, optional
        The zarr format to use when saving.
    path : str, optional
        Path in store to array.
    storage_options : dict
        If using an fsspec URL to create the store, these will be passed to
        the backend implementation. Ignored otherwise.
    **kwargs
        Any keyword arguments to pass to [`create`][zarr.api.asynchronous.create].

    Returns
    -------
    AsyncArray
        The opened array.
    """
    mode = kwargs.pop("mode", None)
    store_path = await make_store_path(store, path=path, mode=mode, storage_options=storage_options)
    zarr_format = _handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format)

    if "write_empty_chunks" in kwargs:
        _warn_write_empty_chunks_kwarg()

    try:
        return await AsyncArray.open(store_path, zarr_format=zarr_format)
    except FileNotFoundError as err:
        # Nothing at this path: create a new array when the mode permits it.
        may_create = not store_path.read_only and mode in _CREATE_MODES
        if may_create:
            return await create(
                store=store_path,
                zarr_format=zarr_format or _default_zarr_format(),
                overwrite=_infer_overwrite(mode),
                **kwargs,
            )
        msg = f"No array found in store {store_path.store} at path {store_path.path}"
        raise ArrayNotFoundError(msg) from err
async def open_like(a: ArrayLike, path: str, **kwargs: Any) -> AnyAsyncArray:
    """Open a persistent array with the same properties as `a`.

    Parameters
    ----------
    a : Array
        The shape and data-type of a define these same attributes of the returned array.
    path : str
        The path to the new array.
    **kwargs
        Any keyword arguments to pass to the array constructor.

    Returns
    -------
    AsyncArray
        The opened array.
    """
    merged = _like_args(a) | kwargs
    if isinstance(a, (AsyncArray, Array)):
        # Propagate the template's fill value unless the caller chose one.
        merged.setdefault("fill_value", a.metadata.fill_value)
    return await open_array(path=path, **merged)  # type: ignore[arg-type]
async def zeros(shape: tuple[int, ...], **kwargs: Any) -> AnyAsyncArray:
    """Create an array whose uninitialized portions read back as zero.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the array.
    **kwargs
        Keyword arguments passed to [`zarr.api.asynchronous.create`][].

    Returns
    -------
    Array
        The new array.
    """
    return await create(shape=shape, fill_value=0, **kwargs)
async def zeros_like(a: ArrayLike, **kwargs: Any) -> AnyAsyncArray:
    """Create an array of zeros with the same properties as `a`.

    Parameters
    ----------
    a : array-like
        Template array whose properties are copied.
    **kwargs
        Keyword arguments passed to [`create`][zarr.api.asynchronous.create].

    Returns
    -------
    Array
        The new array.
    """
    merged = _like_args(a) | kwargs
    return await zeros(**merged)  # type: ignore[arg-type]
| _LikeArgs |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/output/windows10.py | {
"start": 622,
"end": 4362
} | class ____:
"""
Windows 10 output abstraction. This enables and uses vt100 escape sequences.
"""
def __init__(
self, stdout: TextIO, default_color_depth: ColorDepth | None = None
) -> None:
self.default_color_depth = default_color_depth
self.win32_output = Win32Output(stdout, default_color_depth=default_color_depth)
self.vt100_output = Vt100_Output(
stdout, lambda: Size(0, 0), default_color_depth=default_color_depth
)
self._hconsole = HANDLE(windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE))
def flush(self) -> None:
"""
Write to output stream and flush.
"""
original_mode = DWORD(0)
# Remember the previous console mode.
windll.kernel32.GetConsoleMode(self._hconsole, byref(original_mode))
# Enable processing of vt100 sequences.
windll.kernel32.SetConsoleMode(
self._hconsole,
DWORD(ENABLE_PROCESSED_INPUT | ENABLE_VIRTUAL_TERMINAL_PROCESSING),
)
try:
self.vt100_output.flush()
finally:
# Restore console mode.
windll.kernel32.SetConsoleMode(self._hconsole, original_mode)
@property
def responds_to_cpr(self) -> bool:
return False # We don't need this on Windows.
def __getattr__(self, name: str) -> Any:
# NOTE: Now that we use "virtual terminal input" on
# Windows, both input and output are done through
# ANSI escape sequences on Windows. This means, we
# should enable bracketed paste like on Linux, and
# enable mouse support by calling the vt100_output.
if name in (
"get_size",
"get_rows_below_cursor_position",
"scroll_buffer_to_prompt",
"get_win32_screen_buffer_info",
# "enable_mouse_support",
# "disable_mouse_support",
# "enable_bracketed_paste",
# "disable_bracketed_paste",
):
return getattr(self.win32_output, name)
else:
return getattr(self.vt100_output, name)
def get_default_color_depth(self) -> ColorDepth:
"""
Return the default color depth for a windows terminal.
Contrary to the Vt100 implementation, this doesn't depend on a $TERM
variable.
"""
if self.default_color_depth is not None:
return self.default_color_depth
# Previously, we used `DEPTH_4_BIT`, even on Windows 10. This was
# because true color support was added after "Console Virtual Terminal
# Sequences" support was added, and there was no good way to detect
# what support was given.
# 24bit color support was added in 2016, so let's assume it's safe to
# take that as a default:
# https://devblogs.microsoft.com/commandline/24-bit-color-in-the-windows-console/
return ColorDepth.TRUE_COLOR
Output.register(Windows10_Output)
def is_win_vt100_enabled() -> bool:
"""
Returns True when we're running Windows and VT100 escape sequences are
supported.
"""
if sys.platform != "win32":
return False
hconsole = HANDLE(windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE))
# Get original console mode.
original_mode = DWORD(0)
windll.kernel32.GetConsoleMode(hconsole, byref(original_mode))
try:
# Try to enable VT100 sequences.
result: int = windll.kernel32.SetConsoleMode(
hconsole, DWORD(ENABLE_PROCESSED_INPUT | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
)
return result == 1
finally:
windll.kernel32.SetConsoleMode(hconsole, original_mode)
| Windows10_Output |
python | allegroai__clearml | clearml/automation/scheduler.py | {
"start": 20837,
"end": 40992
} | class ____(BaseScheduler):
"""
Task Scheduling controller.
Notice time-zone is ALWAYS UTC
"""
_configuration_section = "schedule"
def __init__(
self,
sync_frequency_minutes: float = 15,
force_create_task_name: Optional[str] = None,
force_create_task_project: Optional[str] = None,
) -> None:
"""
Create a Task scheduler service
:param sync_frequency_minutes: Sync task scheduler configuration every X minutes.
Allow to change scheduler in runtime by editing the Task configuration object
:param force_create_task_name: Optional, force creation of Task Scheduler service,
even if main Task.init already exists.
:param force_create_task_project: Optional, force creation of Task Scheduler service,
even if main Task.init already exists.
"""
super(TaskScheduler, self).__init__(
sync_frequency_minutes=sync_frequency_minutes,
force_create_task_name=force_create_task_name,
force_create_task_project=force_create_task_project,
)
self._schedule_jobs = [] # List[ScheduleJob]
self._timeout_jobs = {} # Dict[datetime, str]
self._executed_jobs = [] # List[ExecutedJob]
def add_task(
self,
schedule_task_id: Union[str, Task] = None,
schedule_function: Callable = None,
queue: str = None,
name: Optional[str] = None,
target_project: Optional[str] = None,
minute: Optional[int] = None,
hour: Optional[int] = None,
day: Optional[int] = None,
weekdays: Optional[List[str]] = None,
month: Optional[int] = None,
year: Optional[int] = None,
limit_execution_time: Optional[float] = None,
single_instance: bool = False,
recurring: bool = True,
execute_immediately: bool = False,
reuse_task: bool = False,
task_parameters: Optional[dict] = None,
task_overrides: Optional[dict] = None,
) -> bool:
"""
Create a cron job-like scheduling for a pre-existing Task.
Notice, it is recommended to give the schedule entry a descriptive unique name,
if not provided, a name is randomly generated.
When timespec parameters are specified exclusively, they define the time between task launches (see
`year` and `weekdays` exceptions). When multiple timespec parameters are specified, the parameter representing
the longest duration defines the time between task launches, and the shorter timespec parameters define specific
times.
Examples:
Launch every 15 minutes:
.. code-block:: py
add_task(schedule_task_id='1235', queue='default', minute=15)
Launch every 1 hour:
.. code-block:: py
add_task(schedule_task_id='1235', queue='default', hour=1)
Launch every 1 hour at hour:30 minutes (i.e. 1:30, 2:30 etc.):
.. code-block:: py
add_task(schedule_task_id='1235', queue='default', hour=1, minute=30)
Launch every day at 22:30 (10:30 pm):
.. code-block:: py
add_task(schedule_task_id='1235', queue='default', minute=30, hour=22, day=1)
Launch every other day at 7:30 (7:30 am):
.. code-block:: py
add_task(schedule_task_id='1235', queue='default', minute=30, hour=7, day=2)
Launch every Saturday at 8:30am (notice `day=0`):
.. code-block:: py
add_task(schedule_task_id='1235', queue='default', minute=30, hour=8, day=0, weekdays=['saturday'])
Launch every 2 hours on the weekends Saturday/Sunday (notice `day` is not passed):
.. code-block:: py
add_task(schedule_task_id='1235', queue='default', hour=2, weekdays=['saturday', 'sunday'])
Launch once a month at the 5th of each month:
.. code-block:: py
add_task(schedule_task_id='1235', queue='default', month=1, day=5)
Launch once a year on March 4th:
.. code-block:: py
add_task(schedule_task_id='1235', queue='default', year=1, month=3, day=4)
:param schedule_task_id: ID of Task to be cloned and scheduled for execution
:param schedule_function: Optional, instead of providing Task ID to be scheduled,
provide a function to be called. Notice the function is called from the scheduler context
(i.e. running on the same machine as the scheduler)
:param queue: Name or ID of queue to put the Task into (i.e. schedule)
:param name: Name or description for the cron Task (should be unique if provided, otherwise randomly generated)
:param target_project: Specify target project to put the cloned scheduled Task in.
:param minute: Time (in minutes) between task launches. If specified together with `hour`, `day`, `month`,
and / or `year`, it defines the minute of the hour
:param hour: Time (in hours) between task launches. If specified together with `day`, `month`, and / or
`year`, it defines the hour of day.
:param day: Time (in days) between task executions. If specified together with `month` and / or `year`,
it defines the day of month
:param weekdays: Days of week to launch task (accepted inputs: 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
:param month: Time (in months) between task launches. If specified with `year`, it defines a specific month
:param year: Specific year if value >= current year. Time (in years) between task launches if
value <= 100
:param limit_execution_time: Limit the execution time (in hours) of the specific job.
:param single_instance: If True, do not launch the Task job if the previous instance is still running
(skip until the next scheduled time period). Default False.
:param recurring: If False, only launch the Task once (default: True, repeat)
:param execute_immediately: If True, schedule the Task to be executed immediately
then recurring based on the timing schedule arguments. Default False.
:param reuse_task: If True, re-enqueue the same Task (i.e. do not clone it) every time, default False.
:param task_parameters: Configuration parameters to the executed Task.
for example: ``{'Args/batch': '12'}`` Notice: not available when reuse_task=True
:param task_overrides: Change task definition.
for example ``{'script.version_num': None, 'script.branch': 'main'}`` Notice: not available when reuse_task=True
:return: True if job is successfully added to the scheduling list
"""
mutually_exclusive(schedule_function=schedule_function, schedule_task_id=schedule_task_id)
task_id = schedule_task_id.id if isinstance(schedule_task_id, Task) else str(schedule_task_id or "")
# noinspection PyProtectedMember
job = ScheduleJob(
name=name or task_id,
base_task_id=task_id,
base_function=schedule_function,
queue=queue,
target_project=target_project,
execution_limit_hours=limit_execution_time,
recurring=bool(recurring),
single_instance=bool(single_instance),
task_parameters=task_parameters,
task_overrides=task_overrides,
clone_task=not bool(reuse_task),
starting_time=datetime.fromtimestamp(0) if execute_immediately else datetime.now(timezone.utc),
minute=minute,
hour=hour,
day=day,
weekdays=weekdays,
month=month,
year=year,
)
# raise exception if not valid
job.verify()
self._schedule_jobs.append(job)
return True
def get_scheduled_tasks(self) -> List[ScheduleJob]:
"""
Return the current set of scheduled jobs
:return: List of ScheduleJob instances
"""
return self._schedule_jobs
def remove_task(self, task_id: Union[str, Task, Callable]) -> bool:
"""
Remove a Task ID from the scheduled task list.
:param task_id: Task or Task ID to be removed
:return: return True of the Task ID was found in the scheduled jobs list and was removed.
"""
if isinstance(task_id, (Task, str)):
task_id = task_id.id if isinstance(task_id, Task) else str(task_id)
if not any(t.base_task_id == task_id for t in self._schedule_jobs):
return False
self._schedule_jobs = [t for t in self._schedule_jobs if t.base_task_id != task_id]
else:
if not any(t.base_function == task_id for t in self._schedule_jobs):
return False
self._schedule_jobs = [t for t in self._schedule_jobs if t.base_function != task_id]
return True
def start(self) -> None:
"""
Start the Task TaskScheduler loop (notice this function does not return)
"""
super(TaskScheduler, self).start()
def _step(self) -> bool:
"""
scheduling processing step
"""
# update next execution datetime
for j in self._schedule_jobs:
j.next()
# get idle timeout (aka sleeping)
scheduled_jobs = sorted(
[j for j in self._schedule_jobs if j.next_run() is not None],
key=lambda x: x.next_run(),
)
# sort by key
timeout_job_datetime = min(self._timeout_jobs, key=self._timeout_jobs.get) if self._timeout_jobs else None
if not scheduled_jobs and timeout_job_datetime is None:
# sleep and retry
seconds = 60.0 * self._sync_frequency_minutes
self._log("Nothing to do, sleeping for {:.2f} minutes.".format(seconds / 60.0))
sleep(seconds)
return False
next_time_stamp = scheduled_jobs[0].next_run() if scheduled_jobs else None
if timeout_job_datetime is not None:
next_time_stamp = min(next_time_stamp, timeout_job_datetime) if next_time_stamp else timeout_job_datetime
sleep_time = (next_time_stamp - datetime.now(timezone.utc)).total_seconds()
if sleep_time > 0:
# sleep until we need to run a job or maximum sleep time
seconds = min(sleep_time, 60.0 * self._sync_frequency_minutes)
self._log(
"Waiting for next run [UTC {}], sleeping for {:.2f} minutes, until next sync.".format(
next_time_stamp, seconds / 60.0
)
)
sleep(seconds)
return False
# check if this is a Task timeout check
if timeout_job_datetime is not None and next_time_stamp == timeout_job_datetime:
task_id = self._timeout_jobs[timeout_job_datetime]
self._log("Aborting job due to timeout: {}".format(task_id))
self._cancel_task(task_id=task_id)
self._timeout_jobs.pop(timeout_job_datetime, None)
else:
self._log("Launching job: {}".format(scheduled_jobs[0]))
self._launch_job(scheduled_jobs[0])
return True
def start_remotely(self, queue: str = "services") -> None:
"""
Start the Task TaskScheduler loop (notice this function does not return)
:param queue: Remote queue to run the scheduler on, default 'services' queue.
"""
super(TaskScheduler, self).start_remotely(queue=queue)
def _serialize(self) -> None:
"""
Serialize Task scheduling configuration only (no internal state)
"""
# noinspection PyProtectedMember
self._task._set_configuration(
config_type="json",
description="schedule tasks configuration",
config_text=self._serialize_schedule_into_string(),
name=self._configuration_section,
)
def _serialize_state(self) -> None:
"""
Serialize internal state only
"""
json_str = json.dumps(
dict(
scheduled_jobs=[j.to_dict(full=True) for j in self._schedule_jobs],
timeout_jobs={datetime_to_isoformat(k): v for k, v in self._timeout_jobs.items()},
executed_jobs=[j.to_dict(full=True) for j in self._executed_jobs],
),
default=datetime_to_isoformat,
)
self._task.upload_artifact(name="state", artifact_object=json_str, preview="scheduler internal state")
def _deserialize_state(self) -> None:
"""
Deserialize internal state only
"""
# get artifact
self._task.reload()
artifact_object = self._task.artifacts.get("state")
if artifact_object is not None:
state_json_str = artifact_object.get(force_download=True)
if state_json_str is not None:
state_dict = json.loads(state_json_str)
self._schedule_jobs = self.__deserialize_scheduled_jobs(
serialized_jobs_dicts=state_dict.get("scheduled_jobs", []),
current_jobs=self._schedule_jobs,
)
self._timeout_jobs = {datetime_from_isoformat(k): v for k, v in (state_dict.get("timeout_jobs") or {})}
self._executed_jobs = [ExecutedJob(**j) for j in state_dict.get("executed_jobs", [])]
def _deserialize(self) -> None:
"""
Deserialize Task scheduling configuration only
"""
self._log("Syncing scheduler")
self._task.reload()
# noinspection PyProtectedMember
json_str = self._task._get_configuration_text(name=self._configuration_section)
try:
self._schedule_jobs = self.__deserialize_scheduled_jobs(
serialized_jobs_dicts=json.loads(json_str),
current_jobs=self._schedule_jobs,
)
except Exception as ex:
self._log("Failed deserializing configuration: {}".format(ex), level=logging.WARN)
return
@staticmethod
def __deserialize_scheduled_jobs(
serialized_jobs_dicts: List[Dict], current_jobs: List[ScheduleJob]
) -> List[ScheduleJob]:
scheduled_jobs = [ScheduleJob().update(j) for j in serialized_jobs_dicts]
scheduled_jobs = {j.name: j for j in scheduled_jobs}
current_scheduled_jobs = {j.name: j for j in current_jobs}
# select only valid jobs, and update the valid ones state from the current one
new_scheduled_jobs = [
current_scheduled_jobs[name].update(j) if name in current_scheduled_jobs else j
for name, j in scheduled_jobs.items()
]
# verify all jobs
for j in new_scheduled_jobs:
j.verify()
return new_scheduled_jobs
def _serialize_schedule_into_string(self) -> str:
return json.dumps([j.to_dict() for j in self._schedule_jobs], default=datetime_to_isoformat)
def _update_execution_plots(self) -> None:
"""
Update the configuration and execution table plots
"""
if not self._task:
return
task_link_template = (
self._task.get_output_log_web_page()
.replace("/{}/".format(self._task.project), "/{project}/")
.replace("/{}/".format(self._task.id), "/{task}/")
)
# plot the schedule definition
columns = [
"name",
"base_task_id",
"base_function",
"next_run",
"target_project",
"queue",
"minute",
"hour",
"day",
"month",
"year",
"starting_time",
"execution_limit_hours",
"recurring",
"single_instance",
"task_parameters",
"task_overrides",
"clone_task",
]
scheduler_table = [columns]
for j in self._schedule_jobs:
j_dict = j.to_dict()
j_dict["next_run"] = j.next()
j_dict["base_function"] = (
"{}.{}".format(
getattr(j.base_function, "__module__", ""),
getattr(j.base_function, "__name__", ""),
)
if j.base_function
else ""
)
if not j_dict.get("base_task_id"):
j_dict["clone_task"] = ""
row = [
str(j_dict.get(c)).split(".", 1)[0] if isinstance(j_dict.get(c), datetime) else str(j_dict.get(c) or "")
for c in columns
]
if row[1]:
row[1] = '<a href="{}">{}</a>'.format(task_link_template.format(project="*", task=row[1]), row[1])
scheduler_table += [row]
# plot the already executed Tasks
executed_table = [["name", "task id", "started", "finished"]]
for executed_job in sorted(self._executed_jobs, key=lambda x: x.started, reverse=True):
if not executed_job.finished:
if executed_job.task_id:
t = Task.get_task(task_id=executed_job.task_id)
if t.status not in ("in_progress", "queued"):
executed_job.finished = t.data.completed or datetime.now(timezone.utc)
elif executed_job.thread_id:
# noinspection PyBroadException
try:
a_thread = [t for t in enumerate_threads() if t.ident == executed_job.thread_id]
if not a_thread or not a_thread[0].is_alive():
executed_job.finished = datetime.now(timezone.utc)
except Exception:
pass
executed_table += [
[
executed_job.name,
'<a href="{}">{}</a>'.format(
task_link_template.format(project="*", task=executed_job.task_id),
executed_job.task_id,
)
if executed_job.task_id
else "function",
str(executed_job.started).split(".", 1)[0],
str(executed_job.finished).split(".", 1)[0],
]
]
self._task.get_logger().report_table(
title="Schedule Tasks", series=" ", iteration=0, table_plot=scheduler_table
)
self._task.get_logger().report_table(title="Executed Tasks", series=" ", iteration=0, table_plot=executed_table)
def _launch_job_task(
self,
job: ScheduleJob,
task_parameters: Optional[dict] = None,
add_tags: Optional[List[str]] = None,
) -> Optional[ClearmlJob]:
task_job = super(TaskScheduler, self)._launch_job_task(job, task_parameters=task_parameters, add_tags=add_tags)
# make sure this is not a function job
if task_job:
self._executed_jobs.append(
ExecutedJob(name=job.name, task_id=task_job.task_id(), started=datetime.now(timezone.utc))
)
# add timeout check
if job.get_execution_timeout():
# we should probably make sure we are not overwriting a Task
self._timeout_jobs[job.get_execution_timeout()] = task_job.task_id()
return task_job
def _launch_job_function(self, job: ScheduleJob, func_args: Optional[Sequence] = None) -> Optional[Thread]:
thread_job = super(TaskScheduler, self)._launch_job_function(job, func_args=func_args)
# make sure this is not a function job
if thread_job:
self._executed_jobs.append(
ExecutedJob(
name=job.name,
thread_id=str(thread_job.ident),
started=datetime.now(timezone.utc),
)
)
# execution timeout is not supported with function callbacks.
return thread_job
| TaskScheduler |
python | FactoryBoy__factory_boy | tests/test_base.py | {
"start": 15725,
"end": 16467
} | class ____(unittest.TestCase):
def test_extraction(self):
class TestObjectFactory(base.Factory):
class Meta:
model = TestObject
foo = declarations.PostGenerationDeclaration()
self.assertIn('foo', TestObjectFactory._meta.post_declarations.as_dict())
def test_classlevel_extraction(self):
class TestObjectFactory(base.Factory):
class Meta:
model = TestObject
foo = declarations.PostGenerationDeclaration()
foo__bar = 42
self.assertIn('foo', TestObjectFactory._meta.post_declarations.as_dict())
self.assertIn('foo__bar', TestObjectFactory._meta.post_declarations.as_dict())
| PostGenerationParsingTestCase |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_cairo.py | {
"start": 13330,
"end": 13446
} | class ____:
def __init__(self, slices, data):
self._slices = slices
self._data = data
| _CairoRegion |
python | apache__airflow | providers/google/tests/integration/google/cloud/transfers/test_bigquery_to_mssql.py | {
"start": 1435,
"end": 3704
} | class ____:
def setup_method(self):
os.environ["AIRFLOW_CONN_MSSQL_DEFAULT"] = AIRFLOW_CONN_MSSQL_DEFAULT
mssql_hook = MsSqlHook()
mssql_conn = mssql_hook.get_conn()
mssql_hook.set_autocommit(mssql_conn, True)
self.mssql_cursor = mssql_conn.cursor()
self.mssql_cursor.execute(f"""CREATE TABLE {TEST_TABLE_ID} (
PersonID int,
LastName varchar(255),
FirstName varchar(255),
Address varchar(255),
City varchar(255)
)""")
def teardown_method(self):
self.mssql_cursor.execute(f"DROP TABLE {TEST_TABLE_ID}")
@mock.patch(
"airflow.providers.google.cloud.transfers.bigquery_to_sql.BigQueryHook",
return_value=mock.MagicMock(list_rows=mock.MagicMock(side_effect=[TEST_ROWS, []])),
)
def test_execute(self, mock_hook):
operator = BigQueryToMsSqlOperator(
task_id=TASK_ID,
source_project_dataset_table=f"{TEST_PROJECT_ID}.{TEST_DATASET}.{TEST_TABLE_ID}",
target_table_name=TEST_TABLE_ID,
selected_fields=["PersonID", "LastName", "FirstName", "Address", "City"],
replace=False,
)
operator.execute(context=mock.MagicMock())
mock_hook.return_value.list_rows.assert_has_calls(
[
mock.call(
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
max_results=1000,
selected_fields=["PersonID", "LastName", "FirstName", "Address", "City"],
start_index=0,
),
mock.call(
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
max_results=1000,
selected_fields=["PersonID", "LastName", "FirstName", "Address", "City"],
start_index=1000,
),
],
any_order=False,
)
self.mssql_cursor.execute(f"SELECT * FROM {TEST_TABLE_ID}")
res = self.mssql_cursor.fetchone()
assert res == TEST_ROWS[0].values()
| TestBigQueryToMsSqlOperator |
python | ray-project__ray | python/ray/tune/stopper/noop.py | {
"start": 101,
"end": 238
} | class ____(Stopper):
def __call__(self, trial_id, result):
return False
def stop_all(self):
return False
| NoopStopper |
python | astropy__astropy | astropy/visualization/wcsaxes/coordinate_helpers.py | {
"start": 1496,
"end": 59287
} | class ____:
"""
Helper class to control one of the coordinates in the
:class:`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
parent_axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate helper belongs to.
parent_map : :class:`~astropy.visualization.wcsaxes.CoordinatesMap`
The :class:`~astropy.visualization.wcsaxes.CoordinatesMap` object this
coordinate belongs to.
transform : `~matplotlib.transforms.Transform`
The transform corresponding to this coordinate system.
coord_index : int
The index of this coordinate in the
:class:`~astropy.visualization.wcsaxes.CoordinatesMap`.
coord_type : {'longitude', 'latitude', 'scalar'}
The type of this coordinate, which is used to determine the wrapping and
boundary behavior of coordinates. Longitudes wrap at ``coord_wrap``,
latitudes have to be in the range -90 to 90, and scalars are unbounded
and do not wrap.
coord_unit : `~astropy.units.Unit`
The unit that this coordinate is in given the output of transform.
format_unit : `~astropy.units.Unit`, optional
The unit to use to display the coordinates.
coord_wrap : `astropy.units.Quantity`
The angle at which the longitude wraps (defaults to 360 degrees).
frame : `~astropy.visualization.wcsaxes.frame.BaseFrame`
The frame of the :class:`~astropy.visualization.wcsaxes.WCSAxes`.
default_label : str, optional
The axis label to show by default if none is set later.
"""
def __init__(
self,
parent_axes=None,
parent_map=None,
transform=None,
coord_index=None,
coord_type="scalar",
coord_unit=None,
coord_wrap=None,
frame=None,
format_unit=None,
default_label=None,
):
# Keep a reference to the parent axes and the transform
self._parent_axes = parent_axes
self._parent_map = parent_map
self._transform = transform
self._coord_index = coord_index
self._coord_unit = coord_unit
self._format_unit = format_unit
self._frame = frame
self._default_label = default_label or ""
self._auto_axislabel = True
self._axislabel_set = False
self._custom_formatter = None
# Disable auto label for elliptical frames as it puts labels in
# annoying places.
if issubclass(self.parent_axes.frame_class, EllipticalFrame):
self._auto_axislabel = False
self.set_coord_type(coord_type, coord_wrap)
# Initialize ticks
self.dpi_transform = Affine2D()
self.offset_transform = ScaledTranslation(0, 0, self.dpi_transform)
self._ticks = Ticks(
frame=self.frame, transform=parent_axes.transData + self.offset_transform
)
# Initialize tick labels
self._ticklabels = TickLabels(
self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure(),
)
self._ticks.display_minor_ticks(rcParams["xtick.minor.visible"])
self._minor_frequency = 5
# Initialize axis labels
self._axislabels = AxisLabels(
self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure(),
)
# Initialize container for the grid lines
self._grid_lines = []
# Initialize grid style. Take defaults from matplotlib.rcParams.
# Based on matplotlib.axis.YTick._get_gridline.
self._grid_lines_kwargs = {
"visible": False,
"facecolor": "none",
"edgecolor": rcParams["grid.color"],
"linestyle": LINES_TO_PATCHES_LINESTYLE[rcParams["grid.linestyle"]],
"linewidth": rcParams["grid.linewidth"],
"alpha": rcParams["grid.alpha"],
"transform": self.parent_axes.transData,
}
@property
def parent_axes(self):
"""
The axes the coordinate helper belongs to.
"""
return self._parent_axes
@parent_axes.setter
def parent_axes(self, value):
warnings.warn(
"Setting CoordinateHelper.parent_axes directly is deprecated",
AstropyDeprecationWarning,
)
self._parent_axes = value
@property
def parent_map(self):
"""
The :class:`~astropy.visualization.wcsaxes.CoordinatesMap` object this
coordinate belongs to.
"""
return self._parent_map
@parent_map.setter
def parent_map(self, value):
warnings.warn(
"Setting CoordinateHelper.parent_map directly is deprecated",
AstropyDeprecationWarning,
)
self._parent_map = value
@property
def transform(self):
"""
The transform corresponding to this coordinate system.
"""
return self._transform
@transform.setter
def transform(self, value):
warnings.warn(
"Setting CoordinateHelper.transform directly is deprecated",
AstropyDeprecationWarning,
)
self._transform = value
@property
def coord_index(self):
"""
The index of this coordinate in the
:class:`~astropy.visualization.wcsaxes.CoordinatesMap`.
"""
return self._coord_index
@coord_index.setter
def coord_index(self, value):
warnings.warn(
"Setting CoordinateHelper.coord_index directly is deprecated",
AstropyDeprecationWarning,
)
self._coord_index = value
@property
def coord_type(self):
"""
The type of this coordinate (e.g., ``'longitude'``)
"""
return self._coord_type
@coord_type.setter
def coord_type(self, value):
warnings.warn(
"Setting CoordinateHelper.coord_type directly is deprecated, use CoordinateHelper.set_coord_type instead",
AstropyDeprecationWarning,
)
self._coord_type = value
@property
def coord_unit(self):
"""
The unit that this coordinate is in given the output of transform.
"""
return self._coord_unit
@coord_unit.setter
def coord_unit(self, value):
warnings.warn(
"Setting CoordinateHelper.coord_unit directly is deprecated",
AstropyDeprecationWarning,
)
self._coord_unit = value
@property
def coord_wrap(self):
"""
The angle at which the longitude wraps (defaults to 360 degrees).
"""
return self._coord_wrap
@coord_wrap.setter
def coord_wrap(self, value):
warnings.warn(
"Setting CoordinateHelper.coord_wrap directly is deprecated, use CoordinateHelper.set_coord_type instead",
AstropyDeprecationWarning,
)
self._coord_wrap = value
@property
def frame(self):
"""
The frame of the :class:`~astropy.visualization.wcsaxes.WCSAxes`.
"""
return self._frame
@frame.setter
def frame(self, value):
warnings.warn(
"Setting CoordinateHelper.frame directly is deprecated",
AstropyDeprecationWarning,
)
self._frame = value
@property
def default_label(self):
"""
The axis label to show by default if none is set later.
"""
return self._default_label
@default_label.setter
def default_label(self, value):
warnings.warn(
"Setting CoordinateHelper.default_label directly is deprecated",
AstropyDeprecationWarning,
)
self._default_label = value
@property
def ticks(self):
warnings.warn(
"CoordinateHelper.ticks should not be accessed directly and is deprecated",
AstropyDeprecationWarning,
)
return self._ticks
@ticks.setter
def ticks(self, value):
warnings.warn(
"Setting CoordinateHelper.ticks directly is deprecated",
AstropyDeprecationWarning,
)
self._ticks = value
@property
def ticklabels(self):
warnings.warn(
"CoordinateHelper.ticklabels should not be accessed directly and is deprecated",
AstropyDeprecationWarning,
)
return self._ticklabels
@ticklabels.setter
def ticklabels(self, value):
warnings.warn(
"Setting CoordinateHelper.ticklabels directly is deprecated",
AstropyDeprecationWarning,
)
self._ticklabels = value
@property
def axislabels(self):
warnings.warn(
"CoordinateHelper.axislabels should not be accessed directly and is deprecated",
AstropyDeprecationWarning,
)
return self._axislabels
@axislabels.setter
def axislabels(self, value):
warnings.warn(
"Setting CoordinateHelper.axislabels directly is deprecated",
AstropyDeprecationWarning,
)
self._axislabels = value
def grid(self, draw_grid=True, grid_type=None, **kwargs):
"""
Plot grid lines for this coordinate.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : {'lines', 'contours'}
Whether to plot the contours by determining the grid lines in
world coordinates and then plotting them in world coordinates
(``'lines'``) or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended. By default, 'lines' is used if the transform has
an inverse, otherwise 'contours' is used.
"""
if grid_type == "lines" and not self.transform.has_inverse:
raise ValueError(
"The specified transform has no inverse, so the "
"grid cannot be drawn using grid_type='lines'"
)
if grid_type is None:
grid_type = "lines" if self.transform.has_inverse else "contours"
if grid_type in ("lines", "contours"):
self._grid_type = grid_type
else:
raise ValueError("grid_type should be 'lines' or 'contours'")
if "color" in kwargs:
kwargs["edgecolor"] = kwargs.pop("color")
self._grid_lines_kwargs.update(kwargs)
if draw_grid is None:
draw_grid = True
self._grid_lines_kwargs["visible"] = draw_grid
def set_coord_type(self, coord_type, coord_wrap=None):
"""
Set the coordinate type for the axis.
Parameters
----------
coord_type : str
One of 'longitude', 'latitude' or 'scalar'
coord_wrap : `~astropy.units.Quantity`, optional
The value to wrap at for angular coordinates.
"""
self._coord_type = coord_type
if coord_wrap is not None and not isinstance(coord_wrap, u.Quantity):
warnings.warn(
"Passing 'coord_wrap' as a number is deprecated. Use a Quantity with units convertible to angular degrees instead.",
AstropyDeprecationWarning,
)
coord_wrap = coord_wrap * u.deg
if coord_type == "longitude" and coord_wrap is None:
self._coord_wrap = 360 * u.deg
elif coord_type != "longitude" and coord_wrap is not None:
raise NotImplementedError(
"coord_wrap is not yet supported for non-longitude coordinates"
)
else:
self._coord_wrap = coord_wrap
# Initialize tick formatter/locator
if coord_type == "scalar":
self._coord_scale_to_deg = None
self._formatter_locator = ScalarFormatterLocator(unit=self.coord_unit)
elif coord_type in ["longitude", "latitude"]:
if self.coord_unit is u.deg:
self._coord_scale_to_deg = None
else:
self._coord_scale_to_deg = self.coord_unit.to(u.deg)
self._formatter_locator = AngleFormatterLocator(
unit=self.coord_unit, format_unit=self._format_unit
)
else:
raise ValueError(
"coord_type should be one of 'scalar', 'longitude', or 'latitude'"
)
def set_major_formatter(self, formatter, show_decimal_unit=True):
"""
Set the format string to use for the major tick labels.
See :ref:`tick_label_format` for accepted format strings and examples.
Parameters
----------
formatter : str or callable
The format string to use, or a callable (for advanced use cases).
If specified as a callable, this should take a
`~astropy.units.Quantity` (which could be scalar or array) of tick
world coordinates as well as an optional ``spacing`` keyword
argument, which gives (also as a `~astropy.units.Quantity`) the
spacing between ticks, and returns an iterable of strings
containing the labels.
show_decimal_unit : str
Whether to show the unit or not when using decimal formatting (e.g.,
``d.dd`` or ``x.xxx``).
"""
if callable(formatter):
self._custom_formatter = formatter
elif isinstance(formatter, str):
self._formatter_locator.format = formatter
self._custom_formatter = None
else:
raise TypeError("formatter should be a string")
self._formatter_locator.show_decimal_unit = show_decimal_unit
def format_coord(self, value, format="auto"):
"""
Given the value of a coordinate, will format it according to the
format of the formatter_locator.
Parameters
----------
value : float
The value to format
format : {'auto', 'ascii', 'latex'}, optional
The format to use - by default the formatting will be adjusted
depending on whether Matplotlib is using LaTeX or MathTex. To
get plain ASCII strings, use format='ascii'.
"""
if not hasattr(self, "_fl_spacing"):
return "" # _update_ticks has not been called yet
fl = self._formatter_locator
if isinstance(fl, AngleFormatterLocator):
# Convert to degrees if needed
if self._coord_scale_to_deg is not None:
value *= self._coord_scale_to_deg
if self.coord_type == "longitude":
value = wrap_angle_at(value, self.coord_wrap.to_value(u.deg))
value = value * u.degree
value = value.to_value(fl._unit)
spacing = self._fl_spacing
string = self.formatter(
values=[value] * fl._unit, spacing=spacing, format=format
)
return string[0]
def set_separator(self, separator):
"""
Set the separator to use for the angle major tick labels.
Parameters
----------
separator : str or tuple or None
The separator between numbers in sexagesimal representation. Can be
either a string or a tuple (or `None` for default).
"""
if not (self._formatter_locator.__class__ == AngleFormatterLocator):
raise TypeError("Separator can only be specified for angle coordinates")
if isinstance(separator, (str, tuple)) or separator is None:
self._formatter_locator.sep = separator
else:
raise TypeError("separator should be a string, a tuple, or None")
def set_format_unit(self, unit, decimal=None, show_decimal_unit=True):
"""
Set the unit for the major tick labels.
Parameters
----------
unit : class:`~astropy.units.Unit`
The unit to which the tick labels should be converted to.
decimal : bool, optional
Whether to use decimal formatting. By default this is `False`
for degrees or hours (which therefore use sexagesimal formatting)
and `True` for all other units.
show_decimal_unit : bool, optional
Whether to include units when in decimal mode.
"""
self._formatter_locator.format_unit = u.Unit(unit)
self._formatter_locator.decimal = decimal
self._formatter_locator.show_decimal_unit = show_decimal_unit
def get_format_unit(self):
"""
Get the unit for the major tick labels.
"""
return self._formatter_locator.format_unit
def set_ticks(
self,
values=None,
spacing=None,
number=None,
size=None,
width=None,
color=None,
alpha=None,
direction=None,
exclude_overlapping=None,
):
"""
Set the location and properties of the ticks.
At most one of the options from ``values``, ``spacing``, or
``number`` can be specified.
Parameters
----------
values : iterable, optional
The coordinate values at which to show the ticks.
spacing : float, optional
The spacing between ticks.
number : float, optional
The approximate number of ticks shown.
size : float, optional
The length of the ticks in points
color : str or tuple, optional
A valid Matplotlib color for the ticks
alpha : float, optional
The alpha value (transparency) for the ticks.
direction : {'in','out'}, optional
Whether the ticks should point inwards or outwards.
"""
if sum([values is None, spacing is None, number is None]) < 2:
raise ValueError(
"At most one of values, spacing, or number should be specified"
)
if values is not None:
self._formatter_locator.values = values
elif spacing is not None:
self._formatter_locator.spacing = spacing
elif number is not None:
self._formatter_locator.number = number
if size is not None:
self._ticks.set_ticksize(size)
if width is not None:
self._ticks.set_linewidth(width)
if color is not None:
self._ticks.set_color(color)
if alpha is not None:
self._ticks.set_alpha(alpha)
if direction is not None:
if direction in ("in", "out"):
self._ticks.set_tick_out(direction == "out")
else:
raise ValueError("direction should be 'in' or 'out'")
if exclude_overlapping is not None:
warnings.warn(
"exclude_overlapping= should be passed to "
"set_ticklabel instead of set_ticks",
AstropyDeprecationWarning,
)
self._ticklabels.set_exclude_overlapping(exclude_overlapping)
def set_ticks_position(self, position):
"""
Set where ticks should appear.
Parameters
----------
position : str or list
The axes on which the ticks for this coordinate should appear.
Should be a sequence containing zero or more of ``'b'``, ``'t'``,
``'l'``, ``'r'``. For example, ``'lb'`` will lead the ticks to be
shown on the left and bottom axis. In addition, if ``'#'`` is
included in the sequence, the position will be considered dynamic and
will be updated at draw-time in order to show the ticks on the same
axes as the tick labels are shown.
"""
self._ticks.set_visible_axes(position)
def get_ticks_position(self):
"""
Get where tick labels will appear.
"""
return list(self._ticks.get_visible_axes())
def set_ticks_visible(self, visible):
"""
Set whether ticks are visible or not.
Parameters
----------
visible : bool
The visibility of ticks. Setting as ``False`` will hide ticks
along this coordinate.
"""
self._ticks.set_visible(visible)
def get_ticks_visible(self):
"""
Get whether the ticks are currently visible.
"""
return self._ticks.get_visible()
def set_ticklabel(
self,
color=None,
size=None,
pad=None,
exclude_overlapping=None,
*,
simplify=True,
**kwargs,
):
"""
Set the visual properties for the tick labels.
Parameters
----------
size : float, optional
The size of the ticks labels in points
color : str or tuple, optional
A valid Matplotlib color for the tick labels
pad : float, optional
Distance in points between tick and label.
exclude_overlapping : bool, optional
Whether to exclude tick labels that overlap over each other.
simplify : bool, optional
Whether to remove repeated parts of tick labels.
**kwargs
Other keyword arguments are passed to :class:`matplotlib.text.Text`.
"""
if size is not None:
self._ticklabels.set_size(size)
if color is not None:
self._ticklabels.set_color(color)
if pad is not None:
self._ticklabels.set_pad(pad)
if exclude_overlapping is not None:
self._ticklabels.set_exclude_overlapping(exclude_overlapping)
self._ticklabels.set_simplify(simplify)
self._ticklabels.set(**kwargs)
def get_ticklabel_visible(self):
"""
Get whether the tick labels are currently visible.
"""
return self._ticklabels.get_visible()
def set_ticklabel_position(self, position):
"""
Set where tick labels should appear.
Parameters
----------
position : str or list
The axes on which the tick labels for this coordinate should
appear. Should be a sequence containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
tick labels to be shown on the left and bottom axis. In addition,
if ``'#'`` is included in the sequence, the position will be
considered dynamic and will be updated at draw-time in order to
attempt to optimize the layout of all the coordinates.
"""
self._ticklabels.set_visible_axes(position)
def get_ticklabel_position(self):
"""
Get where tick labels will appear.
"""
return list(self._ticklabels.get_visible_axes())
def set_ticklabel_visible(self, visible):
"""
Set whether the tick labels are visible or not.
Parameters
----------
visible : bool
The visibility of ticks. Setting as ``False`` will hide this
coordinate's tick labels.
"""
self._ticklabels.set_visible(visible)
def set_axislabel(self, text, minpad=1, **kwargs):
"""
Set the text and optionally visual properties for the axis label.
Parameters
----------
text : str
The axis label text.
minpad : float, optional
The padding for the label in terms of axis label font size.
**kwargs
Keywords are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
fontdict = kwargs.pop("fontdict", None)
# NOTE: When using plt.xlabel/plt.ylabel, minpad can get set explicitly
# to None so we need to make sure that in that case we change to a
# default numerical value.
if minpad is None:
minpad = 1
self._axislabel_set = True
self._axislabels.set_text(text)
self._axislabels.set_minpad(minpad)
self._axislabels.set(**kwargs)
if fontdict is not None:
self._axislabels.update(fontdict)
def get_axislabel(self):
"""
Get the text for the axis label.
Returns
-------
label : str
The axis label
"""
if self._auto_axislabel and not self._axislabel_set:
return self._get_default_axislabel()
else:
return self._axislabels.get_text()
def get_axislabel_visible(self):
"""
Get whether the axis label is currently visible.
"""
return self._axislabels.get_visible()
def set_auto_axislabel(self, auto_label):
"""
Render default axis labels if no explicit label is provided.
Parameters
----------
auto_label : `bool`
`True` if default labels will be rendered.
"""
self._auto_axislabel = bool(auto_label)
def get_auto_axislabel(self):
"""
Render default axis labels if no explicit label is provided.
Returns
-------
auto_axislabel : `bool`
`True` if default labels will be rendered.
"""
return self._auto_axislabel
def _get_default_axislabel(self):
unit = self.get_format_unit() or self.coord_unit
if not unit or unit is u.one or self.coord_type in ("longitude", "latitude"):
return f"{self.default_label}"
else:
return f"{self.default_label} [{unit:latex}]"
def set_axislabel_position(self, position):
"""
Set where axis labels should appear.
Parameters
----------
position : str or list
The axes on which the axis label for this coordinate should
appear. Should be a sequence containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
axis label to be shown on the left and bottom axis. In addition, if
``'#'`` is included in the sequence, the position will be considered
dynamic and will be updated at draw-time in order to show the axis
label on the same axes as the tick labels are shown.
"""
self._axislabels.set_visible_axes(position)
def get_axislabel_position(self):
"""
Get where axis labels will appear.
"""
return list(self._axislabels.get_visible_axes())
def set_axislabel_visibility_rule(self, rule):
"""
Set the rule used to determine when the axis label is drawn.
Parameters
----------
rule : str
If the rule is 'always' axis labels will always be drawn on the
axis. If the rule is 'ticks' the label will only be drawn if ticks
were drawn on that axis. If the rule is 'labels' the axis label
will only be drawn if tick labels were drawn on that axis.
"""
self._axislabels.set_visibility_rule(rule)
def set_visible(self, visible):
"""
Set the visibility for ticks, tick labels, and axis labels.
Parameters
----------
visible : bool
If 'True', show all elements.
If 'False', hide all elements.
"""
if isinstance(visible, bool):
self.set_ticks_visible(visible)
self.set_ticklabel_visible(visible)
self._axislabels.set_visible(visible)
else:
raise TypeError("visible must be a boolean")
def set_position(self, position):
"""
Set the position for ticks, tick labels, and axis labels.
Parameters
----------
position : str
Show all elements at the given position string (e.g. 't', 'lb', '#').
"""
if isinstance(position, str):
self.set_ticks_position(position)
self.set_ticklabel_position(position)
self.set_axislabel_position(position)
self.set_ticks_visible(True)
self.set_ticklabel_visible(True)
self._axislabels.set_visible(True)
else:
raise TypeError("position must be a string")
    @deprecated_renamed_argument("rule", None, "7.2.0")
    def get_axislabel_visibility_rule(self, rule):
        """
        Get the rule used to determine when the axis label is drawn.

        .. deprecated:: 7.2.0
            The ``rule`` argument is deprecated.
        """
        # The stored rule is returned regardless of the (deprecated) argument.
        return self._axislabels.get_visibility_rule()
    @property
    def locator(self):
        # Tick locator, delegated to the shared formatter/locator helper.
        return self._formatter_locator.locator
    @property
    def formatter(self):
        # A user-supplied custom formatter takes precedence over the default
        # one provided by the formatter/locator helper.
        return self._custom_formatter or self._formatter_locator.formatter
    def _draw_grid(self, renderer):
        """Draw the grid for this coordinate, either as line paths or contours."""
        renderer.open_group("grid lines")

        if self._grid_lines_kwargs["visible"]:
            if isinstance(self.frame, RectangularFrame1D):
                self._update_grid_lines_1d()
            else:
                # Curvilinear systems may use contours instead of line paths.
                if self._grid_type == "lines":
                    self._update_grid_lines()
                else:
                    self._update_grid_contour()

            if self._grid_type == "lines":
                # Each grid line becomes a patch clipped to the plot frame.
                frame_patch = self.frame.patch
                for path in self._grid_lines:
                    p = PathPatch(path, **self._grid_lines_kwargs)
                    p.set_clip_path(frame_patch)
                    p.draw(renderer)

            elif self._grid is not None:
                self._grid.set(**self._grid_lines_kwargs)
                self._grid.draw(renderer)

        renderer.close_group("grid lines")
    def _draw_ticks(self, renderer, existing_bboxes):
        """
        Draw all ticks and ticklabels.

        Parameters
        ----------
        renderer : renderer instance
            The matplotlib renderer to draw with.
        existing_bboxes : list[Bbox]
            All bboxes for ticks that have already been drawn by other
            coordinates.
        """
        renderer.open_group("ticks")
        self._ticks.draw(renderer)

        # Inform the tick labels how far the ticks stick out and which bboxes
        # are already occupied, so labels can be offset and avoid overlaps.
        self._ticklabels._tick_out_size = self._ticks.out_size
        self._ticklabels._set_existing_bboxes(existing_bboxes)
        self._ticklabels.draw(renderer)

        renderer.close_group("ticks")
    def _draw_axislabels(self, renderer, bboxes, ticklabels_bbox, visible_ticks):
        """Draw the axis label, generating the default label text if needed."""
        # Render the default axis label if no axis label is set.
        if self._auto_axislabel and not self._axislabel_set:
            self.set_axislabel(self._get_default_axislabel())

        renderer.open_group("axis labels")
        self._axislabels.draw(
            renderer,
            bboxes=bboxes,
            ticklabels_bbox=ticklabels_bbox,
            coord_ticklabels_bbox=ticklabels_bbox[self],
            ticks_locs=self._ticks.ticks_locs,
            visible_ticks=visible_ticks,
        )
        renderer.close_group("axis labels")
    def _update_ticks(self):
        """Recompute tick positions, angles and label info along the frame."""
        if self.coord_index is None:
            return

        # TODO: this method should be optimized for speed

        # Here we determine the location and rotation of all the ticks. For
        # each axis, we can check the intersections for the specific
        # coordinate and once we have the tick positions, we can use the WCS
        # to determine the rotations.

        coord_range = self.parent_map._coord_range

        # First find the ticks we want to show
        tick_world_coordinates, self._fl_spacing = self.locator(
            *coord_range[self.coord_index]
        )
        if self._ticks.get_display_minor_ticks():
            minor_ticks_w_coordinates = self._formatter_locator.minor_locator(
                self._fl_spacing,
                self.get_minor_frequency(),
                *coord_range[self.coord_index],
            )

        # We want to allow non-standard rectangular frames, so we just rely on
        # the parent axes to tell us what the bounding frame is.
        from . import conf

        frame = self.frame.sample(conf.frame_boundary_samples)

        self._ticks.clear()
        self._ticklabels.clear()
        self._lblinfo = []
        self._lbl_world = []

        # Look up parent axes' transform from data to figure coordinates.
        #
        # See:
        # https://matplotlib.org/stable/tutorials/advanced/transforms_tutorial.html#the-transformation-pipeline
        transData = self.parent_axes.transData
        invertedTransLimits = transData.inverted()

        for axis, spine in frame.items():
            if spine.data.size == 0:
                continue

            if not isinstance(self.frame, RectangularFrame1D):
                # Determine tick rotation in display coordinates and compare to
                # the normal angle in display coordinates.

                pixel0 = spine.data
                world0 = spine.world[:, self.coord_index]
                if np.isnan(world0).all():
                    continue

                axes0 = transData.transform(pixel0)

                # Advance 2 pixels in figure coordinates
                pixel1 = axes0.copy()
                pixel1[:, 0] += 2.0
                pixel1 = invertedTransLimits.transform(pixel1)
                with np.errstate(invalid="ignore"):
                    world1 = self.transform.transform(pixel1)[:, self.coord_index]

                # Advance 2 pixels in figure coordinates
                pixel2 = axes0.copy()
                pixel2[:, 1] += 2.0 if self.frame.origin == "lower" else -2.0
                pixel2 = invertedTransLimits.transform(pixel2)
                with np.errstate(invalid="ignore"):
                    world2 = self.transform.transform(pixel2)[:, self.coord_index]

                dx = world1 - world0
                dy = world2 - world0

                # Rotate by 90 degrees
                dx, dy = -dy, dx

                if self.coord_type == "longitude":
                    if self._coord_scale_to_deg is not None:
                        dx *= self._coord_scale_to_deg
                        dy *= self._coord_scale_to_deg

                    # Here we wrap at 180 not self.coord_wrap since we want to
                    # always ensure abs(dx) < 180 and abs(dy) < 180
                    dx = wrap_angle_at(dx, 180.0)
                    dy = wrap_angle_at(dy, 180.0)

                tick_angle = np.degrees(np.arctan2(dy, dx))

                # Flip angles that point against the spine normal so ticks
                # always face outward.
                normal_angle_full = np.hstack(
                    [spine.normal_angle, spine.normal_angle[-1]]
                )
                with np.errstate(invalid="ignore"):
                    reset = ((normal_angle_full - tick_angle) % 360 > 90.0) & (
                        (tick_angle - normal_angle_full) % 360 > 90.0
                    )
                    tick_angle[reset] -= 180.0

            else:
                rotation = 90 if axis == "b" else -90
                tick_angle = np.zeros((conf.frame_boundary_samples,)) + rotation

            # We find for each interval the starting and ending coordinate,
            # ensuring that we take wrapping into account correctly for
            # longitudes.
            w1 = spine.world[:-1, self.coord_index]
            w2 = spine.world[1:, self.coord_index]

            if self.coord_type == "longitude":
                if self._coord_scale_to_deg is not None:
                    w1 = w1 * self._coord_scale_to_deg
                    w2 = w2 * self._coord_scale_to_deg

                w1 = wrap_angle_at(w1, self.coord_wrap.to_value(u.deg))
                w2 = wrap_angle_at(w2, self.coord_wrap.to_value(u.deg))
                with np.errstate(invalid="ignore"):
                    w1[w2 - w1 > 180.0] += 360
                    w2[w1 - w2 > 180.0] += 360

                if self._coord_scale_to_deg is not None:
                    w1 = w1 / self._coord_scale_to_deg
                    w2 = w2 / self._coord_scale_to_deg

            # For longitudes, we need to check ticks as well as ticks + 360,
            # since the above can produce pairs such as 359 to 361 or 0.5 to
            # 1.5, both of which would match a tick at 0.75. Otherwise we just
            # check the ticks determined above.
            self._compute_ticks(tick_world_coordinates, spine, axis, w1, w2, tick_angle)

            if self._ticks.get_display_minor_ticks():
                self._compute_ticks(
                    minor_ticks_w_coordinates,
                    spine,
                    axis,
                    w1,
                    w2,
                    tick_angle,
                    ticks="minor",
                )

        # format tick labels, add to scene
        text = self.formatter(u.Quantity(self._lbl_world), spacing=self._fl_spacing)
        for kwargs, txt in zip(self._lblinfo, text):
            self._ticklabels.add(text=txt, **kwargs)
def _compute_ticks(
self, tick_world_coordinates, spine, axis, w1, w2, tick_angle, ticks="major"
):
if self.coord_type == "longitude":
tick_world_coordinates_values = tick_world_coordinates.to_value(u.deg)
tick_world_coordinates_values = np.hstack(
[tick_world_coordinates_values, tick_world_coordinates_values + 360]
)
tick_world_coordinates_values *= u.deg.to(self.coord_unit)
else:
tick_world_coordinates_values = tick_world_coordinates.to_value(
self.coord_unit
)
for t in tick_world_coordinates_values:
# Find steps where a tick is present. We have to check
# separately for the case where the tick falls exactly on the
# frame points, otherwise we'll get two matches, one for w1 and
# one for w2.
with np.errstate(invalid="ignore"):
intersections = np.hstack(
[
np.nonzero((t - w1) == 0)[0],
np.nonzero(((t - w1) * (t - w2)) < 0)[0],
]
)
# But we also need to check for intersection with the last w2
if t - w2[-1] == 0:
intersections = np.append(intersections, len(w2) - 1)
# Loop over ticks, and find exact pixel coordinates by linear
# interpolation
for imin in intersections:
imax = imin + 1
if np.allclose(w1[imin], w2[imin], rtol=1.0e-13, atol=1.0e-13):
continue # tick is exactly aligned with frame
else:
frac = (t - w1[imin]) / (w2[imin] - w1[imin])
x_data_i = spine.data[imin, 0] + frac * (
spine.data[imax, 0] - spine.data[imin, 0]
)
y_data_i = spine.data[imin, 1] + frac * (
spine.data[imax, 1] - spine.data[imin, 1]
)
delta_angle = tick_angle[imax] - tick_angle[imin]
if delta_angle > 180.0:
delta_angle -= 360.0
elif delta_angle < -180.0:
delta_angle += 360.0
angle_i = tick_angle[imin] + frac * delta_angle
if self.coord_type == "longitude":
if self._coord_scale_to_deg is not None:
t *= self._coord_scale_to_deg
world = wrap_angle_at(t, self.coord_wrap.to_value(u.deg))
if self._coord_scale_to_deg is not None:
world /= self._coord_scale_to_deg
else:
world = t
if ticks == "major":
self._ticks.add(
axis=axis,
pixel=(x_data_i, y_data_i),
world=world,
angle=angle_i,
axis_displacement=imin + frac,
)
# store information to pass to ticklabels.add
# it's faster to format many ticklabels at once outside
# of the loop
self._lblinfo.append(
dict(
axis=axis,
data=(x_data_i, y_data_i),
world=world,
angle=spine.normal_angle[imin],
axis_displacement=imin + frac,
)
)
self._lbl_world.append(
(world * self.coord_unit).to(tick_world_coordinates.unit)
)
else:
self._ticks.add_minor(
minor_axis=axis,
minor_pixel=(x_data_i, y_data_i),
minor_world=world,
minor_angle=angle_i,
minor_axis_displacement=imin + frac,
)
def display_minor_ticks(self, display_minor_ticks):
"""
Display minor ticks for this coordinate.
Parameters
----------
display_minor_ticks : bool
Whether or not to display minor ticks.
"""
self._ticks.display_minor_ticks(display_minor_ticks)
def get_minor_frequency(self):
return self._minor_frequency
def set_minor_frequency(self, frequency):
"""
Set the frequency of minor ticks per major ticks.
Parameters
----------
frequency : int
The number of minor ticks per major ticks.
"""
self._minor_frequency = frequency
def _update_grid_lines_1d(self):
if self.coord_index is None:
return
x_ticks_pos = [a[0] for a in self._ticks.pixel["b"]]
ymin, ymax = self.parent_axes.get_ylim()
self._grid_lines = []
for x_coord in x_ticks_pos:
pixel = [[x_coord, ymin], [x_coord, ymax]]
self._grid_lines.append(Path(pixel))
    def _update_grid_lines(self):
        """Recompute the grid-line paths for this coordinate."""
        # For 3-d WCS with a correlated third axis, the *proper* way of
        # drawing a grid should be to find the world coordinates of all pixels
        # and drawing contours. What we are doing here assumes that we can
        # define the grid lines with just two of the coordinates (and
        # therefore assumes that the other coordinates are fixed and set to
        # the value in the slice). Here we basically assume that if the WCS
        # had a third axis, it has been abstracted away in the transformation.

        if self.coord_index is None:
            return

        coord_range = self.parent_map._coord_range

        tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
        tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)

        n_coord = len(tick_world_coordinates_values)

        if n_coord == 0:
            return

        from . import conf

        n_samples = conf.grid_samples

        # One sampled line (n_samples points) per grid-line value.
        xy_world = np.zeros((n_samples * n_coord, 2))

        self._grid_lines = []
        for iw, w in enumerate(tick_world_coordinates_values):
            subset = slice(iw * n_samples, (iw + 1) * n_samples)
            if self.coord_index == 0:
                xy_world[subset, 0] = np.repeat(w, n_samples)
                xy_world[subset, 1] = np.linspace(
                    coord_range[1][0], coord_range[1][1], n_samples
                )
            else:
                xy_world[subset, 0] = np.linspace(
                    coord_range[0][0], coord_range[0][1], n_samples
                )
                xy_world[subset, 1] = np.repeat(w, n_samples)

        # We now convert all the world coordinates to pixel coordinates in a
        # single go rather than doing this in the gridline to path conversion
        # to fully benefit from vectorized coordinate transformations.

        # Transform line to pixel coordinates
        pixel = self.transform.inverted().transform(xy_world)

        # Create round-tripped values for checking
        xy_world_round = self.transform.transform(pixel)

        for iw in range(n_coord):
            subset = slice(iw * n_samples, (iw + 1) * n_samples)
            self._grid_lines.append(
                self._get_gridline(
                    xy_world[subset], pixel[subset], xy_world_round[subset]
                )
            )
    def add_tickable_gridline(self, name, constant):
        """
        Define a gridline that can be used for ticks and labels.

        This gridline is not itself drawn, but instead can be specified in calls to
        methods such as
        :meth:`~astropy.visualization.wcsaxes.coordinate_helpers.CoordinateHelper.set_ticklabel_position`
        for drawing ticks and labels. Since the gridline has a constant value in this
        coordinate, and thus would not have any ticks or labels for the same coordinate,
        the call to
        :meth:`~astropy.visualization.wcsaxes.coordinate_helpers.CoordinateHelper.set_ticklabel_position`
        would typically be made on the complementary coordinate.

        Parameters
        ----------
        name : str
            The name for the gridline, usually a single character, but can be longer
        constant : `~astropy.units.Quantity`
            The constant coordinate value of the gridline

        Notes
        -----
        A limitation is that the tickable part of the gridline must be contiguous. If
        the gridline consists of more than one disconnected segment within the plot
        extent, only one of those segments will be made tickable.
        """
        if self.coord_index is None:
            return

        if name in self.frame:
            raise ValueError(f"The frame already has a spine with the name '{name}'")

        # NOTE(review): other methods in this class read
        # ``self.parent_map._coord_range`` directly; this one calls
        # ``get_coord_range()`` — presumably equivalent, confirm.
        coord_range = self.parent_map.get_coord_range()
        constant = constant.to_value(self.coord_unit)

        from . import conf

        n_samples = conf.grid_samples

        # See comment in _update_grid_lines() about a WCS with more than 2 axes
        xy_world = np.zeros((n_samples, 2))
        xy_world[:, self.coord_index] = np.repeat(constant, n_samples)

        # If the complementary coordinate is longitude, we attempt to close the gridline
        # If such closure is a discontinuity, it will be filtered out later
        if self.parent_map[1 - self.coord_index].coord_type == "longitude":
            xy_world[:-1, 1 - self.coord_index] = np.linspace(
                coord_range[1 - self.coord_index][0],
                coord_range[1 - self.coord_index][1],
                n_samples - 1,
            )
            xy_world[-1, 1 - self.coord_index] = coord_range[1 - self.coord_index][0]
        else:
            xy_world[:, 1 - self.coord_index] = np.linspace(
                coord_range[1 - self.coord_index][0],
                coord_range[1 - self.coord_index][1],
                n_samples,
            )

        # Transform line to pixel coordinates
        pixel = self.transform.inverted().transform(xy_world)

        # Create round-tripped values for checking
        xy_world_round = self.transform.transform(pixel)

        # Get the path of the gridline, which masks hidden parts
        gridline = self._get_gridline(xy_world, pixel, xy_world_round)

        def data_for_spine(spine):
            # Recomputes the vertices of the tickable segment each time the
            # spine is refreshed (e.g. after a limits change).
            vertices = gridline.vertices.copy()
            codes = gridline.codes.copy()

            # Retain the parts of the gridline within the rectangular plot bounds.
            # We ought to use the potentially non-rectangular plot frame, but
            # calculating that patch requires updating all spines first, which is a
            # catch-22.
            xmin, xmax = spine.parent_axes.get_xlim()
            ymin, ymax = spine.parent_axes.get_ylim()
            keep = (
                (vertices[:, 0] >= xmin)
                & (vertices[:, 0] <= xmax)
                & (vertices[:, 1] >= ymin)
                & (vertices[:, 1] <= ymax)
            )
            codes[~keep] = Path.MOVETO
            codes[1:][~keep[:-1]] = Path.MOVETO

            # We isolate the last segment (the last run of LINETOs), which must be preceded
            # by at least one MOVETO and may be succeeded by MOVETOs.
            # We have to account for longitude wrapping as well.

            # Bail out if there is no visible segment
            lineto = np.flatnonzero(codes == Path.LINETO)
            if np.size(lineto) == 0:
                return np.zeros((0, 2))

            # Find the start of the last segment (the last MOVETO before the LINETOs)
            last_segment = np.flatnonzero(codes[: lineto[-1]] == Path.MOVETO)[-1]

            # Double the gridline if it is closed (i.e., spans all longitudes)
            if vertices[0, 0] == vertices[-1, 0] and vertices[0, 1] == vertices[-1, 1]:
                codes = np.concatenate([codes, codes[1:]])
                vertices = np.vstack([vertices, vertices[1:, :]])

            # Stop the last segment before any trailing MOVETOs
            moveto = np.flatnonzero(codes[last_segment + 1 :] == Path.MOVETO)
            if np.size(moveto) > 0:
                return vertices[last_segment : last_segment + moveto[0] + 1, :]
            else:
                return vertices[last_segment:n_samples, :]

        self.frame[name] = self.frame.spine_class(
            self.frame.parent_axes, self.frame.transform, data_func=data_for_spine
        )
def _get_gridline(self, xy_world, pixel, xy_world_round):
if self.coord_type == "scalar":
return get_gridline_path(xy_world, pixel)
else:
return get_lon_lat_path(xy_world, pixel, xy_world_round)
def _clear_grid_contour(self):
if hasattr(self, "_grid") and self._grid:
self._grid.remove()
    def _update_grid_contour(self):
        """Recompute the contour set used to draw the grid for this coordinate."""
        if self.coord_index is None:
            return

        xmin, xmax = self.parent_axes.get_xlim()
        ymin, ymax = self.parent_axes.get_ylim()

        from . import conf

        res = conf.contour_grid_samples

        # Evaluate the world coordinate on a res x res pixel grid; the grid
        # is then drawn as contours of that scalar field.
        x, y = np.meshgrid(np.linspace(xmin, xmax, res), np.linspace(ymin, ymax, res))
        pixel = np.array([x.ravel(), y.ravel()]).T
        world = self.transform.transform(pixel)
        field = world[:, self.coord_index].reshape(res, res).T

        coord_range = self.parent_map._coord_range

        tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])

        # tick_world_coordinates is a Quantities array and we only needs its values
        tick_world_coordinates_values = tick_world_coordinates.value

        if self.coord_type == "longitude":
            # Find biggest gap in tick_world_coordinates and wrap in middle
            # For now just assume spacing is equal, so any mid-point will do
            mid = 0.5 * (
                tick_world_coordinates_values[0] + tick_world_coordinates_values[1]
            )
            field = wrap_angle_at(field, mid)
            tick_world_coordinates_values = wrap_angle_at(
                tick_world_coordinates_values, mid
            )

            # Replace wraps by NaN
            with np.errstate(invalid="ignore"):
                reset = (np.abs(np.diff(field[:, :-1], axis=0)) > 180) | (
                    np.abs(np.diff(field[:-1, :], axis=1)) > 180
                )
                field[:-1, :-1][reset] = np.nan
                field[1:, :-1][reset] = np.nan
                field[:-1, 1:][reset] = np.nan
                field[1:, 1:][reset] = np.nan

        if len(tick_world_coordinates_values) > 0:
            with np.errstate(invalid="ignore"):
                self._grid = self.parent_axes.contour(
                    x,
                    y,
                    field.transpose(),
                    levels=np.sort(tick_world_coordinates_values),
                )
        else:
            self._grid = None
    def tick_params(self, which="both", **kwargs):
        """
        Method to set the tick and tick label parameters in the same way as the
        :meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.

        This is provided for convenience, but the recommended API is to use
        :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
        :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
        :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
        :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
        and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.

        Parameters
        ----------
        which : {'both', 'major', 'minor'}, optional
            Which ticks to apply the settings to. By default, setting are
            applied to both major and minor ticks. Note that if ``'minor'`` is
            specified, only the length of the ticks can be set currently.
        direction : {'in', 'out'}, optional
            Puts ticks inside the axes, or outside the axes.
        length : float, optional
            Tick length in points.
        width : float, optional
            Tick width in points.
        color : color, optional
            Tick color (accepts any valid Matplotlib color)
        pad : float, optional
            Distance in points between tick and label.
        labelsize : float or str, optional
            Tick label font size in points or as a string (e.g., 'large').
        labelcolor : color, optional
            Tick label color (accepts any valid Matplotlib color)
        colors : color, optional
            Changes the tick color and the label color to the same value
            (accepts any valid Matplotlib color).
        bottom, top, left, right : bool, optional
            Where to draw the ticks. Note that this will not work correctly if
            the frame is not rectangular.
        labelbottom, labeltop, labelleft, labelright : bool, optional
            Where to draw the tick labels. Note that this will not work
            correctly if the frame is not rectangular.
        grid_color : color, optional
            The color of the grid lines (accepts any valid Matplotlib color).
        grid_alpha : float, optional
            Transparency of grid lines: 0 (transparent) to 1 (opaque).
        grid_linewidth : float, optional
            Width of grid lines in points.
        grid_linestyle : str, optional
            The style of the grid lines (accepts any valid Matplotlib line
            style).
        """
        # First do some sanity checking on the keyword arguments

        # colors= is a fallback default for color and labelcolor
        if "colors" in kwargs:
            if "color" not in kwargs:
                kwargs["color"] = kwargs["colors"]
            if "labelcolor" not in kwargs:
                kwargs["labelcolor"] = kwargs["colors"]

        # The only property that can be set *specifically* for minor ticks is
        # the length. In future we could consider having a separate Ticks instance
        # for minor ticks so that e.g. the color can be set separately.
        if which == "minor":
            if len(set(kwargs) - {"length"}) > 0:
                raise ValueError(
                    "When setting which='minor', the only "
                    "property that can be set at the moment is "
                    "'length' (the minor tick length)"
                )
            else:
                if "length" in kwargs:
                    self._ticks.set_minor_ticksize(kwargs["length"])
            # Minor-only settings never fall through to the major handling.
            return

        # At this point, we can now ignore the 'which' argument.

        # Set the tick arguments
        self.set_ticks(
            size=kwargs.get("length"),
            width=kwargs.get("width"),
            color=kwargs.get("color"),
            direction=kwargs.get("direction"),
        )

        # Set the tick position
        position = None
        for arg in ("bottom", "left", "top", "right"):
            if arg in kwargs and position is None:
                position = ""
            if kwargs.get(arg):
                position += arg[0]
        if position is not None:
            self.set_ticks_position(position)

        # Set the tick label arguments.
        self.set_ticklabel(
            color=kwargs.get("labelcolor"),
            size=kwargs.get("labelsize"),
            pad=kwargs.get("pad"),
        )

        # Set the tick label position
        position = None
        for arg in ("bottom", "left", "top", "right"):
            if "label" + arg in kwargs and position is None:
                position = ""
            if kwargs.get("label" + arg):
                position += arg[0]
        if position is not None:
            self.set_ticklabel_position(position)

        # And the grid settings
        if "grid_color" in kwargs:
            self._grid_lines_kwargs["edgecolor"] = kwargs["grid_color"]
        if "grid_alpha" in kwargs:
            self._grid_lines_kwargs["alpha"] = kwargs["grid_alpha"]
        if "grid_linewidth" in kwargs:
            self._grid_lines_kwargs["linewidth"] = kwargs["grid_linewidth"]
        if "grid_linestyle" in kwargs:
            # Line-style shorthands (e.g. '--') must be mapped to the patch
            # linestyle vocabulary before being stored.
            if kwargs["grid_linestyle"] in LINES_TO_PATCHES_LINESTYLE:
                self._grid_lines_kwargs["linestyle"] = LINES_TO_PATCHES_LINESTYLE[
                    kwargs["grid_linestyle"]
                ]
            else:
                self._grid_lines_kwargs["linestyle"] = kwargs["grid_linestyle"]
| CoordinateHelper |
python | fluentpython__example-code | attic/dicts/test_transformdict.py | {
"start": 9366,
"end": 9772
} | class ____(TransformDictMappingTests):
TransformDict = MyTransformDict
type2test = partial(MyTransformDict, str.lower)
def test_main(verbose=None):
test_classes = [TestTransformDict, TransformDictMappingTests,
TransformDictSubclassMappingTests]
support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main(verbose=True)
| TransformDictSubclassMappingTests |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/triggers/msgraph.py | {
"start": 2764,
"end": 8542
} | class ____(BaseTrigger):
"""
A Microsoft Graph API trigger which allows you to execute an async REST call to the Microsoft Graph API.
:param url: The url being executed on the Microsoft Graph API (templated).
:param response_type: The expected return type of the response as a string. Possible value are: `bytes`,
`str`, `int`, `float`, `bool` and `datetime` (default is None).
:param method: The HTTP method being used to do the REST call (default is GET).
:param conn_id: The HTTP Connection ID to run the operator against (templated).
:param timeout: The HTTP timeout being used by the `KiotaRequestAdapter` (default is None).
When no timeout is specified or set to None then there is no HTTP timeout on each request.
:param proxies: A dict defining the HTTP proxies to be used (default is None).
:param scopes: The scopes to be used (default is ["https://graph.microsoft.com/.default"]).
:param api_version: The API version of the Microsoft Graph API to be used (default is v1).
You can pass an enum named APIVersion which has 2 possible members v1 and beta,
or you can pass a string as `v1.0` or `beta`.
:param serializer: Class which handles response serialization (default is ResponseSerializer).
Bytes will be base64 encoded into a string, so it can be stored as an XCom.
"""
def __init__(
self,
url: str,
response_type: str | None = None,
path_parameters: dict[str, Any] | None = None,
url_template: str | None = None,
method: str = "GET",
query_parameters: dict[str, Any] | None = None,
headers: dict[str, str] | None = None,
data: dict[str, Any] | str | BytesIO | None = None,
conn_id: str = KiotaRequestAdapterHook.default_conn_name,
timeout: float | None = None,
proxies: dict | None = None,
scopes: str | list[str] | None = None,
api_version: APIVersion | str | None = None,
serializer: type[ResponseSerializer] = ResponseSerializer,
):
super().__init__()
self.conn_id = conn_id
self.timeout = timeout
self.proxies = proxies
self.scopes = scopes
self.api_version = api_version
self.url = url
self.response_type = response_type
self.path_parameters = path_parameters
self.url_template = url_template
self.method = method
self.query_parameters = query_parameters
self.headers = headers
self.data = data
self.serializer: ResponseSerializer = self.resolve_type(serializer, default=ResponseSerializer)()
@classmethod
def resolve_type(cls, value: str | type, default) -> type:
if isinstance(value, str):
with suppress(ImportError):
return import_string(value)
return default
return value or default
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize the HttpTrigger arguments and classpath."""
return (
f"{self.__class__.__module__}.{self.__class__.__name__}",
{
"conn_id": self.conn_id,
"timeout": self.timeout,
"proxies": self.proxies,
"scopes": self.scopes,
"api_version": self.api_version,
"serializer": f"{self.serializer.__class__.__module__}.{self.serializer.__class__.__name__}",
"url": self.url,
"path_parameters": self.path_parameters,
"url_template": self.url_template,
"method": self.method,
"query_parameters": self.query_parameters,
"headers": self.headers,
"data": self.data,
"response_type": self.response_type,
},
)
def get_conn(self) -> RequestAdapter:
"""
Initiate a new RequestAdapter connection.
.. warning::
This method is deprecated.
"""
return self.hook.get_conn()
@cached_property
def hook(self) -> KiotaRequestAdapterHook:
return KiotaRequestAdapterHook(
conn_id=self.conn_id,
timeout=self.timeout,
proxies=self.proxies,
scopes=self.scopes,
api_version=self.api_version,
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Make a series of asynchronous HTTP calls via a KiotaRequestAdapterHook."""
try:
response = await self.hook.run(
url=self.url,
response_type=self.response_type,
path_parameters=self.path_parameters,
method=self.method,
query_parameters=self.query_parameters,
headers=self.headers,
data=self.data,
)
self.log.debug("response: %s", response)
if response:
response_type = type(response)
self.log.debug("response type: %s", response_type)
yield TriggerEvent(
{
"status": "success",
"type": f"{response_type.__module__}.{response_type.__name__}",
"response": self.serializer.serialize(response),
}
)
else:
yield TriggerEvent(
{
"status": "success",
"type": None,
"response": None,
}
)
except Exception as e:
self.log.exception("An error occurred: %s", e)
yield TriggerEvent({"status": "failure", "message": str(e)})
| MSGraphTrigger |
python | ansible__ansible | lib/ansible/module_utils/facts/virtual/linux.py | {
"start": 17960,
"end": 18062
} | class ____(VirtualCollector):
_fact_class = LinuxVirtual
_platform = 'Linux'
| LinuxVirtualCollector |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 8816,
"end": 9323
} | class ____(BaseModel):
number_of_peers: int = Field(..., description="")
term: int = Field(..., description="")
commit: int = Field(..., description="")
pending_operations: int = Field(..., description="")
role: Optional["StateRole"] = Field(default=None, description="")
is_voter: bool = Field(..., description="")
peer_id: Optional[int] = Field(default=None, description="")
consensus_thread_status: "ConsensusThreadStatus" = Field(..., description="")
| ClusterStatusTelemetry |
python | huggingface__transformers | src/transformers/models/mra/modeling_mra.py | {
"start": 6920,
"end": 8016
} | class ____(torch.autograd.Function):
@staticmethod
def forward(ctx, dense_query, dense_key, indices, block_size):
sparse_qk_prod = mm_to_sparse(dense_query, dense_key, indices, block_size)
ctx.save_for_backward(dense_query, dense_key, indices)
ctx.block_size = block_size
return sparse_qk_prod
@staticmethod
def backward(ctx, grad):
dense_query, dense_key, indices = ctx.saved_tensors
block_size = ctx.block_size
query_num_block = dense_query.size(1) // block_size
key_num_block = dense_key.size(1) // block_size
indices_T = transpose_indices(indices, query_num_block, key_num_block)
grad_key = sparse_dense_mm(grad.transpose(-1, -2), indices_T, dense_query, key_num_block)
grad_query = sparse_dense_mm(grad, indices, dense_key, query_num_block)
return grad_query, grad_key, None, None
@staticmethod
def operator_call(dense_query, dense_key, indices, block_size=32):
return MraSampledDenseMatMul.apply(dense_query, dense_key, indices, block_size)
| MraSampledDenseMatMul |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/document_summary/retrievers.py | {
"start": 954,
"end": 4685
} | class ____(BaseRetriever):
"""
Document Summary Index LLM Retriever.
By default, select relevant summaries from index using LLM calls.
Args:
index (DocumentSummaryIndex): The index to retrieve from.
choice_select_prompt (Optional[BasePromptTemplate]): The prompt to use for selecting relevant summaries.
choice_batch_size (int): The number of summary nodes to send to LLM at a time.
choice_top_k (int): The number of summary nodes to retrieve.
format_node_batch_fn (Callable): Function to format a batch of nodes for LLM.
parse_choice_select_answer_fn (Callable): Function to parse LLM response.
llm (LLM): The llm to use.
"""
def __init__(
self,
index: DocumentSummaryIndex,
choice_select_prompt: Optional[BasePromptTemplate] = None,
choice_batch_size: int = 10,
choice_top_k: int = 1,
format_node_batch_fn: Optional[Callable] = None,
parse_choice_select_answer_fn: Optional[Callable] = None,
llm: Optional[LLM] = None,
callback_manager: Optional[CallbackManager] = None,
object_map: Optional[dict] = None,
verbose: bool = False,
**kwargs: Any,
) -> None:
self._index = index
self._choice_select_prompt = (
choice_select_prompt or DEFAULT_CHOICE_SELECT_PROMPT
)
self._choice_batch_size = choice_batch_size
self._choice_top_k = choice_top_k
self._format_node_batch_fn = (
format_node_batch_fn or default_format_node_batch_fn
)
self._parse_choice_select_answer_fn = (
parse_choice_select_answer_fn or default_parse_choice_select_answer_fn
)
self._llm = llm or Settings.llm
super().__init__(
callback_manager=callback_manager or Settings.callback_manager,
object_map=object_map,
verbose=verbose,
)
def _retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
"""Retrieve nodes."""
summary_ids = self._index.index_struct.summary_ids
all_summary_ids: List[str] = []
all_relevances: List[float] = []
for idx in range(0, len(summary_ids), self._choice_batch_size):
summary_ids_batch = summary_ids[idx : idx + self._choice_batch_size]
summary_nodes = self._index.docstore.get_nodes(summary_ids_batch)
query_str = query_bundle.query_str
fmt_batch_str = self._format_node_batch_fn(summary_nodes)
# call each batch independently
raw_response = self._llm.predict(
self._choice_select_prompt,
context_str=fmt_batch_str,
query_str=query_str,
)
raw_choices, relevances = self._parse_choice_select_answer_fn(
raw_response, len(summary_nodes)
)
choice_idxs = [choice - 1 for choice in raw_choices]
choice_summary_ids = [summary_ids_batch[ci] for ci in choice_idxs]
all_summary_ids.extend(choice_summary_ids)
all_relevances.extend(relevances)
zipped_list = list(zip(all_summary_ids, all_relevances))
sorted_list = sorted(zipped_list, key=lambda x: x[1], reverse=True)
top_k_list = sorted_list[: self._choice_top_k]
results = []
for summary_id, relevance in top_k_list:
node_ids = self._index.index_struct.summary_id_to_node_ids[summary_id]
nodes = self._index.docstore.get_nodes(node_ids)
results.extend([NodeWithScore(node=n, score=relevance) for n in nodes])
return results
| DocumentSummaryIndexLLMRetriever |
python | huggingface__transformers | utils/modular_model_converter.py | {
"start": 8600,
"end": 10637
} | class ____(cst.CSTTransformer):
"""
This Transformer is used to replace all calls of the form `module.Class.func(...)` by a call of the form
`super().func(...)`.
"""
def __init__(self, new_bases: list[str]):
self.new_bases = new_bases
def is_call_to_parent_class(self, node: cst.SimpleStatementLine):
"""Check whether `node` corresponds to a call to a parent class function, such as `module.Parent.func_name(...)`"""
return m.matches(node, m.Call(func=m.Attribute(value=m.Name() | m.Attribute())))
def leave_Call(self, original_node: cst.Call, updated_node: cst.Call) -> cst.Call:
"""Replace a call of the form `module.Class.func(...)` by a call of the form `super().func(...)`
if the `Class` being called is one of the bases."""
if self.is_call_to_parent_class(updated_node):
full_parent_class_name = get_full_attribute_name(updated_node.func.value)
# Replace only if it's a base, or a few special rules
if (
full_parent_class_name in self.new_bases
or (full_parent_class_name == "nn.Module" and "GradientCheckpointingLayer" in self.new_bases)
or (
full_parent_class_name == "PreTrainedModel"
and any("PreTrainedModel" in base for base in self.new_bases)
)
):
# Replace `full_parent_class_name.func(...)` with `super().func(...)`
attribute_node = updated_node.func.with_changes(value=cst.Call(func=cst.Name("super")))
# Check if the first argument is 'self', and remove it
new_args = (
updated_node.args[1:]
if len(updated_node.args) > 0 and m.matches(updated_node.args[0].value, m.Name("self"))
else updated_node.args
)
return updated_node.with_changes(func=attribute_node, args=new_args)
return updated_node
| ReplaceParentClassCallTransformer |
python | plotly__plotly.py | plotly/graph_objs/scatterpolargl/_unselected.py | {
"start": 233,
"end": 3445
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolargl"
_path_str = "scatterpolargl.unselected"
_valid_props = {"marker", "textfont"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolargl.unselected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.scatterpolargl.unselected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolargl.unselected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.scatterpolargl.unselected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.scatterpolargl.unselected.
Marker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatterpolargl.unselected.
Textfont` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Unselected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterpolargl.Unselected`
marker
:class:`plotly.graph_objects.scatterpolargl.unselected.
Marker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatterpolargl.unselected.
Textfont` instance or dict with compatible properties
Returns
-------
Unselected
"""
super().__init__("unselected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolargl.Unselected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.Unselected`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._set_property("textfont", arg, textfont)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Unselected |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup.py | {
"start": 17933,
"end": 18460
} | class ____:
def __init__(self, left: typing.Optional["Tree"], right: typing.Optional["Tree"]):
self.left = left
self.right = right
def __repr__(self):
return f"Tree({self.left}, {self.right})"
@pytest.mark.skipif(
settings.get_current_profile_name() == "crosshair",
reason="takes ~19 mins; datastructure explosion https://github.com/pschanely/hypothesis-crosshair/issues/27",
)
@given(tree=st.builds(Tree))
def test_resolving_recursive_type(tree):
assert isinstance(tree, Tree)
| Tree |
python | bokeh__bokeh | src/bokeh/models/widgets/pickers.py | {
"start": 10230,
"end": 10618
} | class ____(BaseDatetimePicker):
""" Calendar-based picker of date and time ranges. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
value = Nullable(Tuple(Datetime, Datetime), default=None, help="""
The initial or picked date and time range.
""")
| DatetimeRangePicker |
python | pytorch__pytorch | torch/nn/modules/activation.py | {
"start": 11391,
"end": 12966
} | class ____(Module):
r"""Applies the Sigmoid Linear Unit (SiLU) function, element-wise.
The SiLU function is also known as the swish function.
.. math::
\text{silu}(x) = x * \sigma(x), \text{where } \sigma(x) \text{ is the logistic sigmoid.}
.. note::
See `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_
where the SiLU (Sigmoid Linear Unit) was originally coined, and see
`Sigmoid-Weighted Linear Units for Neural Network Function Approximation
in Reinforcement Learning <https://arxiv.org/abs/1702.03118>`_ and `Swish:
a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941v1>`_
where the SiLU was experimented with later.
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/SiLU.png
Examples::
>>> m = nn.SiLU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["inplace"]
inplace: bool
def __init__(self, inplace: bool = False) -> None:
super().__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.silu(input, inplace=self.inplace)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
inplace_str = "inplace=True" if self.inplace else ""
return inplace_str
| SiLU |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 6627,
"end": 11050
} | class ____(RecognitionException):
NONE = 0
CHAR = 1
NOT_CHAR = 2
RANGE = 3
NOT_RANGE = 4
SET = 5
NOT_SET = 6
def __init__(self, *args):
self.args = args
if len(args) == 5:
# Expected range / not range
if args[3]:
self.mismatchType = MismatchedCharException.NOT_RANGE
else:
self.mismatchType = MismatchedCharException.RANGE
self.foundChar = args[0]
self.expecting = args[1]
self.upper = args[2]
self.scanner = args[4]
RecognitionException.__init__(self, "Mismatched char range",
self.scanner.getFilename(),
self.scanner.getLine(),
self.scanner.getColumn())
elif len(args) == 4 and is_string_type(args[1]):
# Expected char / not char
if args[2]:
self.mismatchType = MismatchedCharException.NOT_CHAR
else:
self.mismatchType = MismatchedCharException.CHAR
self.foundChar = args[0]
self.expecting = args[1]
self.scanner = args[3]
RecognitionException.__init__(self, "Mismatched char",
self.scanner.getFilename(),
self.scanner.getLine(),
self.scanner.getColumn())
elif len(args) == 4 and isinstance(args[1], BitSet):
# Expected BitSet / not BitSet
if args[2]:
self.mismatchType = MismatchedCharException.NOT_SET
else:
self.mismatchType = MismatchedCharException.SET
self.foundChar = args[0]
self.set = args[1]
self.scanner = args[3]
RecognitionException.__init__(self, "Mismatched char set",
self.scanner.getFilename(),
self.scanner.getLine(),
self.scanner.getColumn())
else:
self.mismatchType = MismatchedCharException.NONE
RecognitionException.__init__(self, "Mismatched char")
## Append a char to the msg buffer. If special,
# then show escaped version
#
def appendCharName(self, sb, c):
if not c or c == 65535:
# 65535 = (char) -1 = EOF
sb.append("'<EOF>'")
elif c == '\n':
sb.append("'\\n'")
elif c == '\r':
sb.append("'\\r'");
elif c == '\t':
sb.append("'\\t'")
else:
sb.append('\'' + c + '\'')
##
# Returns an error message with line number/column information
#
def __str__(self):
sb = ['']
sb.append(RecognitionException.__str__(self))
if self.mismatchType == MismatchedCharException.CHAR:
sb.append("expecting ")
self.appendCharName(sb, self.expecting)
sb.append(", found ")
self.appendCharName(sb, self.foundChar)
elif self.mismatchType == MismatchedCharException.NOT_CHAR:
sb.append("expecting anything but '")
self.appendCharName(sb, self.expecting)
sb.append("'; got it anyway")
elif self.mismatchType in [MismatchedCharException.RANGE, MismatchedCharException.NOT_RANGE]:
sb.append("expecting char ")
if self.mismatchType == MismatchedCharException.NOT_RANGE:
sb.append("NOT ")
sb.append("in range: ")
self.appendCharName(sb, self.expecting)
sb.append("..")
self.appendCharName(sb, self.upper)
sb.append(", found ")
self.appendCharName(sb, self.foundChar)
elif self.mismatchType in [MismatchedCharException.SET, MismatchedCharException.NOT_SET]:
sb.append("expecting ")
if self.mismatchType == MismatchedCharException.NOT_SET:
sb.append("NOT ")
sb.append("one of (")
for i in range(len(self.set)):
self.appendCharName(sb, self.set[i])
sb.append("), found ")
self.appendCharName(sb, self.foundChar)
return str().join(sb).strip()
__repr__ = __str__
| MismatchedCharException |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 13435,
"end": 14047
} | class ____:
xlFillCopy = 1 # from enum XlAutoFillType
xlFillDays = 5 # from enum XlAutoFillType
xlFillDefault = 0 # from enum XlAutoFillType
xlFillFormats = 3 # from enum XlAutoFillType
xlFillMonths = 7 # from enum XlAutoFillType
xlFillSeries = 2 # from enum XlAutoFillType
xlFillValues = 4 # from enum XlAutoFillType
xlFillWeekdays = 6 # from enum XlAutoFillType
xlFillYears = 8 # from enum XlAutoFillType
xlGrowthTrend = 10 # from enum XlAutoFillType
xlLinearTrend = 9 # from enum XlAutoFillType
xlFlashFill = 11 # from enum XlAutoFillType
| AutoFillType |
python | google__pytype | pytype/test_data/simple.py | {
"start": 42,
"end": 130
} | class ____:
def dominate(self):
pass
def helloworld():
print("hello world")
| World |
python | getsentry__sentry-python | sentry_sdk/transport.py | {
"start": 5624,
"end": 20915
} | class ____(Transport):
"""The base HTTP transport."""
TIMEOUT = 30 # seconds
def __init__(self, options):
# type: (Self, Dict[str, Any]) -> None
from sentry_sdk.consts import VERSION
Transport.__init__(self, options)
assert self.parsed_dsn is not None
self.options = options # type: Dict[str, Any]
self._worker = BackgroundWorker(queue_size=options["transport_queue_size"])
self._auth = self.parsed_dsn.to_auth("sentry.python/%s" % VERSION)
self._disabled_until = {} # type: Dict[Optional[EventDataCategory], datetime]
# We only use this Retry() class for the `get_retry_after` method it exposes
self._retry = urllib3.util.Retry()
self._discarded_events = defaultdict(int) # type: DefaultDict[Tuple[EventDataCategory, str], int]
self._last_client_report_sent = time.time()
self._pool = self._make_pool()
# Backwards compatibility for deprecated `self.hub_class` attribute
self._hub_cls = sentry_sdk.Hub
experiments = options.get("_experiments", {})
compression_level = experiments.get(
"transport_compression_level",
experiments.get("transport_zlib_compression_level"),
)
compression_algo = experiments.get(
"transport_compression_algo",
(
"gzip"
# if only compression level is set, assume gzip for backwards compatibility
# if we don't have brotli available, fallback to gzip
if compression_level is not None or brotli is None
else "br"
),
)
if compression_algo == "br" and brotli is None:
logger.warning(
"You asked for brotli compression without the Brotli module, falling back to gzip -9"
)
compression_algo = "gzip"
compression_level = None
if compression_algo not in ("br", "gzip"):
logger.warning(
"Unknown compression algo %s, disabling compression", compression_algo
)
self._compression_level = 0
self._compression_algo = None
else:
self._compression_algo = compression_algo
if compression_level is not None:
self._compression_level = compression_level
elif self._compression_algo == "gzip":
self._compression_level = 9
elif self._compression_algo == "br":
self._compression_level = 4
def record_lost_event(
self,
reason, # type: str
data_category=None, # type: Optional[EventDataCategory]
item=None, # type: Optional[Item]
*,
quantity=1, # type: int
):
# type: (...) -> None
if not self.options["send_client_reports"]:
return
if item is not None:
data_category = item.data_category
quantity = 1 # If an item is provided, we always count it as 1 (except for attachments, handled below).
if data_category == "transaction":
# Also record the lost spans
event = item.get_transaction_event() or {}
# +1 for the transaction itself
span_count = (
len(cast(List[Dict[str, object]], event.get("spans") or [])) + 1
)
self.record_lost_event(reason, "span", quantity=span_count)
elif data_category == "log_item" and item:
# Also record size of lost logs in bytes
bytes_size = len(item.get_bytes())
self.record_lost_event(reason, "log_byte", quantity=bytes_size)
elif data_category == "attachment":
# quantity of 0 is actually 1 as we do not want to count
# empty attachments as actually empty.
quantity = len(item.get_bytes()) or 1
elif data_category is None:
raise TypeError("data category not provided")
self._discarded_events[data_category, reason] += quantity
def _get_header_value(self, response, header):
# type: (Self, Any, str) -> Optional[str]
return response.headers.get(header)
def _update_rate_limits(self, response):
# type: (Self, Union[urllib3.BaseHTTPResponse, httpcore.Response]) -> None
# new sentries with more rate limit insights. We honor this header
# no matter of the status code to update our internal rate limits.
header = self._get_header_value(response, "x-sentry-rate-limits")
if header:
logger.warning("Rate-limited via x-sentry-rate-limits")
self._disabled_until.update(_parse_rate_limits(header))
# old sentries only communicate global rate limit hits via the
# retry-after header on 429. This header can also be emitted on new
# sentries if a proxy in front wants to globally slow things down.
elif response.status == 429:
logger.warning("Rate-limited via 429")
retry_after_value = self._get_header_value(response, "Retry-After")
retry_after = (
self._retry.parse_retry_after(retry_after_value)
if retry_after_value is not None
else None
) or 60
self._disabled_until[None] = datetime.now(timezone.utc) + timedelta(
seconds=retry_after
)
def _send_request(
self,
body,
headers,
endpoint_type=EndpointType.ENVELOPE,
envelope=None,
):
# type: (Self, bytes, Dict[str, str], EndpointType, Optional[Envelope]) -> None
def record_loss(reason):
# type: (str) -> None
if envelope is None:
self.record_lost_event(reason, data_category="error")
else:
for item in envelope.items:
self.record_lost_event(reason, item=item)
headers.update(
{
"User-Agent": str(self._auth.client),
"X-Sentry-Auth": str(self._auth.to_header()),
}
)
try:
response = self._request(
"POST",
endpoint_type,
body,
headers,
)
except Exception:
self.on_dropped_event("network")
record_loss("network_error")
raise
try:
self._update_rate_limits(response)
if response.status == 429:
# if we hit a 429. Something was rate limited but we already
# acted on this in `self._update_rate_limits`. Note that we
# do not want to record event loss here as we will have recorded
# an outcome in relay already.
self.on_dropped_event("status_429")
pass
elif response.status >= 300 or response.status < 200:
logger.error(
"Unexpected status code: %s (body: %s)",
response.status,
getattr(response, "data", getattr(response, "content", None)),
)
self.on_dropped_event("status_{}".format(response.status))
record_loss("network_error")
finally:
response.close()
def on_dropped_event(self, _reason):
# type: (Self, str) -> None
return None
def _fetch_pending_client_report(self, force=False, interval=60):
# type: (Self, bool, int) -> Optional[Item]
if not self.options["send_client_reports"]:
return None
if not (force or self._last_client_report_sent < time.time() - interval):
return None
discarded_events = self._discarded_events
self._discarded_events = defaultdict(int)
self._last_client_report_sent = time.time()
if not discarded_events:
return None
return Item(
PayloadRef(
json={
"timestamp": time.time(),
"discarded_events": [
{"reason": reason, "category": category, "quantity": quantity}
for (
(category, reason),
quantity,
) in discarded_events.items()
],
}
),
type="client_report",
)
def _flush_client_reports(self, force=False):
# type: (Self, bool) -> None
client_report = self._fetch_pending_client_report(force=force, interval=60)
if client_report is not None:
self.capture_envelope(Envelope(items=[client_report]))
def _check_disabled(self, category):
# type: (str) -> bool
def _disabled(bucket):
# type: (Any) -> bool
ts = self._disabled_until.get(bucket)
return ts is not None and ts > datetime.now(timezone.utc)
return _disabled(category) or _disabled(None)
def _is_rate_limited(self):
# type: (Self) -> bool
return any(
ts > datetime.now(timezone.utc) for ts in self._disabled_until.values()
)
def _is_worker_full(self):
# type: (Self) -> bool
return self._worker.full()
def is_healthy(self):
# type: (Self) -> bool
return not (self._is_worker_full() or self._is_rate_limited())
def _send_envelope(self, envelope):
# type: (Self, Envelope) -> None
# remove all items from the envelope which are over quota
new_items = []
for item in envelope.items:
if self._check_disabled(item.data_category):
if item.data_category in ("transaction", "error", "default", "statsd"):
self.on_dropped_event("self_rate_limits")
self.record_lost_event("ratelimit_backoff", item=item)
else:
new_items.append(item)
# Since we're modifying the envelope here make a copy so that others
# that hold references do not see their envelope modified.
envelope = Envelope(headers=envelope.headers, items=new_items)
if not envelope.items:
return None
# since we're already in the business of sending out an envelope here
# check if we have one pending for the stats session envelopes so we
# can attach it to this enveloped scheduled for sending. This will
# currently typically attach the client report to the most recent
# session update.
client_report_item = self._fetch_pending_client_report(interval=30)
if client_report_item is not None:
envelope.items.append(client_report_item)
content_encoding, body = self._serialize_envelope(envelope)
assert self.parsed_dsn is not None
logger.debug(
"Sending envelope [%s] project:%s host:%s",
envelope.description,
self.parsed_dsn.project_id,
self.parsed_dsn.host,
)
headers = {
"Content-Type": "application/x-sentry-envelope",
}
if content_encoding:
headers["Content-Encoding"] = content_encoding
self._send_request(
body.getvalue(),
headers=headers,
endpoint_type=EndpointType.ENVELOPE,
envelope=envelope,
)
return None
def _serialize_envelope(self, envelope):
# type: (Self, Envelope) -> tuple[Optional[str], io.BytesIO]
content_encoding = None
body = io.BytesIO()
if self._compression_level == 0 or self._compression_algo is None:
envelope.serialize_into(body)
else:
content_encoding = self._compression_algo
if self._compression_algo == "br" and brotli is not None:
body.write(
brotli.compress(
envelope.serialize(), quality=self._compression_level
)
)
else: # assume gzip as we sanitize the algo value in init
with gzip.GzipFile(
fileobj=body, mode="w", compresslevel=self._compression_level
) as f:
envelope.serialize_into(f)
return content_encoding, body
def _get_pool_options(self):
# type: (Self) -> Dict[str, Any]
raise NotImplementedError()
def _in_no_proxy(self, parsed_dsn):
# type: (Self, Dsn) -> bool
no_proxy = getproxies().get("no")
if not no_proxy:
return False
for host in no_proxy.split(","):
host = host.strip()
if parsed_dsn.host.endswith(host) or parsed_dsn.netloc.endswith(host):
return True
return False
def _make_pool(self):
# type: (Self) -> Union[PoolManager, ProxyManager, httpcore.SOCKSProxy, httpcore.HTTPProxy, httpcore.ConnectionPool]
raise NotImplementedError()
def _request(
self,
method,
endpoint_type,
body,
headers,
):
# type: (Self, str, EndpointType, Any, Mapping[str, str]) -> Union[urllib3.BaseHTTPResponse, httpcore.Response]
raise NotImplementedError()
def capture_envelope(
self,
envelope, # type: Envelope
):
# type: (...) -> None
def send_envelope_wrapper():
# type: () -> None
with capture_internal_exceptions():
self._send_envelope(envelope)
self._flush_client_reports()
if not self._worker.submit(send_envelope_wrapper):
self.on_dropped_event("full_queue")
for item in envelope.items:
self.record_lost_event("queue_overflow", item=item)
def flush(
self,
timeout,
callback=None,
):
# type: (Self, float, Optional[Callable[[int, float], None]]) -> None
logger.debug("Flushing HTTP transport")
if timeout > 0:
self._worker.submit(lambda: self._flush_client_reports(force=True))
self._worker.flush(timeout, callback)
def kill(self):
# type: (Self) -> None
logger.debug("Killing HTTP transport")
self._worker.kill()
@staticmethod
def _warn_hub_cls():
# type: () -> None
"""Convenience method to warn users about the deprecation of the `hub_cls` attribute."""
warnings.warn(
"The `hub_cls` attribute is deprecated and will be removed in a future release.",
DeprecationWarning,
stacklevel=3,
)
@property
def hub_cls(self):
# type: (Self) -> type[sentry_sdk.Hub]
"""DEPRECATED: This attribute is deprecated and will be removed in a future release."""
HttpTransport._warn_hub_cls()
return self._hub_cls
@hub_cls.setter
def hub_cls(self, value):
# type: (Self, type[sentry_sdk.Hub]) -> None
"""DEPRECATED: This attribute is deprecated and will be removed in a future release."""
HttpTransport._warn_hub_cls()
self._hub_cls = value
| BaseHttpTransport |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/enum12.py | {
"start": 259,
"end": 482
} | class ____(Enum):
# This should generate an error.
MEMBER_1: int = 1
# This should generate an error.
MEMBER_2: Final = 3
_NON_MEMBER_: int = 3
NON_MEMBER_CALLABLE: Callable[[], int] = lambda: 1
| Enum1 |
python | kamyu104__LeetCode-Solutions | Python/maximize-amount-after-two-days-of-conversions.py | {
"start": 56,
"end": 981
} | class ____(object):
def maxAmount(self, initialCurrency, pairs1, rates1, pairs2, rates2):
"""
:type initialCurrency: str
:type pairs1: List[List[str]]
:type rates1: List[float]
:type pairs2: List[List[str]]
:type rates2: List[float]
:rtype: float
"""
def BellmanFord(dist, pairs, rates):
for _ in xrange(len(pairs)):
for i in xrange(len(pairs)):
dist[pairs[i][1]] = max(dist[pairs[i][1]], dist[pairs[i][0]]*rates[i])
dist[pairs[i][0]] = max(dist[pairs[i][0]], dist[pairs[i][1]]*(1/rates[i]))
dist = collections.defaultdict(int)
dist[initialCurrency] = 1.0
BellmanFord(dist, pairs1, rates1)
BellmanFord(dist, pairs2, rates2)
return dist[initialCurrency]
# Time: O(n^2)
# Space: O(n)
import collections
# bfs
| Solution |
python | ray-project__ray | rllib/connectors/connector_pipeline_v2.py | {
"start": 715,
"end": 15598
} | class ____(ConnectorV2):
"""Utility class for quick manipulation of a connector pipeline."""
@override(ConnectorV2)
def recompute_output_observation_space(
self,
input_observation_space: gym.Space,
input_action_space: gym.Space,
) -> gym.Space:
self._fix_spaces(input_observation_space, input_action_space)
return self.observation_space
@override(ConnectorV2)
def recompute_output_action_space(
self,
input_observation_space: gym.Space,
input_action_space: gym.Space,
) -> gym.Space:
self._fix_spaces(input_observation_space, input_action_space)
return self.action_space
def __init__(
self,
input_observation_space: Optional[gym.Space] = None,
input_action_space: Optional[gym.Space] = None,
*,
connectors: Optional[List[ConnectorV2]] = None,
**kwargs,
):
"""Initializes a ConnectorPipelineV2 instance.
Args:
input_observation_space: The (optional) input observation space for this
connector piece. This is the space coming from a previous connector
piece in the (env-to-module or learner) pipeline or is directly
defined within the gym.Env.
input_action_space: The (optional) input action space for this connector
piece. This is the space coming from a previous connector piece in the
(module-to-env) pipeline or is directly defined within the gym.Env.
connectors: A list of individual ConnectorV2 pieces to be added to this
pipeline during construction. Note that you can always add (or remove)
more ConnectorV2 pieces later on the fly.
"""
self.connectors = []
for conn in connectors:
# If we have a `ConnectorV2` instance just append.
if isinstance(conn, ConnectorV2):
self.connectors.append(conn)
# If, we have a class with `args` and `kwargs`, build the instance.
# Note that this way of constructing a pipeline should only be
# used internally when restoring the pipeline state from a
# checkpoint.
elif isinstance(conn, tuple) and len(conn) == 3:
self.connectors.append(conn[0](*conn[1], **conn[2]))
super().__init__(input_observation_space, input_action_space, **kwargs)
def __len__(self):
return len(self.connectors)
@override(ConnectorV2)
def __call__(
self,
*,
rl_module: RLModule,
batch: Dict[str, Any],
episodes: List[EpisodeType],
explore: Optional[bool] = None,
shared_data: Optional[dict] = None,
metrics: Optional[MetricsLogger] = None,
**kwargs,
) -> Any:
"""In a pipeline, we simply call each of our connector pieces after each other.
Each connector piece receives as input the output of the previous connector
piece in the pipeline.
"""
shared_data = shared_data if shared_data is not None else {}
full_stats = None
if metrics:
full_stats = metrics.log_time(
kwargs.get("metrics_prefix_key", ()) + (CONNECTOR_PIPELINE_TIMER,)
)
full_stats.__enter__()
# Loop through connector pieces and call each one with the output of the
# previous one. Thereby, time each connector piece's call.
for connector in self.connectors:
# TODO (sven): Add MetricsLogger to non-Learner components that have a
# LearnerConnector pipeline.
stats = None
if metrics:
stats = metrics.log_time(
kwargs.get("metrics_prefix_key", ())
+ (
TIMERS,
CONNECTOR_TIMERS,
to_snake_case(connector.__class__.__name__),
)
)
stats.__enter__()
batch = connector(
rl_module=rl_module,
batch=batch,
episodes=episodes,
explore=explore,
shared_data=shared_data,
metrics=metrics,
# Deprecated arg.
data=batch,
**kwargs,
)
if metrics:
stats.__exit__(None, None, None)
if not isinstance(batch, dict):
raise ValueError(
f"`data` returned by ConnectorV2 {connector} must be a dict! "
f"You returned {batch}. Check your (custom) connectors' "
f"`__call__()` method's return value and make sure you return "
f"the `batch` arg passed in (either altered or unchanged)."
)
if metrics:
full_stats.__exit__(None, None, None)
return batch
def remove(self, name_or_class: Union[str, Type]):
"""Remove a single connector piece in this pipeline by its name or class.
Args:
name_or_class: The name of the connector piece to be removed from the
pipeline.
"""
idx = -1
for i, c in enumerate(self.connectors):
if (isinstance(name_or_class, type) and c.__class__ is name_or_class) or (
isinstance(name_or_class, str) and c.__class__.__name__ == name_or_class
):
idx = i
break
if idx >= 0:
del self.connectors[idx]
self._fix_spaces(self.input_observation_space, self.input_action_space)
logger.info(
f"Removed connector {name_or_class} from {self.__class__.__name__}."
)
else:
logger.warning(
f"Trying to remove a non-existent connector {name_or_class}."
)
def insert_before(
self,
name_or_class: Union[str, type],
connector: ConnectorV2,
) -> ConnectorV2:
"""Insert a new connector piece before an existing piece (by name or class).
Args:
name_or_class: Name or class of the connector piece before which `connector`
will get inserted.
connector: The new connector piece to be inserted.
Returns:
The ConnectorV2 before which `connector` has been inserted.
"""
idx = -1
for idx, c in enumerate(self.connectors):
if (
isinstance(name_or_class, str) and c.__class__.__name__ == name_or_class
) or (isinstance(name_or_class, type) and c.__class__ is name_or_class):
break
if idx < 0:
raise ValueError(
f"Can not find connector with name or type '{name_or_class}'!"
)
next_connector = self.connectors[idx]
self.connectors.insert(idx, connector)
self._fix_spaces(self.input_observation_space, self.input_action_space)
logger.info(
f"Inserted {connector.__class__.__name__} before {name_or_class} "
f"to {self.__class__.__name__}."
)
return next_connector
def insert_after(
self,
name_or_class: Union[str, Type],
connector: ConnectorV2,
) -> ConnectorV2:
"""Insert a new connector piece after an existing piece (by name or class).
Args:
name_or_class: Name or class of the connector piece after which `connector`
will get inserted.
connector: The new connector piece to be inserted.
Returns:
The ConnectorV2 after which `connector` has been inserted.
"""
idx = -1
for idx, c in enumerate(self.connectors):
if (
isinstance(name_or_class, str) and c.__class__.__name__ == name_or_class
) or (isinstance(name_or_class, type) and c.__class__ is name_or_class):
break
if idx < 0:
raise ValueError(
f"Can not find connector with name or type '{name_or_class}'!"
)
prev_connector = self.connectors[idx]
self.connectors.insert(idx + 1, connector)
self._fix_spaces(self.input_observation_space, self.input_action_space)
logger.info(
f"Inserted {connector.__class__.__name__} after {name_or_class} "
f"to {self.__class__.__name__}."
)
return prev_connector
def prepend(self, connector: ConnectorV2) -> None:
"""Prepend a new connector at the beginning of a connector pipeline.
Args:
connector: The new connector piece to be prepended to this pipeline.
"""
self.connectors.insert(0, connector)
self._fix_spaces(self.input_observation_space, self.input_action_space)
logger.info(
f"Added {connector.__class__.__name__} to the beginning of "
f"{self.__class__.__name__}."
)
def append(self, connector: ConnectorV2) -> None:
"""Append a new connector at the end of a connector pipeline.
Args:
connector: The new connector piece to be appended to this pipeline.
"""
self.connectors.append(connector)
self._fix_spaces(self.input_observation_space, self.input_action_space)
logger.info(
f"Added {connector.__class__.__name__} to the end of "
f"{self.__class__.__name__}."
)
@override(ConnectorV2)
def get_state(
self,
components: Optional[Union[str, Collection[str]]] = None,
*,
not_components: Optional[Union[str, Collection[str]]] = None,
**kwargs,
) -> StateDict:
state = {}
for conn in self.connectors:
conn_name = type(conn).__name__
if self._check_component(conn_name, components, not_components):
sts = conn.get_state(
components=self._get_subcomponents(conn_name, components),
not_components=self._get_subcomponents(conn_name, not_components),
**kwargs,
)
# Ignore empty dicts.
if sts:
state[conn_name] = sts
return state
@override(ConnectorV2)
def set_state(self, state: Dict[str, Any]) -> None:
for conn in self.connectors:
conn_name = type(conn).__name__
if conn_name in state:
conn.set_state(state[conn_name])
@override(Checkpointable)
def get_checkpointable_components(self) -> List[Tuple[str, "Checkpointable"]]:
return [(type(conn).__name__, conn) for conn in self.connectors]
# Note that we don't have to override Checkpointable.get_ctor_args_and_kwargs and
# don't have to return the `connectors` c'tor kwarg from there. This is b/c all
# connector pieces in this pipeline are themselves Checkpointable components,
# so they will be properly written into this pipeline's checkpoint.
@override(Checkpointable)
def get_ctor_args_and_kwargs(self) -> Tuple[Tuple, Dict[str, Any]]:
return (
(self.input_observation_space, self.input_action_space), # *args
{
"connectors": [
(type(conn), *conn.get_ctor_args_and_kwargs())
for conn in self.connectors
]
},
)
@override(ConnectorV2)
def reset_state(self) -> None:
for conn in self.connectors:
conn.reset_state()
@override(ConnectorV2)
def merge_states(self, states: List[Dict[str, Any]]) -> Dict[str, Any]:
merged_states = {}
if not states:
return merged_states
for i, (key, item) in enumerate(states[0].items()):
state_list = [state[key] for state in states]
conn = self.connectors[i]
merged_states[key] = conn.merge_states(state_list)
return merged_states
def __repr__(self, indentation: int = 0):
return "\n".join(
[" " * indentation + self.__class__.__name__]
+ [c.__str__(indentation + 4) for c in self.connectors]
)
def __getitem__(
self,
key: Union[str, int, Type],
) -> Union[ConnectorV2, List[ConnectorV2]]:
"""Returns a single ConnectorV2 or list of ConnectorV2s that fit `key`.
If key is an int, we return a single ConnectorV2 at that index in this pipeline.
If key is a ConnectorV2 type or a string matching the class name of a
ConnectorV2 in this pipeline, we return a list of all ConnectorV2s in this
pipeline matching the specified class.
Args:
key: The key to find or to index by.
Returns:
A single ConnectorV2 or a list of ConnectorV2s matching `key`.
"""
# Key is an int -> Index into pipeline and return.
if isinstance(key, int):
return self.connectors[key]
# Key is a class.
elif isinstance(key, type):
results = []
for c in self.connectors:
if issubclass(c.__class__, key):
results.append(c)
return results
# Key is a string -> Find connector(s) by name.
elif isinstance(key, str):
results = []
for c in self.connectors:
if c.name == key:
results.append(c)
return results
# Slicing not supported (yet).
elif isinstance(key, slice):
raise NotImplementedError(
"Slicing of ConnectorPipelineV2 is currently not supported!"
)
else:
raise NotImplementedError(
f"Indexing ConnectorPipelineV2 by {type(key)} is currently not "
f"supported!"
)
@property
def observation_space(self):
if len(self) > 0:
return self.connectors[-1].observation_space
return self._observation_space
@property
def action_space(self):
if len(self) > 0:
return self.connectors[-1].action_space
return self._action_space
def _fix_spaces(self, input_observation_space, input_action_space):
if len(self) > 0:
# Fix each connector's input_observation- and input_action space in
# the pipeline.
obs_space = input_observation_space
act_space = input_action_space
for con in self.connectors:
con.input_action_space = act_space
con.input_observation_space = obs_space
obs_space = con.observation_space
act_space = con.action_space
| ConnectorPipelineV2 |
python | bokeh__bokeh | src/bokeh/client/states.py | {
"start": 2058,
"end": 2261
} | class ____(State):
''' The ``ClientConnection`` is not yet connected.
'''
async def run(self, connection: ClientConnection) -> None:
await connection._connect_async()
| NOT_YET_CONNECTED |
python | ipython__ipython | IPython/utils/_process_win32.py | {
"start": 589,
"end": 6775
} | class ____:
"""A context manager to protect command execution from UNC paths.
In the Win32 API, commands can't be invoked with the cwd being a UNC path.
This context manager temporarily changes directory to the 'C:' drive on
entering, and restores the original working directory on exit.
The context manager returns the starting working directory *if* it made a
change and None otherwise, so that users can apply the necessary adjustment
to their system calls in the event of a change.
Examples
--------
::
cmd = 'dir'
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
os.system(cmd)
"""
def __enter__(self) -> Optional[str]:
self.path = os.getcwd()
self.is_unc_path = self.path.startswith(r"\\")
if self.is_unc_path:
# change to c drive (as cmd.exe cannot handle UNC addresses)
os.chdir("C:")
return self.path
else:
# We return None to signal that there was no change in the working
# directory
return None
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_value: Optional[BaseException],
traceback: TracebackType,
) -> None:
if self.is_unc_path:
os.chdir(self.path)
def _system_body(p: subprocess.Popen) -> int:
"""Callback for _system."""
enc = DEFAULT_ENCODING
# Dec 2024: in both of these functions, I'm not sure why we .splitlines()
# the bytes and then decode each line individually instead of just decoding
# the whole thing at once.
def stdout_read() -> None:
try:
assert p.stdout is not None
for byte_line in read_no_interrupt(p.stdout).splitlines():
line = byte_line.decode(enc, "replace")
print(line, file=sys.stdout)
except Exception as e:
print(f"Error reading stdout: {e}", file=sys.stderr)
def stderr_read() -> None:
try:
assert p.stderr is not None
for byte_line in read_no_interrupt(p.stderr).splitlines():
line = byte_line.decode(enc, "replace")
print(line, file=sys.stderr)
except Exception as e:
print(f"Error reading stderr: {e}", file=sys.stderr)
stdout_thread = Thread(target=stdout_read)
stderr_thread = Thread(target=stderr_read)
stdout_thread.start()
stderr_thread.start()
# Wait to finish for returncode. Unfortunately, Python has a bug where
# wait() isn't interruptible (https://bugs.python.org/issue28168) so poll in
# a loop instead of just doing `return p.wait()`
while True:
result = p.poll()
if result is None:
time.sleep(0.01)
else:
break
# Join the threads to ensure they complete before returning
stdout_thread.join()
stderr_thread.join()
return result
def system(cmd: str) -> Optional[int]:
"""Win32 version of os.system() that works with network shares.
Note that this implementation returns None, as meant for use in IPython.
Parameters
----------
cmd : str or list
A command to be executed in the system shell.
Returns
-------
int : child process' exit code.
"""
# The controller provides interactivity with both
# stdin and stdout
# import _process_win32_controller
# _process_win32_controller.system(cmd)
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
res = process_handler(cmd, _system_body)
assert isinstance(res, int | type(None))
return res
return None
def getoutput(cmd: str) -> str:
"""Return standard output of executing cmd in a shell.
Accepts the same arguments as os.system().
Parameters
----------
cmd : str or list
A command to be executed in the system shell.
Returns
-------
stdout : str
"""
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
out = process_handler(cmd, lambda p: p.communicate()[0], STDOUT)
if out is None:
out = b""
return py3compat.decode(out)
try:
windll = ctypes.windll # type: ignore [attr-defined]
CommandLineToArgvW = windll.shell32.CommandLineToArgvW
CommandLineToArgvW.arg_types = [LPCWSTR, POINTER(c_int)]
CommandLineToArgvW.restype = POINTER(LPCWSTR)
LocalFree = windll.kernel32.LocalFree
LocalFree.res_type = HLOCAL
LocalFree.arg_types = [HLOCAL]
def arg_split(
commandline: str, posix: bool = False, strict: bool = True
) -> List[str]:
"""Split a command line's arguments in a shell-like manner.
This is a special version for windows that use a ctypes call to CommandLineToArgvW
to do the argv splitting. The posix parameter is ignored.
If strict=False, process_common.arg_split(...strict=False) is used instead.
"""
# CommandLineToArgvW returns path to executable if called with empty string.
if commandline.strip() == "":
return []
if not strict:
# not really a cl-arg, fallback on _process_common
return py_arg_split(commandline, posix=posix, strict=strict)
argvn = c_int()
result_pointer = CommandLineToArgvW(commandline.lstrip(), ctypes.byref(argvn))
try:
result_array_type = LPCWSTR * argvn.value
result = [
arg
for arg in result_array_type.from_address(
ctypes.addressof(result_pointer.contents)
)
if arg is not None
]
finally:
# for side effects
_ = LocalFree(result_pointer)
return result
except AttributeError:
arg_split = py_arg_split
def check_pid(pid: int) -> bool:
# OpenProcess returns 0 if no such process (of ours) exists
# positive int otherwise
return bool(windll.kernel32.OpenProcess(1, 0, pid))
| AvoidUNCPath |
python | dagster-io__dagster | python_modules/libraries/dagster-wandb/dagster_wandb/io_manager.py | {
"start": 1022,
"end": 1307
} | class ____(TypedDict):
dagster_run_id: str
wandb_host: str
wandb_entity: str
wandb_project: str
wandb_run_name: Optional[str]
wandb_run_id: Optional[str]
wandb_run_tags: Optional[list[str]]
base_dir: str
cache_duration_in_minutes: Optional[int]
| Config |
python | celery__celery | t/unit/events/test_state.py | {
"start": 4968,
"end": 7911
} | class ____:
def test_equality(self):
assert Worker(hostname='foo').hostname == 'foo'
assert Worker(hostname='foo') == Worker(hostname='foo')
assert Worker(hostname='foo') != Worker(hostname='bar')
assert hash(Worker(hostname='foo')) == hash(Worker(hostname='foo'))
assert hash(Worker(hostname='foo')) != hash(Worker(hostname='bar'))
def test_heartbeat_expires__Decimal(self):
assert heartbeat_expires(
Decimal(344313.37), freq=60, expire_window=200) == 344433.37
def test_compatible_with_Decimal(self):
w = Worker('george@vandelay.com')
timestamp, local_received = Decimal(time()), time()
w.event('worker-online', timestamp, local_received, fields={
'hostname': 'george@vandelay.com',
'timestamp': timestamp,
'local_received': local_received,
'freq': Decimal(5.6335431),
})
assert w.alive
def test_eq_ne_other(self):
assert Worker('a@b.com') == Worker('a@b.com')
assert Worker('a@b.com') != Worker('b@b.com')
assert Worker('a@b.com') != object()
def test_reduce_direct(self):
w = Worker('george@vandelay.com')
w.event('worker-online', 10.0, 13.0, fields={
'hostname': 'george@vandelay.com',
'timestamp': 10.0,
'local_received': 13.0,
'freq': 60,
})
fun, args = w.__reduce__()
w2 = fun(*args)
assert w2.hostname == w.hostname
assert w2.pid == w.pid
assert w2.freq == w.freq
assert w2.heartbeats == w.heartbeats
assert w2.clock == w.clock
assert w2.active == w.active
assert w2.processed == w.processed
assert w2.loadavg == w.loadavg
assert w2.sw_ident == w.sw_ident
def test_update(self):
w = Worker('george@vandelay.com')
w.update({'idx': '301'}, foo=1, clock=30, bah='foo')
assert w.idx == '301'
assert w.foo == 1
assert w.clock == 30
assert w.bah == 'foo'
def test_survives_missing_timestamp(self):
worker = Worker(hostname='foo')
worker.event('heartbeat')
assert worker.heartbeats == []
def test_repr(self):
assert repr(Worker(hostname='foo'))
def test_drift_warning(self):
worker = Worker(hostname='foo')
with patch('celery.events.state.warn') as warn:
worker.event(None, time() + (HEARTBEAT_DRIFT_MAX * 2), time())
warn.assert_called()
assert 'Substantial drift' in warn.call_args[0][0]
def test_updates_heartbeat(self):
worker = Worker(hostname='foo')
worker.event(None, time(), time())
assert len(worker.heartbeats) == 1
h1 = worker.heartbeats[0]
worker.event(None, time(), time() - 10)
assert len(worker.heartbeats) == 2
assert worker.heartbeats[-1] == h1
| test_Worker |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 30306,
"end": 31173
} | class ____(TokenStream):
def __init__(self,input):
self.input = input;
self.discardMask = BitSet()
def discard(self,arg):
if isinstance(arg,int):
self.discardMask.add(arg)
return
if isinstance(arg,BitSet):
self.discardMark = arg
return
raise TypeError("TokenStreamBasicFilter.discard requires" +
"integer or BitSet argument")
def nextToken(self):
tok = self.input.nextToken()
while tok and self.discardMask.member(tok.getType()):
tok = self.input.nextToken()
return tok
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TokenStreamHiddenTokenFilter ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
| TokenStreamBasicFilter |
python | dagster-io__dagster | python_modules/libraries/dagstermill/dagstermill_tests/test_logging.py | {
"start": 440,
"end": 4175
} | class ____(logging.Handler):
def __init__(self, file_path):
self.file_path = file_path
if not os.path.isfile(self.file_path):
with open(self.file_path, "a", encoding="utf8"): # Create file if does not exist
pass
super().__init__()
def emit(self, record):
with open(self.file_path, "a", encoding="utf8") as fd:
fd.write(
seven.json.dumps({"the_message": record.__dict__["dagster_meta"]["orig_message"]})
+ "\n"
)
@logger(config_schema={"name": String, "log_level": String, "file_path": String})
def test_file_logger(init_context):
klass = logging.getLoggerClass()
logger_ = klass(
init_context.logger_config["name"],
level=init_context.logger_config["log_level"],
)
handler = LogTestFileHandler(init_context.logger_config["file_path"])
logger_.addHandler(handler)
handler.setLevel(init_context.logger_config["log_level"])
return logger_
@job(
logger_defs={
"test": test_file_logger,
"critical": test_file_logger,
},
resource_defs={
"output_notebook_io_manager": local_output_notebook_io_manager,
},
)
def hello_logging_job():
hello_logging()
@job(
logger_defs={
"test": test_file_logger,
"critical": test_file_logger,
},
resource_defs={
"output_notebook_io_manager": (
ConfigurableLocalOutputNotebookIOManager.configure_at_launch()
),
},
)
def hello_logging_job_pythonic():
hello_logging()
@pytest.fixture(name="hello_logging_job_type", params=[True, False])
def hello_logging_job_type_fixture(request):
if request.param:
return hello_logging_job
else:
return hello_logging_job_pythonic
def test_logging(hello_logging_job_type) -> None:
with safe_tempfile_path() as test_file_path:
with safe_tempfile_path() as critical_file_path:
with instance_for_test() as instance:
execute_job(
reconstructable(hello_logging_job_type),
run_config={
"loggers": {
"test": {
"config": {
"name": "test",
"file_path": test_file_path,
"log_level": "DEBUG",
}
},
"critical": {
"config": {
"name": "critical",
"file_path": critical_file_path,
"log_level": "CRITICAL",
}
},
}
},
instance=instance,
)
with open(test_file_path, encoding="utf8") as test_file:
records = [
json.loads(line)
for line in test_file.read().strip("\n").split("\n")
if line
]
with open(critical_file_path, encoding="utf8") as critical_file:
critical_records = [
json.loads(line)
for line in critical_file.read().strip("\n").split("\n")
if line
]
messages = [x["the_message"] for x in records]
assert "Hello, there!" in messages
critical_messages = [x["the_message"] for x in critical_records]
assert "Hello, there!" not in critical_messages
| LogTestFileHandler |
python | python__mypy | mypy/test/testcmdline.py | {
"start": 746,
"end": 4980
} | class ____(DataSuite):
files = cmdline_files
native_sep = True
def run_case(self, testcase: DataDrivenTestCase) -> None:
if lxml is None and os.path.basename(testcase.file) == "reports.test":
pytest.skip("Cannot import lxml. Is it installed?")
for step in [1] + sorted(testcase.output2):
test_python_cmdline(testcase, step)
def test_python_cmdline(testcase: DataDrivenTestCase, step: int) -> None:
assert testcase.old_cwd is not None, "test was not properly set up"
# Write the program to a file.
program = "_program.py"
program_path = os.path.join(test_temp_dir, program)
with open(program_path, "w", encoding="utf8") as file:
for s in testcase.input:
file.write(f"{s}\n")
args = parse_args(testcase.input[0])
custom_cwd = parse_cwd(testcase.input[1]) if len(testcase.input) > 1 else None
args.append("--show-traceback")
if "--error-summary" not in args:
args.append("--no-error-summary")
if "--show-error-codes" not in args:
args.append("--hide-error-codes")
if "--disallow-empty-bodies" not in args:
args.append("--allow-empty-bodies")
if "--no-force-union-syntax" not in args:
args.append("--force-union-syntax")
# Type check the program.
fixed = [python3_path, "-m", "mypy"]
env = os.environ.copy()
env.pop("COLUMNS", None)
extra_path = os.path.join(os.path.abspath(test_temp_dir), "pypath")
env["PYTHONPATH"] = PREFIX
if os.path.isdir(extra_path):
env["PYTHONPATH"] += os.pathsep + extra_path
cwd = os.path.join(test_temp_dir, custom_cwd or "")
args = [arg.replace("$CWD", os.path.abspath(cwd)) for arg in args]
process = subprocess.Popen(
fixed + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env
)
outb, errb = process.communicate()
result = process.returncode
# Split output into lines.
out = [s.rstrip("\n\r") for s in str(outb, "utf8").splitlines()]
err = [s.rstrip("\n\r") for s in str(errb, "utf8").splitlines()]
if "PYCHARM_HOSTED" in os.environ:
for pos, line in enumerate(err):
if line.startswith("pydev debugger: "):
# Delete the attaching debugger message itself, plus the extra newline added.
del err[pos : pos + 2]
break
# Remove temp file.
os.remove(program_path)
# Compare actual output to expected.
if testcase.output_files:
# Ignore stdout, but we insist on empty stderr and zero status.
if err or result:
raise AssertionError(
"Expected zero status and empty stderr%s, got %d and\n%s"
% (" on step %d" % step if testcase.output2 else "", result, "\n".join(err + out))
)
check_test_output_files(testcase, step)
else:
if testcase.normalize_output:
out = normalize_error_messages(err + out)
obvious_result = 1 if out else 0
if obvious_result != result:
out.append(f"== Return code: {result}")
expected_out = testcase.output if step == 1 else testcase.output2[step]
# Strip "tmp/" out of the test so that # E: works...
expected_out = [s.replace("tmp" + os.sep, "") for s in expected_out]
assert_string_arrays_equal(
expected_out,
out,
"Invalid output ({}, line {}){}".format(
testcase.file, testcase.line, " on step %d" % step if testcase.output2 else ""
),
)
def parse_args(line: str) -> list[str]:
"""Parse the first line of the program for the command line.
This should have the form
# cmd: mypy <options>
For example:
# cmd: mypy pkg/
"""
m = re.match("# cmd: mypy (.*)$", line)
if not m:
return [] # No args; mypy will spit out an error.
return m.group(1).split()
def parse_cwd(line: str) -> str | None:
"""Parse the second line of the program for the command line.
This should have the form
# cwd: <directory>
For example:
# cwd: main/subdir
"""
m = re.match("# cwd: (.*)$", line)
return m.group(1) if m else None
| PythonCmdlineSuite |
python | kamyu104__LeetCode-Solutions | Python/count-stable-subarrays.py | {
"start": 46,
"end": 780
} | class ____(object):
def countStableSubarrays(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
def count(n):
return (n+1)*n//2
right = range(len(nums))
for i in reversed(xrange(len(nums)-1)):
if nums[i] <= nums[i+1]:
right[i] = right[i+1]
prefix = [0]*(len(nums)+1)
curr = 0
for i in xrange(len(nums)):
if i-1 >= 0 and nums[i-1] > nums[i]:
curr = 0
curr += 1
prefix[i+1] = prefix[i]+curr
return [count(min(right[l], r)-l+1)+(prefix[r+1]-prefix[min(right[l], r)+1]) for l, r in queries]
| Solution |
python | getsentry__sentry | src/sentry/analytics/events/issue_auto_resolved.py | {
"start": 76,
"end": 317
} | class ____(analytics.Event):
project_id: int | None = None
organization_id: int
group_id: int
issue_category: str | None = None
issue_type: str | None = None
analytics.register(IssueAutoResolvedEvent)
| IssueAutoResolvedEvent |
python | mlflow__mlflow | mlflow/utils/search_logged_model_utils.py | {
"start": 291,
"end": 860
} | class ____(Enum):
ATTRIBUTE = "attributes"
METRIC = "metrics"
PARAM = "params"
TAG = "tags"
@classmethod
def from_str(cls, s: str) -> "EntityType":
if s == "attributes":
return cls.ATTRIBUTE
if s == "metrics":
return cls.METRIC
if s == "params":
return cls.PARAM
if s == "tags":
return cls.TAG
raise MlflowException.invalid_parameter_value(
f"Invalid entity type: {s!r}. Expected one of {[e.value for e in cls]}."
)
@dataclass
| EntityType |
python | getsentry__sentry | src/sentry/snuba/dataset.py | {
"start": 2372,
"end": 2468
} | class ____(Enum):
ProfileChunks = "profile_chunks"
SearchIssues = "search_issues"
| StorageKey |
python | scipy__scipy | scipy/special/tests/test_basic.py | {
"start": 175864,
"end": 176196
} | class ____:
def test_pro_cv_seq(self):
prol = special.pro_cv_seq(0, 3, 1)
assert_allclose(prol, array([0.319000,
2.593084,
6.533471,
12.514462]),
atol=1.5e-5, rtol=0)
| TestProCvSeq |
python | getsentry__sentry | src/sentry/api/serializers/models/project.py | {
"start": 10319,
"end": 10454
} | class ____(TypedDict, total=False):
stats: Any
transactionStats: Any
sessionStats: Any
| _ProjectSerializerOptionalBaseResponse |
python | scrapy__scrapy | tests/AsyncCrawlerRunner/multi_seq.py | {
"start": 265,
"end": 657
} | class ____(Spider):
name = "no_request"
async def start(self):
return
yield
@deferred_f_from_coro_f
async def main(reactor):
configure_logging()
runner = AsyncCrawlerRunner()
await runner.crawl(NoRequestsSpider)
await runner.crawl(NoRequestsSpider)
install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")
react(main)
| NoRequestsSpider |
python | kamyu104__LeetCode-Solutions | Python/count-vowel-substrings-of-a-string.py | {
"start": 50,
"end": 907
} | class ____(object):
def countVowelSubstrings(self, word):
"""
:type word: str
:rtype: int
"""
VOWELS = set("aeiou")
k = 5
def atLeastK(word, k):
cnt = collections.Counter()
result = left = right = 0
for i, c in enumerate(word):
if c not in VOWELS:
cnt = collections.Counter()
left = right = i+1
continue
cnt[c] += 1
while len(cnt) > k-1:
cnt[word[right]] -= 1
if not cnt[word[right]]:
del cnt[word[right]]
right += 1
result += right-left
return result
return atLeastK(word, k)
# Time: O(n)
# Space: O(1)
import collections
| Solution |
python | pypa__pip | src/pip/_internal/network/xmlrpc.py | {
"start": 455,
"end": 1830
} | class ____(xmlrpc.client.Transport):
"""Provide a `xmlrpclib.Transport` implementation via a `PipSession`
object.
"""
def __init__(
self, index_url: str, session: PipSession, use_datetime: bool = False
) -> None:
super().__init__(use_datetime)
index_parts = urllib.parse.urlparse(index_url)
self._scheme = index_parts.scheme
self._session = session
def request(
self,
host: "_HostType",
handler: str,
request_body: "SizedBuffer",
verbose: bool = False,
) -> tuple["_Marshallable", ...]:
assert isinstance(host, str)
parts = (self._scheme, host, handler, None, None, None)
url = urllib.parse.urlunparse(parts)
try:
headers = {"Content-Type": "text/xml"}
response = self._session.post(
url,
data=request_body,
headers=headers,
stream=True,
)
raise_for_status(response)
self.verbose = verbose
return self.parse_response(response.raw)
except NetworkConnectionError as exc:
assert exc.response
logger.critical(
"HTTP error %s while getting %s",
exc.response.status_code,
url,
)
raise
| PipXmlrpcTransport |
python | docker__docker-py | tests/integration/api_container_test.py | {
"start": 47018,
"end": 52024
} | class ____(BaseAPIIntegrationTest):
def test_run_container_streaming(self):
container = self.client.create_container(TEST_IMG, '/bin/sh',
detach=True, stdin_open=True)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
sock = self.client.attach_socket(container, ws=False)
assert sock.fileno() > -1
def test_run_container_reading_socket_http(self):
line = 'hi there and stuff and things, words!'
# `echo` appends CRLF, `printf` doesn't
command = f"printf '{line}'"
container = self.client.create_container(TEST_IMG, command,
detach=True, tty=False)
self.tmp_containers.append(container)
opts = {"stdout": 1, "stream": 1, "logs": 1}
pty_stdout = self.client.attach_socket(container, opts)
self.addCleanup(pty_stdout.close)
self.client.start(container)
(stream, next_size) = next_frame_header(pty_stdout)
assert stream == 1 # correspond to stdout
assert next_size == len(line)
data = read_exactly(pty_stdout, next_size)
assert data.decode('utf-8') == line
@pytest.mark.xfail(condition=bool(os.environ.get('DOCKER_CERT_PATH', '')),
reason='DOCKER_CERT_PATH not respected for websockets')
def test_run_container_reading_socket_ws(self):
line = 'hi there and stuff and things, words!'
# `echo` appends CRLF, `printf` doesn't
command = f"printf '{line}'"
container = self.client.create_container(TEST_IMG, command,
detach=True, tty=False)
self.tmp_containers.append(container)
opts = {"stdout": 1, "stream": 1, "logs": 1}
pty_stdout = self.client.attach_socket(container, opts, ws=True)
self.addCleanup(pty_stdout.close)
self.client.start(container)
data = pty_stdout.recv()
assert data.decode('utf-8') == line
@pytest.mark.timeout(10)
def test_attach_no_stream(self):
container = self.client.create_container(
TEST_IMG, 'echo hello'
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.wait(container, condition='not-running')
output = self.client.attach(container, stream=False, logs=True)
assert output == 'hello\n'.encode(encoding='ascii')
@pytest.mark.timeout(10)
@pytest.mark.skipif(os.environ.get('DOCKER_HOST', '').startswith('ssh://'),
reason='No cancellable streams over SSH')
@pytest.mark.xfail(condition=os.environ.get('DOCKER_TLS_VERIFY') or
os.environ.get('DOCKER_CERT_PATH'),
reason='Flaky test on TLS')
def test_attach_stream_and_cancel(self):
container = self.client.create_container(
TEST_IMG, 'sh -c "sleep 2 && echo hello && sleep 60"',
tty=True
)
self.tmp_containers.append(container)
self.client.start(container)
output = self.client.attach(container, stream=True, logs=True)
threading.Timer(3, output.close).start()
lines = []
for line in output:
lines.append(line)
assert len(lines) == 1
assert lines[0] == 'hello\r\n'.encode(encoding='ascii')
def test_detach_with_default(self):
container = self.client.create_container(
TEST_IMG, 'cat',
detach=True, stdin_open=True, tty=True
)
self.tmp_containers.append(container)
self.client.start(container)
sock = self.client.attach_socket(
container,
{'stdin': True, 'stream': True}
)
assert_cat_socket_detached_with_keys(
sock, [ctrl_with('p'), ctrl_with('q')]
)
def test_detach_with_config_file(self):
self.client._general_configs['detachKeys'] = 'ctrl-p'
container = self.client.create_container(
TEST_IMG, 'cat',
detach=True, stdin_open=True, tty=True
)
self.tmp_containers.append(container)
self.client.start(container)
sock = self.client.attach_socket(
container,
{'stdin': True, 'stream': True}
)
assert_cat_socket_detached_with_keys(sock, [ctrl_with('p')])
def test_detach_with_arg(self):
self.client._general_configs['detachKeys'] = 'ctrl-p'
container = self.client.create_container(
TEST_IMG, 'cat',
detach=True, stdin_open=True, tty=True
)
self.tmp_containers.append(container)
self.client.start(container)
sock = self.client.attach_socket(
container,
{'stdin': True, 'stream': True, 'detachKeys': 'ctrl-x'}
)
assert_cat_socket_detached_with_keys(sock, [ctrl_with('x')])
| AttachContainerTest |
python | apache__airflow | airflow-core/tests/unit/plugins/test_plugin.py | {
"start": 4699,
"end": 4836
} | class ____(PriorityWeightStrategy):
def get_weight(self, ti):
return 1
# Defining the plugin class
| CustomPriorityWeightStrategy |
python | huggingface__transformers | src/transformers/models/zoedepth/modeling_zoedepth.py | {
"start": 13229,
"end": 15304
} | class ____(nn.Module):
"""
Relative depth estimation head consisting of 3 convolutional layers. It progressively halves the feature dimension and upsamples
the predictions to the input resolution after the first convolutional layer (details can be found in DPT's paper's
supplementary material).
"""
def __init__(self, config):
super().__init__()
self.head_in_index = config.head_in_index
self.projection = None
if config.add_projection:
self.projection = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
features = config.fusion_hidden_size
self.conv1 = nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1)
self.upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)
self.conv2 = nn.Conv2d(features // 2, config.num_relative_features, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(config.num_relative_features, 1, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_states: list[torch.Tensor]) -> torch.Tensor:
# use last features
hidden_states = hidden_states[self.head_in_index]
if self.projection is not None:
hidden_states = self.projection(hidden_states)
hidden_states = nn.ReLU()(hidden_states)
hidden_states = self.conv1(hidden_states)
hidden_states = self.upsample(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = nn.ReLU()(hidden_states)
# we need the features here (after second conv + ReLu)
features = hidden_states
hidden_states = self.conv3(hidden_states)
hidden_states = nn.ReLU()(hidden_states)
predicted_depth = hidden_states.squeeze(dim=1)
return predicted_depth, features
def log_binom(n, k, eps=1e-7):
"""log(nCk) using stirling approximation"""
n = n + eps
k = k + eps
return n * torch.log(n) - k * torch.log(k) - (n - k) * torch.log(n - k + eps)
| ZoeDepthRelativeDepthEstimationHead |
python | sqlalchemy__sqlalchemy | test/sql/test_text.py | {
"start": 27676,
"end": 38297
} | class ____(
fixtures.TestBase, AssertsCompiledSQL, fixtures.DistinctOnFixture
):
__dialect__ = "default"
def _test_exception(self, stmt, offending_clause, dialect=None):
assert_raises_message(
exc.CompileError,
r"Can't resolve label reference for ORDER BY / GROUP BY / "
"DISTINCT etc. "
"Textual SQL "
"expression %r should be explicitly "
r"declared as text\(%r\)" % (offending_clause, offending_clause),
self.assert_compile,
stmt,
"not expected",
dialect=dialect,
)
def test_order_by_label(self):
stmt = select(table1.c.myid.label("foo")).order_by("foo")
self.assert_compile(
stmt, "SELECT mytable.myid AS foo FROM mytable ORDER BY foo"
)
def test_no_order_by_text(self):
stmt = select(text("foo")).order_by("foo")
with expect_raises_message(
exc.CompileError,
r"Can't resolve label reference for ORDER BY / GROUP BY / ",
):
stmt.compile()
def test_order_by_colname(self):
stmt = select(table1.c.myid).order_by("name")
self.assert_compile(
stmt, "SELECT mytable.myid FROM mytable ORDER BY mytable.name"
)
def test_order_by_alias_colname(self):
t1 = table1.alias()
stmt = (
select(t1.c.myid)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.order_by("name")
)
self.assert_compile(
stmt,
"SELECT mytable_1.myid AS mytable_1_myid "
"FROM mytable AS mytable_1 ORDER BY mytable_1.name",
)
@testing.combinations(
((column("q") + 5).label("a"), "a", ()),
(column("q").op("+")(5).label("a"), "a", ()),
((column("q") + 5).label("a"), "a DESC", (desc,)),
(column("q").op("+")(5).label("a"), "a DESC", (desc,)),
)
def test_order_by_expr(self, case, expected, modifiers):
order_by = case
for mod in modifiers:
order_by = mod(order_by)
stmt = select(case).order_by(order_by)
col_expr = str(case)
self.assert_compile(
stmt, "SELECT %s AS a ORDER BY %s" % (col_expr, expected)
)
def test_order_by_named_label_from_anon_label(self):
s1 = select(table1.c.myid.label(None).label("foo"), table1.c.name)
stmt = s1.order_by("foo")
self.assert_compile(
stmt,
"SELECT mytable.myid AS foo, mytable.name "
"FROM mytable ORDER BY foo",
)
def test_order_by_outermost_label(self):
# test [ticket:3335], assure that order_by("foo")
# catches the label named "foo" in the columns clause only,
# and not the label named "foo" in the FROM clause
s1 = select(table1.c.myid.label("foo"), table1.c.name).alias()
stmt = select(s1.c.name, func.bar().label("foo")).order_by("foo")
self.assert_compile(
stmt,
"SELECT anon_1.name, bar() AS foo FROM "
"(SELECT mytable.myid AS foo, mytable.name AS name "
"FROM mytable) AS anon_1 ORDER BY foo",
)
def test_unresolvable_warning_order_by(self):
stmt = select(table1.c.myid).order_by("foobar")
self._test_exception(stmt, "foobar")
def test_distinct_label(self, distinct_on_fixture):
stmt = distinct_on_fixture(select(table1.c.myid.label("foo")), "foo")
self.assert_compile(
stmt,
"SELECT DISTINCT ON (foo) mytable.myid AS foo FROM mytable",
dialect="postgresql",
)
def test_unresolvable_distinct_label(self, distinct_on_fixture):
stmt = distinct_on_fixture(
select(table1.c.myid.label("foo")), "not a label"
)
self._test_exception(stmt, "not a label", dialect="postgresql")
def test_group_by_label(self):
stmt = select(table1.c.myid.label("foo")).group_by("foo")
self.assert_compile(
stmt, "SELECT mytable.myid AS foo FROM mytable GROUP BY foo"
)
def test_group_by_colname(self):
stmt = select(table1.c.myid).group_by("name")
self.assert_compile(
stmt, "SELECT mytable.myid FROM mytable GROUP BY mytable.name"
)
def test_unresolvable_warning_group_by(self):
stmt = select(table1.c.myid).group_by("foobar")
self._test_exception(stmt, "foobar")
def test_asc(self):
stmt = select(table1.c.myid).order_by(asc("name"), "description")
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable "
"ORDER BY mytable.name ASC, mytable.description",
)
def test_group_by_subquery(self):
stmt = select(table1).alias()
stmt = (
select(stmt)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.group_by("myid")
)
self.assert_compile(
stmt,
"SELECT anon_1.myid AS anon_1_myid, anon_1.name AS anon_1_name, "
"anon_1.description AS anon_1_description FROM "
"(SELECT mytable.myid AS myid, mytable.name AS name, "
"mytable.description AS description FROM mytable) AS anon_1 "
"GROUP BY anon_1.myid",
)
def test_order_by_literal_col_quoting_one(self):
col = literal_column("SUM(ABC)").label("SUM(ABC)")
tbl = table("my_table")
query = select(col).select_from(tbl).order_by(col)
self.assert_compile(
query,
'SELECT SUM(ABC) AS "SUM(ABC)" FROM my_table ORDER BY "SUM(ABC)"',
)
def test_order_by_literal_col_quoting_two(self):
col = literal_column("SUM(ABC)").label("SUM(ABC)_")
tbl = table("my_table")
query = select(col).select_from(tbl).order_by(col)
self.assert_compile(
query,
'SELECT SUM(ABC) AS "SUM(ABC)_" FROM my_table ORDER BY '
'"SUM(ABC)_"',
)
def test_order_by_literal_col_quoting_one_explicit_quote(self):
col = literal_column("SUM(ABC)").label(quoted_name("SUM(ABC)", True))
tbl = table("my_table")
query = select(col).select_from(tbl).order_by(col)
self.assert_compile(
query,
'SELECT SUM(ABC) AS "SUM(ABC)" FROM my_table ORDER BY "SUM(ABC)"',
)
def test_order_by_literal_col_quoting_two_explicit_quote(self):
col = literal_column("SUM(ABC)").label(quoted_name("SUM(ABC)_", True))
tbl = table("my_table")
query = select(col).select_from(tbl).order_by(col)
self.assert_compile(
query,
'SELECT SUM(ABC) AS "SUM(ABC)_" FROM my_table ORDER BY '
'"SUM(ABC)_"',
)
def test_order_by_func_label_desc(self):
stmt = select(func.foo("bar").label("fb"), table1).order_by(desc("fb"))
self.assert_compile(
stmt,
"SELECT foo(:foo_1) AS fb, mytable.myid, mytable.name, "
"mytable.description FROM mytable ORDER BY fb DESC",
)
def test_pg_distinct(self, distinct_on_fixture):
stmt = distinct_on_fixture(select(table1), "name")
self.assert_compile(
stmt,
"SELECT DISTINCT ON (mytable.name) mytable.myid, "
"mytable.name, mytable.description FROM mytable",
dialect="postgresql",
)
def test_over(self):
stmt = select(column("foo"), column("bar")).subquery()
stmt = select(
func.row_number().over(order_by="foo", partition_by="bar")
).select_from(stmt)
self.assert_compile(
stmt,
"SELECT row_number() OVER "
"(PARTITION BY anon_2.bar ORDER BY anon_2.foo) "
"AS anon_1 FROM (SELECT foo, bar) AS anon_2",
)
def test_union_column(self):
s1 = select(table1)
s2 = select(table1)
stmt = union(s1, s2).order_by("name")
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable UNION SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable ORDER BY name",
)
def test_union_label(self):
s1 = select(func.foo("hoho").label("x"))
s2 = select(func.foo("Bar").label("y"))
stmt = union(s1, s2).order_by("x")
self.assert_compile(
stmt,
"SELECT foo(:foo_1) AS x UNION SELECT foo(:foo_2) AS y ORDER BY x",
)
def test_standalone_units_stringable(self):
self.assert_compile(desc("somelabel"), "somelabel DESC")
def test_columnadapter_anonymized(self):
"""test issue #3148
Testing the anonymization applied from the ColumnAdapter.columns
collection, typically as used in eager loading.
"""
exprs = [
table1.c.myid,
table1.c.name.label("t1name"),
func.foo("hoho").label("x"),
]
ta = table1.alias()
adapter = sql_util.ColumnAdapter(ta, anonymize_labels=True)
s1 = (
select(*[adapter.columns[expr] for expr in exprs])
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.order_by("myid", "t1name", "x")
)
assert_raises_message(
exc.CompileError,
r"Can't resolve label reference for ORDER BY / GROUP BY / "
"DISTINCT etc. "
"Textual SQL "
"expression 't1name' should be explicitly "
r"declared as text\('t1name'\)",
s1.compile,
)
def test_columnadapter_non_anonymized(self):
"""test issue #3148
Testing the anonymization applied from the ColumnAdapter.columns
collection, typically as used in eager loading.
"""
exprs = [
table1.c.myid,
table1.c.name.label("t1name"),
func.foo("hoho").label("x"),
]
ta = table1.alias()
adapter = sql_util.ColumnAdapter(ta)
s1 = (
select(*[adapter.columns[expr] for expr in exprs])
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.order_by("myid", "t1name", "x")
)
# labels are maintained
self.assert_compile(
s1,
"SELECT mytable_1.myid AS mytable_1_myid, "
"mytable_1.name AS t1name, foo(:foo_1) AS x "
"FROM mytable AS mytable_1 ORDER BY mytable_1.myid, t1name, x",
)
| OrderByLabelResolutionTest |
python | PyCQA__pylint | pylint/testutils/reporter_for_tests.py | {
"start": 2097,
"end": 2321
} | class ____(BaseReporter):
def display_reports(self, layout: Section) -> None:
"""Ignore layouts and don't call self._display()."""
def _display(self, layout: Section) -> None:
pass
| FunctionalTestReporter |
python | django-debug-toolbar__django-debug-toolbar | tests/test_login_not_required.py | {
"start": 714,
"end": 1568
} | class ____(SimpleTestCase):
def test_panels(self):
for uri in (
"history_sidebar",
"history_refresh",
"sql_select",
"sql_explain",
"sql_profile",
"template_source",
):
with self.subTest(uri=uri):
response = self.client.get(reverse(f"djdt:{uri}"))
self.assertNotEqual(response.status_code, 200)
def test_render_panel(self):
request_id = toolbar_request_id()
get_store().save_panel(
request_id, VersionsPanel.panel_id, {"value": "Test data"}
)
data = {"request_id": request_id, "panel_id": VersionsPanel.panel_id}
response = self.client.get(reverse("djdt:render_panel"), query_params=data)
self.assertEqual(response.status_code, 200)
| LoginNotRequiredTestCase |
python | getsentry__sentry | src/sentry/shared_integrations/response/sequence.py | {
"start": 168,
"end": 450
} | class ____(list, BaseApiResponse):
def __init__(self, data: Sequence[Any], *args: Any, **kwargs: Any) -> None:
list.__init__(self, data)
BaseApiResponse.__init__(self, *args, **kwargs)
@property
def json(self) -> Any:
return self
| SequenceApiResponse |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/PlotCurveItem.py | {
"start": 47236,
"end": 48140
} | class ____(PlotCurveItem):
"""Plot curve that monitors an ROI and image for changes to automatically replot."""
def __init__(self, roi, data, img, axes=(0,1), xVals=None, color=None):
self.roi = roi
self.roiData = data
self.roiImg = img
self.axes = axes
self.xVals = xVals
PlotCurveItem.__init__(self, self.getRoiData(), x=self.xVals, color=color)
#roi.connect(roi, QtCore.SIGNAL('regionChanged'), self.roiChangedEvent)
roi.sigRegionChanged.connect(self.roiChangedEvent)
#self.roiChangedEvent()
def getRoiData(self):
d = self.roi.getArrayRegion(self.roiData, self.roiImg, axes=self.axes)
if d is None:
return
while d.ndim > 1:
d = d.mean(axis=1)
return d
def roiChangedEvent(self):
d = self.getRoiData()
self.updateData(d, self.xVals)
| ROIPlotItem |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.