language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pola-rs__polars | py-polars/src/polars/io/csv/batched_reader.py | {
"start": 712,
"end": 5018
} | class ____:
"""Read a CSV file in batches."""
def __init__(
self,
source: str | Path,
*,
has_header: bool = True,
columns: Sequence[int] | Sequence[str] | None = None,
separator: str = ",",
comment_prefix: str | None = None,
quote_char: str | None = '"',
skip_rows: int = 0,
skip_lines: int = 0,
schema_overrides: SchemaDict | Sequence[PolarsDataType] | None = None,
null_values: str | Sequence[str] | dict[str, str] | None = None,
missing_utf8_is_empty_string: bool = False,
ignore_errors: bool = False,
try_parse_dates: bool = False,
n_threads: int | None = None,
infer_schema_length: int | None = N_INFER_DEFAULT,
batch_size: int = 50_000,
n_rows: int | None = None,
encoding: CsvEncoding = "utf8",
low_memory: bool = False,
rechunk: bool = True,
skip_rows_after_header: int = 0,
row_index_name: str | None = None,
row_index_offset: int = 0,
eol_char: str = "\n",
new_columns: Sequence[str] | None = None,
raise_if_empty: bool = True,
truncate_ragged_lines: bool = False,
decimal_comma: bool = False,
) -> None:
path = normalize_filepath(source, check_not_directory=False)
dtype_list: Sequence[tuple[str, PolarsDataType]] | None = None
dtype_slice: Sequence[PolarsDataType] | None = None
if schema_overrides is not None:
if isinstance(schema_overrides, dict):
dtype_list = []
for k, v in schema_overrides.items():
dtype_list.append((k, parse_into_dtype(v)))
elif isinstance(schema_overrides, Sequence):
dtype_slice = schema_overrides
else:
msg = "`schema_overrides` arg should be list or dict"
raise TypeError(msg)
processed_null_values = _process_null_values(null_values)
projection, columns = parse_columns_arg(columns)
self._reader = PyBatchedCsv.new(
infer_schema_length=infer_schema_length,
chunk_size=batch_size,
has_header=has_header,
ignore_errors=ignore_errors,
n_rows=n_rows,
skip_rows=skip_rows,
skip_lines=skip_lines,
projection=projection,
separator=separator,
rechunk=rechunk,
columns=columns,
encoding=encoding,
n_threads=n_threads,
path=path,
schema_overrides=dtype_list,
overwrite_dtype_slice=dtype_slice,
low_memory=low_memory,
comment_prefix=comment_prefix,
quote_char=quote_char,
null_values=processed_null_values,
missing_utf8_is_empty_string=missing_utf8_is_empty_string,
try_parse_dates=try_parse_dates,
skip_rows_after_header=skip_rows_after_header,
row_index=parse_row_index_args(row_index_name, row_index_offset),
eol_char=eol_char,
raise_if_empty=raise_if_empty,
truncate_ragged_lines=truncate_ragged_lines,
decimal_comma=decimal_comma,
)
self.new_columns = new_columns
def next_batches(self, n: int) -> list[DataFrame] | None:
"""
Read `n` batches from the reader.
These batches will be parallelized over the available threads.
Parameters
----------
n
Number of chunks to fetch; ideally this is >= number of threads.
Examples
--------
>>> reader = pl.read_csv_batched(
... "./pdsh/tables_scale_100/lineitem.tbl",
... separator="|",
... try_parse_dates=True,
... ) # doctest: +SKIP
>>> reader.next_batches(5) # doctest: +SKIP
Returns
-------
list of DataFrames
"""
if (batches := self._reader.next_batches(n)) is not None:
if self.new_columns:
return [
_update_columns(wrap_df(df), self.new_columns) for df in batches
]
else:
return [wrap_df(df) for df in batches]
return None
| BatchedCsvReader |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py | {
"start": 21015,
"end": 22014
} | class ____(TypeSystemDefinition):
__slots__ = ('loc', 'directives', 'operation_types',)
_fields = ('operation_types',)
def __init__(self, operation_types, loc=None, directives=None):
self.operation_types = operation_types
self.loc = loc
self.directives = directives
def __eq__(self, other):
return (
self is other or (
isinstance(other, SchemaDefinition) and
self.operation_types == other.operation_types and
self.directives == other.directives
)
)
def __repr__(self):
return ('SchemaDefinition('
'operation_types={self.operation_types!r}'
', directives={self.directives!r}'
')').format(self=self)
def __copy__(self):
return type(self)(
self.operation_types,
self.loc,
self.directives,
)
def __hash__(self):
return id(self)
| SchemaDefinition |
python | ray-project__ray | rllib/utils/exploration/parameter_noise.py | {
"start": 1000,
"end": 17226
} | class ____(Exploration):
"""An exploration that changes a Model's parameters.
Implemented based on:
[1] https://openai.com/research/better-exploration-with-parameter-noise
[2] https://arxiv.org/pdf/1706.01905.pdf
At the beginning of an episode, Gaussian noise is added to all weights
of the model. At the end of the episode, the noise is undone and an action
diff (pi-delta) is calculated, from which we determine the changes in the
noise's stddev for the next episode.
"""
def __init__(
self,
action_space,
*,
framework: str,
policy_config: dict,
model: ModelV2,
initial_stddev: float = 1.0,
random_timesteps: int = 10000,
sub_exploration: Optional[dict] = None,
**kwargs
):
"""Initializes a ParameterNoise Exploration object.
Args:
initial_stddev: The initial stddev to use for the noise.
random_timesteps: The number of timesteps to act completely
randomly (see [1]).
sub_exploration: Optional sub-exploration config.
None for auto-detection/setup.
"""
assert framework is not None
super().__init__(
action_space,
policy_config=policy_config,
model=model,
framework=framework,
**kwargs
)
self.stddev = get_variable(
initial_stddev, framework=self.framework, tf_name="stddev"
)
self.stddev_val = initial_stddev # Out-of-graph tf value holder.
# The weight variables of the Model where noise should be applied to.
# This excludes any variable, whose name contains "LayerNorm" (those
# are BatchNormalization layers, which should not be perturbed).
self.model_variables = [
v
for k, v in self.model.trainable_variables(as_dict=True).items()
if "LayerNorm" not in k
]
# Our noise to be added to the weights. Each item in `self.noise`
# corresponds to one Model variable and holding the Gaussian noise to
# be added to that variable (weight).
self.noise = []
for var in self.model_variables:
name_ = var.name.split(":")[0] + "_noisy" if var.name else ""
self.noise.append(
get_variable(
np.zeros(var.shape, dtype=np.float32),
framework=self.framework,
tf_name=name_,
torch_tensor=True,
device=self.device,
)
)
# tf-specific ops to sample, assign and remove noise.
if self.framework == "tf" and not tf.executing_eagerly():
self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()
self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
self.tf_remove_noise_op = self._tf_remove_noise_op()
# Create convenience sample+add op for tf.
with tf1.control_dependencies([self.tf_sample_new_noise_op]):
add_op = self._tf_add_stored_noise_op()
with tf1.control_dependencies([add_op]):
self.tf_sample_new_noise_and_add_op = tf.no_op()
# Whether the Model's weights currently have noise added or not.
self.weights_are_currently_noisy = False
# Auto-detection of underlying exploration functionality.
if sub_exploration is None:
# For discrete action spaces, use an underlying EpsilonGreedy with
# a special schedule.
if isinstance(self.action_space, Discrete):
sub_exploration = {
"type": "EpsilonGreedy",
"epsilon_schedule": {
"type": "PiecewiseSchedule",
# Step function (see [2]).
"endpoints": [
(0, 1.0),
(random_timesteps + 1, 1.0),
(random_timesteps + 2, 0.01),
],
"outside_value": 0.01,
},
}
elif isinstance(self.action_space, Box):
sub_exploration = {
"type": "OrnsteinUhlenbeckNoise",
"random_timesteps": random_timesteps,
}
# TODO(sven): Implement for any action space.
else:
raise NotImplementedError
self.sub_exploration = from_config(
Exploration,
sub_exploration,
framework=self.framework,
action_space=self.action_space,
policy_config=self.policy_config,
model=self.model,
**kwargs
)
# Whether we need to call `self._delayed_on_episode_start` before
# the forward pass.
self.episode_started = False
@override(Exploration)
def before_compute_actions(
self,
*,
timestep: Optional[int] = None,
explore: Optional[bool] = None,
tf_sess: Optional["tf.Session"] = None
):
explore = explore if explore is not None else self.policy_config["explore"]
# Is this the first forward pass in the new episode? If yes, do the
# noise re-sampling and add to weights.
if self.episode_started:
self._delayed_on_episode_start(explore, tf_sess)
# Add noise if necessary.
if explore and not self.weights_are_currently_noisy:
self._add_stored_noise(tf_sess=tf_sess)
# Remove noise if necessary.
elif not explore and self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def get_exploration_action(
self,
*,
action_distribution: ActionDistribution,
timestep: Union[TensorType, int],
explore: Union[TensorType, bool]
):
# Use our sub-exploration object to handle the final exploration
# action (depends on the algo-type/action-space/etc..).
return self.sub_exploration.get_exploration_action(
action_distribution=action_distribution, timestep=timestep, explore=explore
)
@override(Exploration)
def on_episode_start(
self,
policy: "Policy",
*,
environment: BaseEnv = None,
episode: int = None,
tf_sess: Optional["tf.Session"] = None
):
# We have to delay the noise-adding step by one forward call.
# This is due to the fact that the optimizer does it's step right
# after the episode was reset (and hence the noise was already added!).
# We don't want to update into a noisy net.
self.episode_started = True
def _delayed_on_episode_start(self, explore, tf_sess):
# Sample fresh noise and add to weights.
if explore:
self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
# Only sample, don't apply anything to the weights.
else:
self._sample_new_noise(tf_sess=tf_sess)
self.episode_started = False
@override(Exploration)
def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):
# Remove stored noise from weights (only if currently noisy).
if self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def postprocess_trajectory(
self,
policy: "Policy",
sample_batch: SampleBatch,
tf_sess: Optional["tf.Session"] = None,
):
noisy_action_dist = noise_free_action_dist = None
# Adjust the stddev depending on the action (pi)-distance.
# Also see [1] for details.
# TODO(sven): Find out whether this can be scrapped by simply using
# the `sample_batch` to get the noisy/noise-free action dist.
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
else:
raise NotImplementedError # TODO(sven): Other action-dist cases.
if self.weights_are_currently_noisy:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=not self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
if noisy_action_dist is None:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
delta = distance = None
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
# Calculate KL-divergence (DKL(clean||noisy)) according to [2].
# TODO(sven): Allow KL-divergence to be calculated by our
# Distribution classes (don't support off-graph/numpy yet).
distance = np.nanmean(
np.sum(
noise_free_action_dist
* np.log(
noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
),
1,
)
)
current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
"cur_epsilon"
]
delta = -np.log(1 - current_epsilon + current_epsilon / self.action_space.n)
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
# Calculate MSE between noisy and non-noisy output (see [2]).
distance = np.sqrt(
np.mean(np.square(noise_free_action_dist - noisy_action_dist))
)
current_scale = self.sub_exploration.get_state(sess=tf_sess)["cur_scale"]
delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * current_scale
# Adjust stddev according to the calculated action-distance.
if distance <= delta:
self.stddev_val *= 1.01
else:
self.stddev_val /= 1.01
# Update our state (self.stddev and self.stddev_val).
self.set_state(self.get_state(), sess=tf_sess)
return sample_batch
def _sample_new_noise(self, *, tf_sess=None):
"""Samples new noise and stores it in `self.noise`."""
if self.framework == "tf":
tf_sess.run(self.tf_sample_new_noise_op)
elif self.framework == "tf2":
self._tf_sample_new_noise_op()
else:
for i in range(len(self.noise)):
self.noise[i] = torch.normal(
mean=torch.zeros(self.noise[i].size()), std=self.stddev
).to(self.device)
def _tf_sample_new_noise_op(self):
added_noises = []
for noise in self.noise:
added_noises.append(
tf1.assign(
noise,
tf.random.normal(
shape=noise.shape, stddev=self.stddev, dtype=tf.float32
),
)
)
return tf.group(*added_noises)
def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
if self.framework == "tf":
if override and self.weights_are_currently_noisy:
tf_sess.run(self.tf_remove_noise_op)
tf_sess.run(self.tf_sample_new_noise_and_add_op)
else:
if override and self.weights_are_currently_noisy:
self._remove_noise()
self._sample_new_noise()
self._add_stored_noise()
self.weights_are_currently_noisy = True
def _add_stored_noise(self, *, tf_sess=None):
"""Adds the stored `self.noise` to the model's parameters.
Note: No new sampling of noise here.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to add the
stored noise to the (currently noise-free) weights.
override: If True, undo any currently applied noise first,
then add the currently stored noise.
"""
# Make sure we only add noise to currently noise-free weights.
assert self.weights_are_currently_noisy is False
# Add stored noise to the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_add_stored_noise_op)
elif self.framework == "tf2":
self._tf_add_stored_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Add noise to weights in-place.
var.requires_grad = False
var.add_(noise)
var.requires_grad = True
self.weights_are_currently_noisy = True
def _tf_add_stored_noise_op(self):
"""Generates tf-op that assigns the stored noise to weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to apply the already stored noise to the NN.
"""
add_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
add_noise_ops.append(tf1.assign_add(var, noise))
ret = tf.group(*tuple(add_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
def _remove_noise(self, *, tf_sess=None):
"""
Removes the current action noise from the model parameters.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to remove
the noise from the (currently noisy) weights.
"""
# Make sure we only remove noise iff currently noisy.
assert self.weights_are_currently_noisy is True
# Removes the stored noise from the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_remove_noise_op)
elif self.framework == "tf2":
self._tf_remove_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Remove noise from weights in-place.
var.requires_grad = False
var.add_(-noise)
var.requires_grad = True
self.weights_are_currently_noisy = False
def _tf_remove_noise_op(self):
"""Generates a tf-op for removing noise from the model's weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to remve the currently stored noise from the NN.
"""
remove_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
remove_noise_ops.append(tf1.assign_add(var, -noise))
ret = tf.group(*tuple(remove_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
@override(Exploration)
def get_state(self, sess=None):
return {"cur_stddev": self.stddev_val}
@override(Exploration)
def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
self.stddev_val = state["cur_stddev"]
# Set self.stddev to calculated value.
if self.framework == "tf":
self.stddev.load(self.stddev_val, session=sess)
elif isinstance(self.stddev, float):
self.stddev = self.stddev_val
else:
self.stddev.assign(self.stddev_val)
| ParameterNoise |
python | kamyu104__LeetCode-Solutions | Python/maximize-active-section-with-trade-ii.py | {
"start": 56,
"end": 2798
} | class ____(object):
def maxActiveSectionsAfterTrade(self, s, queries):
"""
:type s: str
:type queries: List[List[int]]
:rtype: List[int]
"""
# RMQ - Sparse Table
# Template: https://github.com/kamyu104/GoogleCodeJam-Farewell-Rounds/blob/main/Round%20D/genetic_sequences2.py3
# Time: ctor: O(NlogN) * O(fn)
# query: O(fn)
# Space: O(NlogN)
class SparseTable(object):
def __init__(self, arr, fn):
self.fn = fn
self.bit_length = [0]
n = len(arr)
k = n.bit_length()-1 # log2_floor(n)
for i in xrange(k+1):
self.bit_length.extend(i+1 for _ in xrange(min(1<<i, (n+1)-len(self.bit_length))))
self.st = [[0]*n for _ in xrange(k+1)]
if not self.st:
return
self.st[0] = arr[:]
for i in xrange(1, k+1): # Time: O(NlogN) * O(fn)
for j in xrange((n-(1<<i))+1):
self.st[i][j] = fn(self.st[i-1][j], self.st[i-1][j+(1<<(i-1))])
def query(self, L, R): # Time: O(fn)
i = self.bit_length[R-L+1]-1
return self.fn(self.st[i][L], self.st[i][R-(1<<i)+1])
lookup = [-1]*len(s)
idxs = []
cnt1 = 0
for i, x in enumerate(s):
if x == '0':
if i-1 >= 0 and s[i-1] == '0':
idxs[-1][1] += 1
else:
idxs.append([i, 1])
else:
cnt1 += 1
lookup[i] = len(idxs)-1
if not idxs:
return [cnt1]*len(queries)
arr = [0]*(len(idxs)-1)
for i in xrange(len(idxs)-1):
arr[i] = idxs[i][1]+idxs[i+1][1]
st = SparseTable(arr, max)
result = [cnt1]*len(queries)
for i, (l, r) in enumerate(queries):
left, right = lookup[l]+1, lookup[r]-int(s[r] == '0')
left_cnt = idxs[lookup[l]][1]-(l-idxs[lookup[l]][0]) if lookup[l] != -1 else -1
right_cnt = r-idxs[lookup[r]][0]+1 if lookup[r] != -1 else -1
if left <= right-1:
result[i] = max(result[i], cnt1 + st.query(left, right-1))
if s[l] == '0' and s[r] == '0' and lookup[l]+1 == lookup[r]:
result[i] = max(result[i], cnt1+left_cnt+right_cnt)
if s[l] == '0' and lookup[l]+1 <= right:
result[i] = max(result[i], cnt1+left_cnt+idxs[lookup[l]+1][1])
if s[r] == '0' and left <= lookup[r]-1:
result[i] = max(result[i], cnt1+right_cnt+idxs[lookup[r]-1][1])
return result
| Solution |
python | getsentry__sentry | tests/sentry/core/endpoints/test_project_details.py | {
"start": 72228,
"end": 90101
} | class ____(TestProjectDetailsBase):
endpoint = "sentry-api-0-project-details"
def setUp(self) -> None:
super().setUp()
self.new_ds_flag = "organizations:dynamic-sampling"
self.url = reverse(
"sentry-api-0-project-details",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
},
)
self.login_as(user=self.user)
with assume_test_silo_mode(SiloMode.CONTROL):
token = ApiToken.objects.create(user=self.user, scope_list=["project:write"])
self.authorization = f"Bearer {token.token}"
def test_get_dynamic_sampling_default_biases(self) -> None:
"""
Tests the case when organization on AM2 plan, but haven't manipulated the bias toggles
yet, so they get the default biases.
"""
with Feature(
{
self.new_ds_flag: True,
}
):
response = self.get_success_response(
self.organization.slug, self.project.slug, method="get"
)
assert response.data["dynamicSamplingBiases"] == DEFAULT_BIASES
def test_get_dynamic_sampling_biases_manually_set_biases(self) -> None:
"""
Tests the case when an organization on AM2 plan, and have manipulated the bias toggles,
so they should get their actual bias preferences.
"""
new_biases = [{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": False}]
self.project.update_option("sentry:dynamic_sampling_biases", new_biases)
with Feature(
{
self.new_ds_flag: True,
}
):
response = self.get_success_response(
self.organization.slug, self.project.slug, method="get"
)
assert response.data["dynamicSamplingBiases"] == [
{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": False},
{"id": RuleType.BOOST_LATEST_RELEASES_RULE.value, "active": True},
{"id": RuleType.IGNORE_HEALTH_CHECKS_RULE.value, "active": True},
{"id": RuleType.BOOST_KEY_TRANSACTIONS_RULE.value, "active": True},
{"id": RuleType.BOOST_LOW_VOLUME_TRANSACTIONS_RULE.value, "active": True},
{"id": RuleType.BOOST_REPLAY_ID_RULE.value, "active": True},
{"id": RuleType.RECALIBRATION_RULE.value, "active": True},
{"id": RuleType.MINIMUM_SAMPLE_RATE_RULE.value, "active": False},
]
def test_get_dynamic_sampling_biases_with_previously_assigned_biases(self) -> None:
self.project.update_option(
"sentry:dynamic_sampling_biases",
[{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": False}],
)
with Feature(
{
self.new_ds_flag: True,
}
):
response = self.get_success_response(
self.organization.slug, self.project.slug, method="get"
)
assert response.data["dynamicSamplingBiases"] == [
{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": False},
{"id": RuleType.BOOST_LATEST_RELEASES_RULE.value, "active": True},
{"id": RuleType.IGNORE_HEALTH_CHECKS_RULE.value, "active": True},
{"id": RuleType.BOOST_KEY_TRANSACTIONS_RULE.value, "active": True},
{"id": RuleType.BOOST_LOW_VOLUME_TRANSACTIONS_RULE.value, "active": True},
{"id": RuleType.BOOST_REPLAY_ID_RULE.value, "active": True},
{"id": RuleType.RECALIBRATION_RULE.value, "active": True},
{"id": RuleType.MINIMUM_SAMPLE_RATE_RULE.value, "active": False},
]
def test_dynamic_sampling_bias_activation(self) -> None:
"""
Tests that when sending a request to enable a dynamic sampling bias,
the bias will be successfully enabled and the audit log 'SAMPLING_BIAS_ENABLED' will be triggered
"""
self.project.update_option(
"sentry:dynamic_sampling_biases",
[
{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": False},
{"id": RuleType.BOOST_REPLAY_ID_RULE.value, "active": False},
],
)
self.login_as(self.user)
with assume_test_silo_mode(SiloMode.CONTROL):
token = ApiToken.objects.create(user=self.user, scope_list=["project:write"])
authorization = f"Bearer {token.token}"
url = reverse(
"sentry-api-0-project-details",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
},
)
with Feature({self.new_ds_flag: True}), outbox_runner():
self.client.put(
url,
format="json",
HTTP_AUTHORIZATION=authorization,
data={
"dynamicSamplingBiases": [
{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": True},
]
},
)
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=self.project.organization_id,
event=audit_log.get_event_id("SAMPLING_BIAS_ENABLED"),
).exists()
def test_dynamic_sampling_bias_deactivation(self) -> None:
"""
Tests that when sending a request to disable a dynamic sampling bias,
the bias will be successfully disabled and the audit log 'SAMPLING_BIAS_DISABLED' will be triggered
"""
self.project.update_option(
"sentry:dynamic_sampling_biases",
[
{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": True},
{"id": RuleType.BOOST_REPLAY_ID_RULE.value, "active": False},
],
)
self.login_as(self.user)
with assume_test_silo_mode(SiloMode.CONTROL):
token = ApiToken.objects.create(user=self.user, scope_list=["project:write"])
authorization = f"Bearer {token.token}"
url = reverse(
"sentry-api-0-project-details",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
},
)
with Feature({self.new_ds_flag: True}), outbox_runner():
self.client.put(
url,
format="json",
HTTP_AUTHORIZATION=authorization,
data={
"dynamicSamplingBiases": [
{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": False},
{"id": RuleType.BOOST_REPLAY_ID_RULE.value, "active": False},
]
},
)
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
organization_id=self.project.organization_id,
event=audit_log.get_event_id("SAMPLING_BIAS_DISABLED"),
).exists()
def test_put_dynamic_sampling_after_migrating_to_new_plan_default_biases_with_missing_flags(
self,
):
"""
Test for case when a user is an old plan but tries to update dynamic sampling biases that is a
feature of new plans
"""
response = self.client.put(
self.url,
format="json",
HTTP_AUTHORIZATION=self.authorization,
data={"dynamicSamplingBiases": DEFAULT_BIASES},
)
assert response.status_code == 403
assert response.data["detail"] == "dynamicSamplingBiases is not a valid field"
def test_put_new_dynamic_sampling_rules_with_correct_flags(self) -> None:
"""
Test when user is on a new plan and is trying to update dynamic sampling features of a new plan
"""
new_biases = [
{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": False},
{"id": RuleType.BOOST_LATEST_RELEASES_RULE.value, "active": False},
{"id": RuleType.IGNORE_HEALTH_CHECKS_RULE.value, "active": False},
{"id": RuleType.BOOST_KEY_TRANSACTIONS_RULE.value, "active": False},
{"id": RuleType.BOOST_LOW_VOLUME_TRANSACTIONS_RULE.value, "active": False},
{"id": RuleType.BOOST_REPLAY_ID_RULE.value, "active": False},
{"id": RuleType.RECALIBRATION_RULE.value, "active": False},
{"id": RuleType.MINIMUM_SAMPLE_RATE_RULE.value, "active": False},
]
with Feature(
{
self.new_ds_flag: True,
}
):
response = self.client.put(
self.url,
format="json",
HTTP_AUTHORIZATION=self.authorization,
data={"dynamicSamplingBiases": new_biases},
)
assert response.status_code == 200
assert response.data["dynamicSamplingBiases"] == new_biases
assert self.project.get_option("sentry:dynamic_sampling_biases") == new_biases
# Test Get response after dynamic sampling biases are updated
get_response = self.get_success_response(
self.organization.slug, self.project.slug, method="get"
)
assert get_response.data["dynamicSamplingBiases"] == new_biases
def test_put_attempt_new_dynamic_sampling_without_biases_with_correct_flags(self) -> None:
"""
Test when user is on a new plan and is trying to update dynamic sampling features of a new plan with no biases
"""
with Feature({self.new_ds_flag: True}):
response = self.client.put(
self.url,
format="json",
HTTP_AUTHORIZATION=self.authorization,
data={"dynamicSamplingBiases": []},
)
assert response.status_code == 200
assert response.data["dynamicSamplingBiases"] == DEFAULT_BIASES
assert self.project.get_option("sentry:dynamic_sampling_biases") == DEFAULT_BIASES
# Test Get response after dynamic sampling biases are updated
get_response = self.get_success_response(
self.organization.slug, self.project.slug, method="get"
)
assert get_response.data["dynamicSamplingBiases"] == DEFAULT_BIASES
def test_put_new_dynamic_sampling_incorrect_rules_with_correct_flags(self) -> None:
new_biases = [
{"id": "foo", "active": False},
]
with Feature(
{
self.new_ds_flag: True,
}
):
response = self.client.put(
self.url,
format="json",
HTTP_AUTHORIZATION=self.authorization,
data={"dynamicSamplingBiases": new_biases},
)
assert response.status_code == 400
assert response.json()["dynamicSamplingBiases"][0]["id"] == [
'"foo" is not a valid choice.'
]
new_biases = [
{"whatever": "foo", "bla": False},
]
response = self.client.put(
self.url,
format="json",
HTTP_AUTHORIZATION=self.authorization,
data={"dynamicSamplingBiases": new_biases},
)
assert response.status_code == 400
assert response.json()["dynamicSamplingBiases"][0]["non_field_errors"] == [
"Error: Only 'id' and 'active' fields are allowed for bias."
]
def test_dynamic_sampling_bias_enable_audit_log(self) -> None:
"""Test that enabling a dynamic sampling bias creates the correct audit log entry."""
with self.feature("organizations:dynamic-sampling"):
# Start with default biases
with outbox_runner():
self.get_success_response(
self.org_slug, self.proj_slug, dynamicSamplingBiases=DEFAULT_BIASES
)
# Enable a specific bias
updated_biases = [
{"id": RuleType.MINIMUM_SAMPLE_RATE_RULE.value, "active": True},
]
self.get_success_response(
self.org_slug, self.proj_slug, dynamicSamplingBiases=updated_biases
)
# Check audit log entry was created
with assume_test_silo_mode(SiloMode.CONTROL):
audit_entry = AuditLogEntry.objects.get(
organization_id=self.organization.id,
event=audit_log.get_event_id("SAMPLING_BIAS_ENABLED"),
target_object=self.project.id,
)
assert audit_entry is not None
assert audit_entry.data["name"] == RuleType.MINIMUM_SAMPLE_RATE_RULE.value
def test_dynamic_sampling_bias_disable_audit_log(self) -> None:
"""Test that disabling a dynamic sampling bias creates the correct audit log entry."""
with self.feature("organizations:dynamic-sampling"):
# Start with a bias enabled
with outbox_runner():
initial_biases = [{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": True}]
self.get_success_response(
self.org_slug, self.proj_slug, dynamicSamplingBiases=initial_biases
)
# Disable the bias
disabled_biases = [{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": False}]
self.get_success_response(
self.org_slug, self.proj_slug, dynamicSamplingBiases=disabled_biases
)
# Check audit log entry was created
with assume_test_silo_mode(SiloMode.CONTROL):
audit_entry = AuditLogEntry.objects.get(
organization_id=self.organization.id,
event=audit_log.get_event_id("SAMPLING_BIAS_DISABLED"),
target_object=self.project.id,
)
assert audit_entry is not None
assert audit_entry.data["name"] == RuleType.BOOST_ENVIRONMENTS_RULE.value
def test_dynamic_sampling_bias_add_new_bias_audit_log(self) -> None:
"""Test that adding a new bias to existing biases creates the correct audit log entry."""
with self.feature("organizations:dynamic-sampling"):
# Start with some initial biases
initial_biases = [
{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": False},
{"id": RuleType.BOOST_LATEST_RELEASES_RULE.value, "active": False},
]
with outbox_runner():
self.get_success_response(
self.org_slug, self.proj_slug, dynamicSamplingBiases=initial_biases
)
# Add a new bias that's enabled
expanded_biases = [
{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": False},
{"id": RuleType.BOOST_LATEST_RELEASES_RULE.value, "active": False},
{"id": RuleType.MINIMUM_SAMPLE_RATE_RULE.value, "active": True},
]
self.get_success_response(
self.org_slug, self.proj_slug, dynamicSamplingBiases=expanded_biases
)
# Check audit log entry was created for the newly enabled bias
with assume_test_silo_mode(SiloMode.CONTROL):
audit_entry = AuditLogEntry.objects.get(
organization_id=self.organization.id,
event=audit_log.get_event_id("SAMPLING_BIAS_ENABLED"),
target_object=self.project.id,
)
assert audit_entry is not None
assert audit_entry.data["name"] == RuleType.MINIMUM_SAMPLE_RATE_RULE.value
def test_dynamic_sampling_bias_add_new_inactive_bias_no_audit_log(self) -> None:
"""Test that adding a new bias as inactive does not create an audit log entry."""
with self.feature("organizations:dynamic-sampling"):
# Start with some initial biases
initial_biases = [
{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": False},
{"id": RuleType.BOOST_LATEST_RELEASES_RULE.value, "active": False},
]
with outbox_runner():
self.get_success_response(
self.org_slug, self.proj_slug, dynamicSamplingBiases=initial_biases
)
# Add a new bias that's inactive
expanded_biases = [
{"id": RuleType.BOOST_ENVIRONMENTS_RULE.value, "active": False},
{"id": RuleType.BOOST_LATEST_RELEASES_RULE.value, "active": False},
{"id": RuleType.MINIMUM_SAMPLE_RATE_RULE.value, "active": False},
]
self.get_success_response(
self.org_slug, self.proj_slug, dynamicSamplingBiases=expanded_biases
)
# Check that no audit log entry was created for the inactive bias
with assume_test_silo_mode(SiloMode.CONTROL):
audit_entries = AuditLogEntry.objects.filter(
organization_id=self.organization.id,
event=audit_log.get_event_id("SAMPLING_BIAS_ENABLED"),
target_object=self.project.id,
)
assert audit_entries.count() == 0
| TestProjectDetailsDynamicSamplingBiases |
python | tensorflow__tensorflow | tensorflow/python/distribute/input_lib_type_spec_test.py | {
"start": 13029,
"end": 20873
} | class ____(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
mode=["eager"],
tf_api_version=2,
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
enable_get_next_as_optional=[True, False]))
def testTypeSpecBase(self, distribution, enable_get_next_as_optional):
def create_dataset():
dataset = dataset_ops.DatasetV2.range(10).batch(2)
return dataset
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
dist_dataset = distribution.experimental_distribute_dataset(
create_dataset())
spec = dist_dataset._type_spec
self.assertEqual(spec._input_workers, dist_dataset._input_workers)
self.assertEqual(
spec._element_spec._value_specs,
(tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.int64, name=None),
tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.int64, name=None)))
@combinations.generate(
combinations.combine(
mode=["eager"],
tf_api_version=2,
distribution=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
],
enable_get_next_as_optional=[True, False]))
def testTypeSpecReturnedFromTFFunction(self, distribution,
enable_get_next_as_optional):
# TODO(ishark): This is observed when tensor is copied from one device to
# other and since DatasetVariantWrapper does not have a copy
# function. Some Context: b/146981184
# Try to renable with non-canonicalized input workers, which
# helped in PS Strategy for similar error.
self.skipTest("Failures observed in Ubuntu presubmit: No unary variant "
"device copy function found for direction: 1 and Variant "
"type_index:tensorflow::data::(anonymous namespace)::"
"DatasetVariantWrapper")
@def_function.function
def create_dist_dataset():
dataset = dataset_ops.DatasetV2.range(10).batch(2)
return distribution.experimental_distribute_dataset(dataset)
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
dist_dataset = create_dist_dataset()
spec = dist_dataset._type_spec
self.assertEqual(spec._input_workers, dist_dataset._input_workers)
self.assertEqual(
spec._element_spec._value_specs,
(tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.int64, name=None),
tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.int64, name=None)))
# Read distributed data to confirm values are correct.
iterator = iter(dist_dataset)
data = []
for it in iterator:
data.append(distribution.experimental_local_results(it))
self.assertAllEqual(
nest.flatten(data),
list(dataset_ops.DatasetV2.range(10).batch(1).as_numpy_iterator()))
@combinations.generate(
combinations.combine(
mode=["eager"],
tf_api_version=2,
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
enable_get_next_as_optional=[True, False]))
def testTypeSpecRaggedTensor(self, distribution, enable_get_next_as_optional):
ctx = distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(8)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"dense": ragged_tensor.to_tensor(),
"ragged": ragged_tensor,
"sparse": ragged_tensor.to_sparse(),
})
dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
dataset = dataset.batch(batch_size)
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
spec = dist_dataset._type_spec
self.assertEqual(spec._input_workers, dist_dataset._input_workers)
self.assertEqual(
spec._element_spec, {
"sparse":
values.PerReplicaSpec(
sparse_tensor.SparseTensorSpec(
tensor_shape.TensorShape([None, 3]), dtypes.float32),
sparse_tensor.SparseTensorSpec(
tensor_shape.TensorShape([None, 3]), dtypes.float32)),
"dense":
values.PerReplicaSpec(
tensor_spec.TensorSpec(
shape=(None, 3), dtype=dtypes.float32, name=None),
tensor_spec.TensorSpec(
shape=(None, 3), dtype=dtypes.float32, name=None)),
"ragged":
values.PerReplicaSpec(
ragged_tensor_lib.RaggedTensorSpec(
tensor_shape.TensorShape([None, None]), dtypes.float32,
1, dtypes.int64),
ragged_tensor_lib.RaggedTensorSpec(
tensor_shape.TensorShape([None, None]), dtypes.float32,
1, dtypes.int64))
})
@combinations.generate(
combinations.combine(
mode=["eager"],
tf_api_version=2,
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
enable_get_next_as_optional=[True, False],
experimental_place_dataset_on_device=[True, False],
experimental_fetch_to_device=[True, False]))
def testTypeSpecComponents(self, distribution, enable_get_next_as_optional,
experimental_place_dataset_on_device,
experimental_fetch_to_device):
dataset = dataset_ops.DatasetV2.range(10).batch(2)
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
options = distribute_lib.InputOptions(
experimental_place_dataset_on_device=
experimental_place_dataset_on_device,
experimental_fetch_to_device=experimental_fetch_to_device)
dist_dataset = distribution.experimental_distribute_dataset(
dataset, options)
spec = dist_dataset._type_spec
self.assertEqual(spec._input_workers, dist_dataset._input_workers)
self.assertEqual(
spec._element_spec._value_specs,
(tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.int64, name=None),
tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.int64, name=None)))
components = spec._to_components(dist_dataset)
re_dist_dataset = spec._from_components(components)
self.assertEqual(dist_dataset._input_workers,
re_dist_dataset._input_workers)
self.assertAllEqual(dist_dataset._cloned_datasets,
re_dist_dataset._cloned_datasets)
self.assertEqual(dist_dataset._element_spec, re_dist_dataset._element_spec)
self.assertEqual(dist_dataset._enable_get_next_as_optional,
re_dist_dataset._enable_get_next_as_optional)
self.assertEqual(dist_dataset._options, re_dist_dataset._options)
| DistributedDatasetTypeSpecTest |
python | pydantic__pydantic | tests/typechecking/base_model.py | {
"start": 720,
"end": 1350
} | class ____(BaseModel):
title: str = Field(default='Sir Lancelot') # this is okay
age: int = Field(23) # this works fine at runtime but will case an error for pyright
k = Knight() # type: ignore[call-arg] # pyright: ignore[reportCallIssue]
assert_type(Knight.model_fields, dict[str, FieldInfo])
assert_type(Knight.model_computed_fields, dict[str, ComputedFieldInfo])
assert_type(k.model_fields, dict[str, FieldInfo]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated]
assert_type(k.model_computed_fields, dict[str, ComputedFieldInfo]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated]
| Knight |
python | altair-viz__altair | tools/generate_schema_wrapper.py | {
"start": 3033,
"end": 6056
} | class ____:
_encoding_name: str
def to_dict(
self,
validate: bool = True,
ignore: list[str] | None = None,
context: dict[str, Any] | None = None,
) -> dict | list[dict]:
context = context or {}
ignore = ignore or []
shorthand = self._get("shorthand") # type: ignore[attr-defined]
field = self._get("field") # type: ignore[attr-defined]
if shorthand is not Undefined and field is not Undefined:
msg = f"{self.__class__.__name__} specifies both shorthand={shorthand} and field={field}. "
raise ValueError(msg)
if isinstance(shorthand, (tuple, list)):
# If given a list of shorthands, then transform it to a list of classes
kwds = self._kwds.copy() # type: ignore[attr-defined]
kwds.pop("shorthand")
return [
self.__class__(sh, **kwds).to_dict( # type: ignore[call-arg]
validate=validate, ignore=ignore, context=context
)
for sh in shorthand
]
if shorthand is Undefined:
parsed = {}
elif isinstance(shorthand, str):
data: nw.DataFrame | Any = context.get("data", None)
parsed = parse_shorthand(shorthand, data=data)
type_required = "type" in self._kwds # type: ignore[attr-defined]
type_in_shorthand = "type" in parsed
type_defined_explicitly = self._get("type") is not Undefined # type: ignore[attr-defined]
if not type_required:
# Secondary field names don't require a type argument in VegaLite 3+.
# We still parse it out of the shorthand, but drop it here.
parsed.pop("type", None)
elif not (type_in_shorthand or type_defined_explicitly):
if isinstance(data, nw.DataFrame):
msg = (
f'Unable to determine data type for the field "{shorthand}";'
" verify that the field name is not misspelled."
" If you are referencing a field from a transform,"
" also confirm that the data type is specified correctly."
)
raise ValueError(msg)
else:
msg = (
f"{shorthand} encoding field is specified without a type; "
"the type cannot be automatically inferred because "
"the data is not specified as a pandas.DataFrame."
)
raise ValueError(msg)
else:
# Shorthand is not a string; we pass the definition to field,
# and do not do any parsing.
parsed = {"field": shorthand}
context["parsed_shorthand"] = parsed
return super(FieldChannelMixin, self).to_dict(
validate=validate, ignore=ignore, context=context
)
| FieldChannelMixin |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/unsat_provider/package.py | {
"start": 216,
"end": 583
} | class ____(Package):
"""This package has a dependency on a virtual that cannot be provided"""
homepage = "http://www.example.com"
url = "http://www.example.com/v1.0.tgz"
version("1.0", sha256="0123456789abcdef0123456789abcdef")
variant("foo", default=True, description="")
provides("unsatvdep", when="+foo")
conflicts("+foo")
| UnsatProvider |
python | ray-project__ray | doc/source/ray-core/doc_code/ray-dag.py | {
"start": 513,
"end": 3463
} | class ____:
def __init__(self, init_value):
self.i = init_value
def inc(self, x):
self.i += x
def get(self):
return self.i
a1 = Actor.bind(10) # Instantiate Actor with init_value 10.
val = a1.get.bind() # ClassMethod that returns value from get() from
# the actor created.
assert ray.get(val.execute()) == 10
@ray.remote
def combine(x, y):
return x + y
a2 = Actor.bind(10) # Instantiate another Actor with init_value 10.
a1.inc.bind(2) # Call inc() on the actor created with increment of 2.
a1.inc.bind(4) # Call inc() on the actor created with increment of 4.
a2.inc.bind(6) # Call inc() on the actor created with increment of 6.
# Combine outputs from a1.get() and a2.get()
dag = combine.bind(a1.get.bind(), a2.get.bind())
# a1 + a2 + inc(2) + inc(4) + inc(6)
# 10 + (10 + ( 2 + 4 + 6)) = 32
assert ray.get(dag.execute()) == 32
# __dag_actors_end__
# fmt: on
ray.shutdown()
# fmt: off
# __dag_input_node_begin__
import ray
ray.init()
from ray.dag.input_node import InputNode
@ray.remote
def a(user_input):
return user_input * 2
@ray.remote
def b(user_input):
return user_input + 1
@ray.remote
def c(x, y):
return x + y
with InputNode() as dag_input:
a_ref = a.bind(dag_input)
b_ref = b.bind(dag_input)
dag = c.bind(a_ref, b_ref)
# a(2) + b(2) = c
# (2 * 2) + (2 + 1)
assert ray.get(dag.execute(2)) == 7
# a(3) + b(3) = c
# (3 * 2) + (3 + 1)
assert ray.get(dag.execute(3)) == 10
# __dag_input_node_end__
# fmt: on
# fmt: off
# __dag_multi_output_node_begin__
import ray
from ray.dag.input_node import InputNode
from ray.dag.output_node import MultiOutputNode
@ray.remote
def f(input):
return input + 1
with InputNode() as input_data:
dag = MultiOutputNode([f.bind(input_data["x"]), f.bind(input_data["y"])])
refs = dag.execute({"x": 1, "y": 2})
assert ray.get(refs) == [2, 3]
# __dag_multi_output_node_end__
# fmt: on
# fmt: off
# __dag_multi_output_node_begin__
import ray
from ray.dag.input_node import InputNode
from ray.dag.output_node import MultiOutputNode
@ray.remote
def f(input):
return input + 1
with InputNode() as input_data:
dag = MultiOutputNode([f.bind(input_data["x"]), f.bind(input_data["y"])])
refs = dag.execute({"x": 1, "y": 2})
assert ray.get(refs) == [2, 3]
# __dag_multi_output_node_end__
# fmt: on
# fmt: off
# __dag_multi_output_node_begin__
import ray
from ray.dag.input_node import InputNode
from ray.dag.output_node import MultiOutputNode
@ray.remote
def f(input):
return input + 1
with InputNode() as input_data:
dag = MultiOutputNode([f.bind(input_data["x"]), f.bind(input_data["y"])])
refs = dag.execute({"x": 1, "y": 2})
assert ray.get(refs) == [2, 3]
# __dag_multi_output_node_end__
# fmt: on
# fmt: off
# __dag_actor_reuse_begin__
import ray
from ray.dag.input_node import InputNode
from ray.dag.output_node import MultiOutputNode
@ray.remote
| Actor |
python | mlflow__mlflow | examples/auth/auth.py | {
"start": 46,
"end": 1878
} | class ____:
MLFLOW_TRACKING_USERNAME = "MLFLOW_TRACKING_USERNAME"
MLFLOW_TRACKING_PASSWORD = "MLFLOW_TRACKING_PASSWORD"
def __init__(self, username, password) -> None:
self.username = username
self.password = password
self.env = {}
def _record_env_var(self, key):
if key := os.getenv(key):
self.env[key] = key
def _restore_env_var(self, key):
if value := self.env.get(key):
os.environ[key] = value
else:
del os.environ[key]
def __enter__(self):
self._record_env_var(User.MLFLOW_TRACKING_USERNAME)
self._record_env_var(User.MLFLOW_TRACKING_PASSWORD)
os.environ[User.MLFLOW_TRACKING_USERNAME] = self.username
os.environ[User.MLFLOW_TRACKING_PASSWORD] = self.password
return self
def __exit__(self, *_exc):
self._restore_env_var(User.MLFLOW_TRACKING_USERNAME)
self._restore_env_var(User.MLFLOW_TRACKING_PASSWORD)
self.env.clear()
tracking_uri = "http://localhost:5000"
mlflow.set_tracking_uri(tracking_uri)
client = mlflow.server.get_app_client("basic-auth", tracking_uri)
A = User("user_a", "password_a")
B = User("user_b", "password_b")
with A:
exp_a = mlflow.set_experiment(uuid.uuid4().hex)
with mlflow.start_run():
mlflow.log_metric("a", 1)
with B:
mlflow.set_experiment(exp_a.name)
try:
with mlflow.start_run(): # not allowed
mlflow.log_metric("b", 2)
except Exception as e:
print(str(e))
# Grant B permission to edit A's experiment
with A:
client.create_experiment_permission(str(exp_a.experiment_id), B.username, "EDIT")
# B can edit now, should be able to log a metric
with B:
mlflow.set_experiment(exp_a.name)
with mlflow.start_run():
mlflow.log_metric("b", 2)
| User |
python | django__django | tests/model_fields/test_uuid.py | {
"start": 479,
"end": 2362
} | class ____(TestCase):
def test_uuid_instance(self):
instance = UUIDModel.objects.create(field=uuid.uuid4())
loaded = UUIDModel.objects.get()
self.assertEqual(loaded.field, instance.field)
def test_str_instance_no_hyphens(self):
UUIDModel.objects.create(field="550e8400e29b41d4a716446655440000")
loaded = UUIDModel.objects.get()
self.assertEqual(loaded.field, uuid.UUID("550e8400e29b41d4a716446655440000"))
def test_str_instance_hyphens(self):
UUIDModel.objects.create(field="550e8400-e29b-41d4-a716-446655440000")
loaded = UUIDModel.objects.get()
self.assertEqual(loaded.field, uuid.UUID("550e8400e29b41d4a716446655440000"))
def test_str_instance_bad_hyphens(self):
UUIDModel.objects.create(field="550e84-00-e29b-41d4-a716-4-466-55440000")
loaded = UUIDModel.objects.get()
self.assertEqual(loaded.field, uuid.UUID("550e8400e29b41d4a716446655440000"))
def test_null_handling(self):
NullableUUIDModel.objects.create(field=None)
loaded = NullableUUIDModel.objects.get()
self.assertIsNone(loaded.field)
def test_pk_validated(self):
with self.assertRaisesMessage(
exceptions.ValidationError, "is not a valid UUID"
):
PrimaryKeyUUIDModel.objects.get(pk={})
with self.assertRaisesMessage(
exceptions.ValidationError, "is not a valid UUID"
):
PrimaryKeyUUIDModel.objects.get(pk=[])
def test_wrong_value(self):
with self.assertRaisesMessage(
exceptions.ValidationError, "is not a valid UUID"
):
UUIDModel.objects.get(field="not-a-uuid")
with self.assertRaisesMessage(
exceptions.ValidationError, "is not a valid UUID"
):
UUIDModel.objects.create(field="not-a-uuid")
| TestSaveLoad |
python | apache__airflow | providers/apache/hive/tests/unit/apache/hive/__init__.py | {
"start": 5551,
"end": 5765
} | class ____:
PIPE = -1
STDOUT = -2
returncode: int | None = None
def __init__(self, *args, **kwargs):
self.stdout = MockStdOut(*args, **kwargs)
def wait(self):
return
| MockSubProcess |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeAlias11.py | {
"start": 200,
"end": 573
} | class ____(Generic[_T]):
def __init__(self, x: _T):
pass
A = ClassA
reveal_type(A(3), expected_text="ClassA[int]")
TA1 = collections.OrderedDict
TA2 = OrderedDict
TA1[int, int]
TA2[int, int]
TA3 = TA1
TA3[int, int]
TA4 = dict | OrderedDict
# This should generate two errors because the two types in TA4
# are already specialized.
x: TA4[int, int]
| ClassA |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/connectors/aioodbc.py | {
"start": 3045,
"end": 4331
} | class ____(AsyncAdapt_dbapi_module):
def __init__(self, aioodbc, pyodbc):
super().__init__(aioodbc, dbapi_module=pyodbc)
self.aioodbc = aioodbc
self.pyodbc = pyodbc
self.paramstyle = pyodbc.paramstyle
self._init_dbapi_attributes()
self.Cursor = AsyncAdapt_dbapi_cursor
self.version = pyodbc.version
def _init_dbapi_attributes(self):
for name in (
"Warning",
"Error",
"InterfaceError",
"DataError",
"DatabaseError",
"OperationalError",
"InterfaceError",
"IntegrityError",
"ProgrammingError",
"InternalError",
"NotSupportedError",
"NUMBER",
"STRING",
"DATETIME",
"BINARY",
"Binary",
"BinaryNull",
"SQL_VARCHAR",
"SQL_WVARCHAR",
):
setattr(self, name, getattr(self.pyodbc, name))
def connect(self, *arg, **kw):
creator_fn = kw.pop("async_creator_fn", self.aioodbc.connect)
return await_(
AsyncAdapt_aioodbc_connection.create(
self,
creator_fn(*arg, **kw),
)
)
| AsyncAdapt_aioodbc_dbapi |
python | huggingface__transformers | tests/models/reformer/test_tokenization_reformer.py | {
"start": 301,
"end": 2703
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = ["google/reformer-crime-and-punishment"]
tokenizer_class = ReformerTokenizer
test_tokenizer_from_extractor = False # no Tokenizer Backend yet
from_pretrained_kwargs = {}
integration_expected_tokens = ['▁T', 'h', 'is', '▁is', '▁a', '▁t', 'est', '▁', '<unk>', '▁I', '▁was', '▁b', 'or', 'n', '▁in', '▁', '<unk>', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<unk>', '.', '▁', '<unk>', '▁H', 'i', '▁He', 'll', 'o', '▁H', 'i', '▁He', 'll', 'o', '▁He', 'll', 'o', '▁', '<unk>', 's', '<unk>', '▁h', 'i', '<unk>', 's', '<unk>', 't', 'he', 're', '▁The', '▁f', 'o', 'll', 'ow', 'ing', '▁st', 'r', 'ing', '▁sh', 'ould', '▁be', '▁p', 'ro', 'p', 'er', 'ly', '▁', 'en', 'c', 'od', 'ed', ':', '▁He', 'll', 'o', '.', '▁But', '▁', 'ir', 'd', '▁and', '▁', '<unk>', '▁', 'ir', 'd', '▁', '<unk>', '▁He', 'y', '▁h', 'ow', '▁are', '▁you', '▁do', 'ing'] # fmt: skip
integration_expected_token_ids = [108, 265, 24, 111, 4, 3, 249, 258, 0, 33, 59, 17, 38, 263, 39, 258, 0, 277, 27, 221, 111, 22, 94, 266, 0, 278, 258, 0, 96, 264, 126, 32, 262, 96, 264, 126, 32, 262, 126, 32, 262, 258, 0, 266, 0, 31, 264, 0, 266, 0, 260, 5, 10, 140, 22, 262, 32, 77, 20, 74, 267, 20, 168, 106, 49, 40, 186, 279, 16, 48, 258, 25, 274, 227, 19, 315, 126, 32, 262, 278, 231, 258, 91, 268, 27, 258, 0, 258, 91, 268, 258, 0, 126, 272, 31, 77, 157, 41, 137, 20] # fmt: skip
expected_tokens_from_ids = ['▁T', 'h', 'is', '▁is', '▁a', '▁t', 'est', '▁', '<unk>', '▁I', '▁was', '▁b', 'or', 'n', '▁in', '▁', '<unk>', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<unk>', '.', '▁', '<unk>', '▁H', 'i', '▁He', 'll', 'o', '▁H', 'i', '▁He', 'll', 'o', '▁He', 'll', 'o', '▁', '<unk>', 's', '<unk>', '▁h', 'i', '<unk>', 's', '<unk>', 't', 'he', 're', '▁The', '▁f', 'o', 'll', 'ow', 'ing', '▁st', 'r', 'ing', '▁sh', 'ould', '▁be', '▁p', 'ro', 'p', 'er', 'ly', '▁', 'en', 'c', 'od', 'ed', ':', '▁He', 'll', 'o', '.', '▁But', '▁', 'ir', 'd', '▁and', '▁', '<unk>', '▁', 'ir', 'd', '▁', '<unk>', '▁He', 'y', '▁h', 'ow', '▁are', '▁you', '▁do', 'ing'] # fmt: skip
integration_expected_decoded_text = "This is a test <unk> I was born in <unk>, and this is fals<unk>. <unk> Hi Hello Hi Hello Hello <unk>s<unk> hi<unk>s<unk>there The following string should be properly encoded: Hello. But ird and <unk> ird <unk> Hey how are you doing"
| ReformerTokenizationTest |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0064_healthcheck.py | {
"start": 149,
"end": 522
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("builds", "0063_alter_buildcommandresult"),
]
operations = [
migrations.AddField(
model_name="build",
name="healthcheck",
field=models.DateTimeField(blank=True, null=True, verbose_name="Healthcheck"),
),
]
| Migration |
python | pypa__pip | src/pip/_internal/commands/search.py | {
"start": 828,
"end": 957
} | class ____(TypedDict):
name: str
summary: str
versions: list[str]
logger = logging.getLogger(__name__)
| TransformedHit |
python | numpy__numpy | numpy/random/tests/test_generator_mt19937.py | {
"start": 4209,
"end": 6415
} | class ____:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.integers(-5, -1) < -1)
x = random.integers(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
random = Generator(MT19937(1432985819))
non_contig = random.multinomial(100, pvals=pvals)
random = Generator(MT19937(1432985819))
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
def test_multinomial_pvals_float32(self):
x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09,
1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32)
pvals = x / x.sum()
random = Generator(MT19937(1432985819))
match = r"[\w\s]*pvals array is cast to 64-bit floating"
with pytest.raises(ValueError, match=match):
random.multinomial(1, pvals)
| TestMultinomial |
python | weaviate__weaviate-python-client | weaviate/backup/backup_location.py | {
"start": 286,
"end": 418
} | class ____(_BackupLocationConfig):
"""The dynamic location of a backup for filesystem."""
path: str
| _BackupLocationFilesystem |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/base.py | {
"start": 23415,
"end": 24310
} | class ____(Mapped[_T_co], _MappedAttribute[_T_co]):
"""Mixin for :class:`.MapperProperty` subclasses that allows them to
be compatible with ORM-annotated declarative mappings.
"""
__slots__ = ()
# MappedSQLExpression, Relationship, Composite etc. dont actually do
# SQL expression behavior. yet there is code that compares them with
# __eq__(), __ne__(), etc. Since #8847 made Mapped even more full
# featured including ColumnOperators, we need to have those methods
# be no-ops for these objects, so return NotImplemented to fall back
# to normal comparison behavior.
def operate(self, op: OperatorType, *other: Any, **kwargs: Any) -> Any:
return NotImplemented
__sa_operate__ = operate
def reverse_operate(
self, op: OperatorType, other: Any, **kwargs: Any
) -> Any:
return NotImplemented
| _DeclarativeMapped |
python | huggingface__transformers | src/transformers/models/deepseek_v3/modeling_deepseek_v3.py | {
"start": 6790,
"end": 8697
} | class ____(nn.Module):
"""Collection of expert weights stored as 3D tensors."""
def __init__(self, config):
super().__init__()
self.num_experts = config.num_local_experts
self.hidden_dim = config.hidden_size
self.intermediate_dim = config.intermediate_size
self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim))
self.down_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_dim, self.intermediate_dim))
self.act_fn = ACT2FN[config.hidden_act]
def forward(
self,
hidden_states: torch.Tensor,
top_k_index: torch.Tensor,
top_k_weights: torch.Tensor,
) -> torch.Tensor:
final_hidden_states = torch.zeros_like(hidden_states)
num_experts = top_k_weights.shape[1]
with torch.no_grad():
expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=num_experts + 1)
expert_mask = expert_mask.permute(2, 1, 0)
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit:
expert_idx = expert_idx[0]
if expert_idx == num_experts:
continue
_, token_idx = torch.where(expert_mask[expert_idx])
current_state = hidden_states[token_idx]
gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
current_hidden_states = self.act_fn(gate) * up
current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx])
current_hidden_states = current_hidden_states * top_k_weights[token_idx, expert_idx, None]
final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype))
return final_hidden_states
| DeepseekV3NaiveMoe |
python | ray-project__ray | ci/ray_ci/builder_container.py | {
"start": 125,
"end": 1539
} | class ____(LinuxContainer):
def __init__(
self,
python_version: str,
build_type: str,
architecture: str,
upload: bool = False,
) -> None:
super().__init__(
"manylinux" if architecture == "x86_64" else f"manylinux-{architecture}",
volumes=[f"{os.environ.get('RAYCI_CHECKOUT_DIR')}:/rayci"],
)
python_version_info = PYTHON_VERSIONS.get(python_version)
assert build_type in BUILD_TYPES, f"build_type must be one of {BUILD_TYPES}"
self.build_type = build_type
self.bin_path = python_version_info["bin_path"]
self.upload = upload
def run(self) -> None:
# chown is required to allow forge to upload the wheel
cmds = []
if self.build_type == "debug":
cmds += ["export RAY_DEBUG_BUILD=debug"]
if os.environ.get("RAYCI_DISABLE_CPP_WHEEL") == "true":
cmds += ["export RAY_DISABLE_EXTRA_CPP=1"]
if os.environ.get("RAYCI_DISABLE_JAVA", "") == "true":
cmds += ["export RAY_INSTALL_JAVA=0"]
cmds += [
"./ci/build/build-manylinux-ray.sh",
f"./ci/build/build-manylinux-wheel.sh {self.bin_path}",
"chown -R 2000:100 /artifact-mount",
]
if self.upload:
cmds += ["./ci/build/copy_build_artifacts.sh wheel"]
self.run_script(cmds)
| BuilderContainer |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 58911,
"end": 60748
} | class ____(BaseModel):
"""
TaskInstanceHistory serializer for responses.
"""
task_id: Annotated[str, Field(title="Task Id")]
dag_id: Annotated[str, Field(title="Dag Id")]
dag_run_id: Annotated[str, Field(title="Dag Run Id")]
map_index: Annotated[int, Field(title="Map Index")]
start_date: Annotated[datetime | None, Field(title="Start Date")] = None
end_date: Annotated[datetime | None, Field(title="End Date")] = None
duration: Annotated[float | None, Field(title="Duration")] = None
state: TaskInstanceState | None = None
try_number: Annotated[int, Field(title="Try Number")]
max_tries: Annotated[int, Field(title="Max Tries")]
task_display_name: Annotated[str, Field(title="Task Display Name")]
dag_display_name: Annotated[str, Field(title="Dag Display Name")]
hostname: Annotated[str | None, Field(title="Hostname")] = None
unixname: Annotated[str | None, Field(title="Unixname")] = None
pool: Annotated[str, Field(title="Pool")]
pool_slots: Annotated[int, Field(title="Pool Slots")]
queue: Annotated[str | None, Field(title="Queue")] = None
priority_weight: Annotated[int | None, Field(title="Priority Weight")] = None
operator: Annotated[str | None, Field(title="Operator")] = None
operator_name: Annotated[str | None, Field(title="Operator Name")] = None
queued_when: Annotated[datetime | None, Field(title="Queued When")] = None
scheduled_when: Annotated[datetime | None, Field(title="Scheduled When")] = None
pid: Annotated[int | None, Field(title="Pid")] = None
executor: Annotated[str | None, Field(title="Executor")] = None
executor_config: Annotated[str, Field(title="Executor Config")]
dag_version: DagVersionResponse | None = None
hitl_detail: HITLDetailHistory | None = None
| TaskInstanceHistoryResponse |
python | encode__starlette | starlette/endpoints.py | {
"start": 2153,
"end": 5099
} | class ____:
encoding: Literal["text", "bytes", "json"] | None = None
def __init__(self, scope: Scope, receive: Receive, send: Send) -> None:
assert scope["type"] == "websocket"
self.scope = scope
self.receive = receive
self.send = send
def __await__(self) -> Generator[Any, None, None]:
return self.dispatch().__await__()
async def dispatch(self) -> None:
websocket = WebSocket(self.scope, receive=self.receive, send=self.send)
await self.on_connect(websocket)
close_code = status.WS_1000_NORMAL_CLOSURE
try:
while True:
message = await websocket.receive()
if message["type"] == "websocket.receive":
data = await self.decode(websocket, message)
await self.on_receive(websocket, data)
elif message["type"] == "websocket.disconnect": # pragma: no branch
close_code = int(message.get("code") or status.WS_1000_NORMAL_CLOSURE)
break
except Exception as exc:
close_code = status.WS_1011_INTERNAL_ERROR
raise exc
finally:
await self.on_disconnect(websocket, close_code)
async def decode(self, websocket: WebSocket, message: Message) -> Any:
if self.encoding == "text":
if "text" not in message:
await websocket.close(code=status.WS_1003_UNSUPPORTED_DATA)
raise RuntimeError("Expected text websocket messages, but got bytes")
return message["text"]
elif self.encoding == "bytes":
if "bytes" not in message:
await websocket.close(code=status.WS_1003_UNSUPPORTED_DATA)
raise RuntimeError("Expected bytes websocket messages, but got text")
return message["bytes"]
elif self.encoding == "json":
if message.get("text") is not None:
text = message["text"]
else:
text = message["bytes"].decode("utf-8")
try:
return json.loads(text)
except json.decoder.JSONDecodeError:
await websocket.close(code=status.WS_1003_UNSUPPORTED_DATA)
raise RuntimeError("Malformed JSON data received.")
assert self.encoding is None, f"Unsupported 'encoding' attribute {self.encoding}"
return message["text"] if message.get("text") else message["bytes"]
async def on_connect(self, websocket: WebSocket) -> None:
"""Override to handle an incoming websocket connection"""
await websocket.accept()
async def on_receive(self, websocket: WebSocket, data: Any) -> None:
"""Override to handle an incoming websocket message"""
async def on_disconnect(self, websocket: WebSocket, close_code: int) -> None:
"""Override to handle a disconnecting websocket"""
| WebSocketEndpoint |
python | huggingface__transformers | tests/models/canine/test_modeling_canine.py | {
"start": 8533,
"end": 20795
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
CanineModel,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": CanineModel,
"question-answering": CanineForQuestionAnswering,
"text-classification": CanineForSequenceClassification,
"token-classification": CanineForTokenClassification,
"zero-shot": CanineForSequenceClassification,
}
if is_torch_available()
else {}
)
test_mismatched_shapes = False
test_resize_embeddings = False
def setUp(self):
self.model_tester = CanineModelTester(self)
# we set has_text_modality to False as the config has no vocab_size attribute
self.config_tester = ConfigTester(self, config_class=CanineConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
# expected_num_layers equals num_hidden_layers of the deep encoder + 1, + 2 for the first shallow encoder, + 2
# for the final shallow encoder
expected_num_layers = self.model_tester.num_hidden_layers + 1 + 2 + 2
self.assertEqual(len(hidden_states), expected_num_layers)
seq_length = self.model_tester.seq_length
for i in range(expected_num_layers):
if (i < 2) or ((expected_num_layers - i) < 3):
# the expected length of the hidden_states of the first and final shallow encoders
# is equal to the seq_length
self.assertListEqual(
list(hidden_states[i].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
else:
# the expected length of the hidden_states of the deep encoder need to be updated
# for CANINE since the seq length is downsampled
self.assertListEqual(
list(hidden_states[i].shape[-2:]),
[seq_length // config.downsampling_rate, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
# we add + 2 due to the 2 shallow encoders
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers + 2)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
# we add + 2 due to the 2 shallow encoders
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers + 2)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers + 2)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (list, tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
print(model_class)
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(
model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
)
@unittest.skip(reason="CANINE does not have a get_input_embeddings() method.")
def test_inputs_embeds(self):
# ViT does not use inputs_embeds
pass
@unittest.skip(reason="Canine Tower does not use inputs_embeds")
def test_inputs_embeds_matches_input_ids(self):
pass
@unittest.skip(reason="CANINE does not have a get_input_embeddings() method.")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "google/canine-s"
model = CanineModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
| CanineModelTest |
python | astropy__astropy | astropy/time/formats.py | {
"start": 81946,
"end": 90558
} | class ____(TimeDeltaFormat, TimeUnique):
"""Time delta as a string with one or more Quantity components.
This format provides a human-readable multi-scale string representation of a time
delta. It is convenient for applications like a configuration file or a command line
option.
The format is specified as follows:
- The string is a sequence of one or more components.
- Each component is a number followed by an astropy unit of time.
- For input, whitespace within the string is allowed but optional.
- For output, there is a single space between components.
- The allowed components are listed below.
- The order (yr, d, hr, min, s) is fixed but individual components are optional.
The allowed input component units are shown below:
- "yr": years (365.25 days)
- "d": days (24 hours)
- "hr": hours (60 minutes)
- "min": minutes (60 seconds)
- "s": seconds
.. Note:: These definitions correspond to physical units of time and are NOT
calendar date intervals. Thus adding "1yr" to "2000-01-01 00:00:00" will give
"2000-12-31 06:00:00" instead of "2001-01-01 00:00:00".
The ``out_subfmt`` attribute specifies the components to be included in the string
output. The default is ``"multi"`` which represents the time delta as
``"<days>d <hours>hr <minutes>min <seconds>s"``, where only non-zero components are
included.
- "multi": multiple components, e.g. "2d 3hr 15min 5.6s"
- "yr": years
- "d": days
- "hr": hours
- "min": minutes
- "s": seconds
Examples
--------
>>> from astropy.time import Time, TimeDelta
>>> import astropy.units as u
>>> print(TimeDelta("1yr"))
365d 6hr
>>> print(Time("2000-01-01") + TimeDelta("1yr"))
2000-12-31 06:00:00.000
>>> print(TimeDelta("+3.6d"))
3d 14hr 24min
>>> print(TimeDelta("-3.6d"))
-3d 14hr 24min
>>> print(TimeDelta("1yr 3.6d", out_subfmt="d"))
368.85d
>>> td = TimeDelta(40 * u.hr)
>>> print(td.to_value(format="quantity_str"))
1d 16hr
>>> print(td.to_value(format="quantity_str", subfmt="d"))
1.667d
>>> td.precision = 9
>>> print(td.to_value(format="quantity_str", subfmt="d"))
1.666666667d
"""
name = "quantity_str"
subfmts = (
("multi", None, None),
("yr", None, None),
("d", None, None),
("hr", None, None),
("min", None, None),
("s", None, None),
)
# Regex to parse "1.02yr 2.2d 3.12hr 4.322min 5.6s" where each element is optional
# but the order is fixed. Each element is a float with optional exponent. Each
# element is named.
re_float = r"(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?"
re_ydhms = re.compile(
rf"""^ \s*
(?P<sign>[-+])? \s* # Optional sign
(?=[^-+\s]) # At least one character which is not a sign or whitespace
((?P<yr>{re_float}) \s* yr \s*)?
((?P<d>{re_float}) \s* d \s*)?
((?P<hr>{re_float}) \s* hr \s*)?
((?P<min>{re_float}) \s* min \s*)?
((?P<s>{re_float}) \s* s)?
\s* $
""",
re.VERBOSE,
)
def _check_val_type(self, val1, val2):
if val1.dtype.kind not in ("S", "U") and val1.size:
raise TypeError(f"Input values for {self.name} class must be strings")
if val2 is not None:
raise ValueError(
f"{self.name} objects do not accept a val2 but you provided {val2}"
)
return val1, None
def parse_string(self, timestr):
"""Read time from a single string"""
components = ("yr", "d", "hr", "min", "s")
if (match := self.re_ydhms.match(timestr)) is None:
raise ValueError(
f"Time delta '{timestr}' does not match {self.name} format"
)
tm = match.groupdict()
vals = [float(tm[component] or 0.0) for component in components]
if tm["sign"] == "-":
vals = [-val for val in vals]
return vals
def set_jds(self, val1, val2):
"""Parse the time strings contained in val1 and get jd1, jd2."""
# Be liberal in what we accept: convert bytes to ascii.
# Here .item() is needed for arrays with entries of unequal length,
# to strip trailing 0 bytes.
to_string = (
str if val1.dtype.kind == "U" else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer(
[val1, None, None, None, None, None],
flags=["zerosize_ok"],
op_dtypes=[None] + 5 * [np.double],
)
for val, yr, day, hr, min, sec in iterator:
val = to_string(val)
(
yr[...],
day[...],
hr[...],
min[...],
sec[...],
) = self.parse_string(val)
yrs, days, hrs, mins, secs = iterator.operands[1:]
jd1 = yrs * 365.25 + days # Exact in the case that yrs and days are integer
jd2 = hrs / 24.0 + mins / 1440.0 + secs / 86400.0 # Inexact
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, parent=None, out_subfmt=None):
out_subfmt = out_subfmt or self.out_subfmt
subfmt = self._get_allowed_subfmt(out_subfmt)
iterator = np.nditer(
[self.jd1, self.jd2, None],
flags=["refs_ok", "zerosize_ok"],
op_dtypes=[None, None, object],
)
for jd1, jd2, out in iterator:
jd = jd1 + jd2
if jd < 0:
jd1, jd2, jd = -jd1, -jd2, -jd # Flip all signs
sign = "-"
else:
sign = ""
if subfmt in ["*", "multi"]:
comps = self.get_multi_comps(jd1, jd2)
else:
value = (jd * u.day).to_value(subfmt)
value = np.round(value, self.precision)
comps = [f"{value}{subfmt}"]
out[...] = sign + " ".join(comps)
return iterator.operands[-1]
def get_multi_comps(self, jd1, jd2):
jd, remainder = two_sum(jd1, jd2)
days = int(np.floor(jd))
jd -= days
jd += remainder
hours = int(np.floor(jd * 24.0))
jd -= hours / 24.0
mins = int(np.floor(jd * 1440.0))
jd -= mins / 1440.0
secs = np.round(jd * 86400.0, self.precision)
comp_vals = [days, hours, mins, secs]
if secs >= 60.0:
self.fix_comp_vals_overflow(comp_vals)
comps = [
f"{comp_val}{name}"
for comp_val, name in zip(comp_vals, ("d", "hr", "min", "s"))
if comp_val != 0
]
if not comps:
comps = ["0.0s"]
return comps
@staticmethod
def fix_comp_vals_overflow(comp_vals):
comp_maxes = (None, 24, 60, 60.0)
for ii in [3, 2, 1]:
comp_val = comp_vals[ii]
comp_max = comp_maxes[ii]
if comp_val >= comp_max:
comp_vals[ii] -= comp_max
comp_vals[ii - 1] += 1
@property
def value(self):
return self.to_value()
def _validate_jd_for_storage(jd):
if isinstance(jd, (float, int)):
return np.array(jd, dtype=float)
if isinstance(jd, np.generic) and (
(jd.dtype.kind == "f" and jd.dtype.itemsize <= 8) or jd.dtype.kind in "iu"
):
return np.array(jd, dtype=float)
elif isinstance(jd, np.ndarray) and jd.dtype.kind == "f" and jd.dtype.itemsize == 8:
return jd
else:
raise TypeError(
"JD values must be arrays (possibly zero-dimensional) "
f"of floats but we got {jd!r} of type {type(jd)}"
)
def _broadcast_writeable(jd1, jd2):
if jd1.shape == jd2.shape:
return jd1, jd2
# When using broadcast_arrays, *both* are flagged with
# warn-on-write, even the one that wasn't modified, and
# require "C" only clears the flag if it actually copied
# anything.
shape = np.broadcast(jd1, jd2).shape
if jd1.shape == shape:
s_jd1 = jd1
else:
s_jd1 = np.require(np.broadcast_to(jd1, shape), requirements=["C", "W"])
if jd2.shape == shape:
s_jd2 = jd2
else:
s_jd2 = np.require(np.broadcast_to(jd2, shape), requirements=["C", "W"])
return s_jd1, s_jd2
# Import symbols from core.py that are used in this module. This succeeds
# because __init__.py imports format.py just before core.py.
from .core import TIME_DELTA_SCALES, TIME_SCALES, ScaleValueError, Time # noqa: E402
| TimeDeltaQuantityString |
python | encode__httpx | httpx/_client.py | {
"start": 5330,
"end": 19412
} | class ____:
def __init__(
self,
*,
auth: AuthTypes | None = None,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
follow_redirects: bool = False,
max_redirects: int = DEFAULT_MAX_REDIRECTS,
event_hooks: None | (typing.Mapping[str, list[EventHook]]) = None,
base_url: URL | str = "",
trust_env: bool = True,
default_encoding: str | typing.Callable[[bytes], str] = "utf-8",
) -> None:
event_hooks = {} if event_hooks is None else event_hooks
self._base_url = self._enforce_trailing_slash(URL(base_url))
self._auth = self._build_auth(auth)
self._params = QueryParams(params)
self.headers = Headers(headers)
self._cookies = Cookies(cookies)
self._timeout = Timeout(timeout)
self.follow_redirects = follow_redirects
self.max_redirects = max_redirects
self._event_hooks = {
"request": list(event_hooks.get("request", [])),
"response": list(event_hooks.get("response", [])),
}
self._trust_env = trust_env
self._default_encoding = default_encoding
self._state = ClientState.UNOPENED
@property
def is_closed(self) -> bool:
"""
Check if the client being closed
"""
return self._state == ClientState.CLOSED
@property
def trust_env(self) -> bool:
return self._trust_env
def _enforce_trailing_slash(self, url: URL) -> URL:
if url.raw_path.endswith(b"/"):
return url
return url.copy_with(raw_path=url.raw_path + b"/")
def _get_proxy_map(
self, proxy: ProxyTypes | None, allow_env_proxies: bool
) -> dict[str, Proxy | None]:
if proxy is None:
if allow_env_proxies:
return {
key: None if url is None else Proxy(url=url)
for key, url in get_environment_proxies().items()
}
return {}
else:
proxy = Proxy(url=proxy) if isinstance(proxy, (str, URL)) else proxy
return {"all://": proxy}
@property
def timeout(self) -> Timeout:
return self._timeout
@timeout.setter
def timeout(self, timeout: TimeoutTypes) -> None:
self._timeout = Timeout(timeout)
@property
def event_hooks(self) -> dict[str, list[EventHook]]:
return self._event_hooks
@event_hooks.setter
def event_hooks(self, event_hooks: dict[str, list[EventHook]]) -> None:
self._event_hooks = {
"request": list(event_hooks.get("request", [])),
"response": list(event_hooks.get("response", [])),
}
@property
def auth(self) -> Auth | None:
"""
Authentication class used when none is passed at the request-level.
See also [Authentication][0].
[0]: /quickstart/#authentication
"""
return self._auth
@auth.setter
def auth(self, auth: AuthTypes) -> None:
self._auth = self._build_auth(auth)
@property
def base_url(self) -> URL:
"""
Base URL to use when sending requests with relative URLs.
"""
return self._base_url
@base_url.setter
def base_url(self, url: URL | str) -> None:
self._base_url = self._enforce_trailing_slash(URL(url))
@property
def headers(self) -> Headers:
"""
HTTP headers to include when sending requests.
"""
return self._headers
@headers.setter
def headers(self, headers: HeaderTypes) -> None:
client_headers = Headers(
{
b"Accept": b"*/*",
b"Accept-Encoding": ACCEPT_ENCODING.encode("ascii"),
b"Connection": b"keep-alive",
b"User-Agent": USER_AGENT.encode("ascii"),
}
)
client_headers.update(headers)
self._headers = client_headers
@property
def cookies(self) -> Cookies:
"""
Cookie values to include when sending requests.
"""
return self._cookies
@cookies.setter
def cookies(self, cookies: CookieTypes) -> None:
self._cookies = Cookies(cookies)
@property
def params(self) -> QueryParams:
"""
Query parameters to include in the URL when sending requests.
"""
return self._params
@params.setter
def params(self, params: QueryParamTypes) -> None:
self._params = QueryParams(params)
def build_request(
self,
method: str,
url: URL | str,
*,
content: RequestContent | None = None,
data: RequestData | None = None,
files: RequestFiles | None = None,
json: typing.Any | None = None,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Request:
"""
Build and return a request instance.
* The `params`, `headers` and `cookies` arguments
are merged with any values set on the client.
* The `url` argument is merged with any `base_url` set on the client.
See also: [Request instances][0]
[0]: /advanced/clients/#request-instances
"""
url = self._merge_url(url)
headers = self._merge_headers(headers)
cookies = self._merge_cookies(cookies)
params = self._merge_queryparams(params)
extensions = {} if extensions is None else extensions
if "timeout" not in extensions:
timeout = (
self.timeout
if isinstance(timeout, UseClientDefault)
else Timeout(timeout)
)
extensions = dict(**extensions, timeout=timeout.as_dict())
return Request(
method,
url,
content=content,
data=data,
files=files,
json=json,
params=params,
headers=headers,
cookies=cookies,
extensions=extensions,
)
def _merge_url(self, url: URL | str) -> URL:
"""
Merge a URL argument together with any 'base_url' on the client,
to create the URL used for the outgoing request.
"""
merge_url = URL(url)
if merge_url.is_relative_url:
# To merge URLs we always append to the base URL. To get this
# behaviour correct we always ensure the base URL ends in a '/'
# separator, and strip any leading '/' from the merge URL.
#
# So, eg...
#
# >>> client = Client(base_url="https://www.example.com/subpath")
# >>> client.base_url
# URL('https://www.example.com/subpath/')
# >>> client.build_request("GET", "/path").url
# URL('https://www.example.com/subpath/path')
merge_raw_path = self.base_url.raw_path + merge_url.raw_path.lstrip(b"/")
return self.base_url.copy_with(raw_path=merge_raw_path)
return merge_url
def _merge_cookies(self, cookies: CookieTypes | None = None) -> CookieTypes | None:
"""
Merge a cookies argument together with any cookies on the client,
to create the cookies used for the outgoing request.
"""
if cookies or self.cookies:
merged_cookies = Cookies(self.cookies)
merged_cookies.update(cookies)
return merged_cookies
return cookies
def _merge_headers(self, headers: HeaderTypes | None = None) -> HeaderTypes | None:
"""
Merge a headers argument together with any headers on the client,
to create the headers used for the outgoing request.
"""
merged_headers = Headers(self.headers)
merged_headers.update(headers)
return merged_headers
def _merge_queryparams(
self, params: QueryParamTypes | None = None
) -> QueryParamTypes | None:
"""
Merge a queryparams argument together with any queryparams on the client,
to create the queryparams used for the outgoing request.
"""
if params or self.params:
merged_queryparams = QueryParams(self.params)
return merged_queryparams.merge(params)
return params
def _build_auth(self, auth: AuthTypes | None) -> Auth | None:
if auth is None:
return None
elif isinstance(auth, tuple):
return BasicAuth(username=auth[0], password=auth[1])
elif isinstance(auth, Auth):
return auth
elif callable(auth):
return FunctionAuth(func=auth)
else:
raise TypeError(f'Invalid "auth" argument: {auth!r}')
def _build_request_auth(
self,
request: Request,
auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT,
) -> Auth:
auth = (
self._auth if isinstance(auth, UseClientDefault) else self._build_auth(auth)
)
if auth is not None:
return auth
username, password = request.url.username, request.url.password
if username or password:
return BasicAuth(username=username, password=password)
return Auth()
def _build_redirect_request(self, request: Request, response: Response) -> Request:
"""
Given a request and a redirect response, return a new request that
should be used to effect the redirect.
"""
method = self._redirect_method(request, response)
url = self._redirect_url(request, response)
headers = self._redirect_headers(request, url, method)
stream = self._redirect_stream(request, method)
cookies = Cookies(self.cookies)
return Request(
method=method,
url=url,
headers=headers,
cookies=cookies,
stream=stream,
extensions=request.extensions,
)
def _redirect_method(self, request: Request, response: Response) -> str:
"""
When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = request.method
# https://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.SEE_OTHER and method != "HEAD":
method = "GET"
# Do what the browsers do, despite standards...
# Turn 302s into GETs.
if response.status_code == codes.FOUND and method != "HEAD":
method = "GET"
# If a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in 'requests' issue 1704.
if response.status_code == codes.MOVED_PERMANENTLY and method == "POST":
method = "GET"
return method
def _redirect_url(self, request: Request, response: Response) -> URL:
"""
Return the URL for the redirect to follow.
"""
location = response.headers["Location"]
try:
url = URL(location)
except InvalidURL as exc:
raise RemoteProtocolError(
f"Invalid URL in location header: {exc}.", request=request
) from None
# Handle malformed 'Location' headers that are "absolute" form, have no host.
# See: https://github.com/encode/httpx/issues/771
if url.scheme and not url.host:
url = url.copy_with(host=request.url.host)
# Facilitate relative 'Location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
if url.is_relative_url:
url = request.url.join(url)
# Attach previous fragment if needed (RFC 7231 7.1.2)
if request.url.fragment and not url.fragment:
url = url.copy_with(fragment=request.url.fragment)
return url
def _redirect_headers(self, request: Request, url: URL, method: str) -> Headers:
"""
Return the headers that should be used for the redirect request.
"""
headers = Headers(request.headers)
if not _same_origin(url, request.url):
if not _is_https_redirect(request.url, url):
# Strip Authorization headers when responses are redirected
# away from the origin. (Except for direct HTTP to HTTPS redirects.)
headers.pop("Authorization", None)
# Update the Host header.
headers["Host"] = url.netloc.decode("ascii")
if method != request.method and method == "GET":
# If we've switch to a 'GET' request, then strip any headers which
# are only relevant to the request body.
headers.pop("Content-Length", None)
headers.pop("Transfer-Encoding", None)
# We should use the client cookie store to determine any cookie header,
# rather than whatever was on the original outgoing request.
headers.pop("Cookie", None)
return headers
def _redirect_stream(
self, request: Request, method: str
) -> SyncByteStream | AsyncByteStream | None:
"""
Return the body that should be used for the redirect request.
"""
if method != request.method and method == "GET":
return None
return request.stream
def _set_timeout(self, request: Request) -> None:
if "timeout" not in request.extensions:
timeout = (
self.timeout
if isinstance(self.timeout, UseClientDefault)
else Timeout(self.timeout)
)
request.extensions = dict(**request.extensions, timeout=timeout.as_dict())
| BaseClient |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_exceptions/invalid_exceptions_raised.py | {
"start": 215,
"end": 2167
} | class ____:
"""Not an exception."""
def good_case():
"""raise"""
raise ValidException('hop')
def good_case1():
"""zlib.error is defined in C module."""
import zlib
raise zlib.error(4)
def good_case2():
"""decimal.DivisionByZero is defined in C on Python 3."""
import decimal
raise decimal.DivisionByZero(4)
def good_case3():
"""io.BlockingIOError is defined in C."""
import io
raise io.BlockingIOError
def bad_case1():
"""raise"""
raise NewStyleClass() # [raising-non-exception]
def bad_case3():
"""raise"""
raise NewStyleClass # [raising-non-exception]
def bad_case4():
"""raise"""
raise NotImplemented('hop') # [notimplemented-raised]
def bad_case5():
"""raise"""
raise 1 # [raising-bad-type]
def bad_case6():
"""raise"""
raise None # [raising-bad-type]
def bad_case7():
"""raise list"""
raise list # [raising-non-exception]
def bad_case8():
"""raise tuple"""
raise tuple # [raising-non-exception]
def bad_case9():
"""raise dict"""
raise dict # [raising-non-exception]
def unknown_bases():
"""Don't emit when we don't know the bases."""
from lala import bala # pylint: disable=import-outside-toplevel
class MyException(bala):
pass
raise MyException
def exception_instance_regression():
"""Exceptions have a particular class type"""
try:
int("9a")
except ValueError as exc:
raise exc
def reusing_same_name_picks_the_latest_raised_value():
class Error(Exception):
"""some error"""
exceptions = tuple([ValueError, TypeError])
try:
raise ValueError
except exceptions as exc: # pylint: disable=catching-non-exception
# https://github.com/pylint-dev/pylint/issues/1756
exc = Error(exc)
if exc:
raise exc
def bad_case10():
"""raise string"""
raise "string" # [raising-bad-type]
| NewStyleClass |
python | hyperopt__hyperopt | hyperopt/tests/unit/test_domains.py | {
"start": 5912,
"end": 7246
} | class ____:
def test_basic(self):
domain = self._domain_cls()
# print 'domain params', domain.params, domain
# print 'algo params', algo.vh.params
trials = Trials()
fmin(
lambda x: x,
domain.expr,
trials=trials,
algo=suggest,
rstate=np.random.default_rng(4),
max_evals=self._n_steps,
)
assert trials.average_best_error(domain) - domain.loss_target < 0.2
@classmethod
def make(cls, domain_cls, n_steps=500):
class Tester(unittest.TestCase, cls):
def setUp(self):
self._n_steps = n_steps
self._domain_cls = domain_cls
Tester.__name__ = domain_cls.__name__ + "Tester"
return Tester
quadratic1Tester = DomainExperimentMixin.make(quadratic1)
q1_lognormalTester = DomainExperimentMixin.make(q1_lognormal)
q1_choiceTester = DomainExperimentMixin.make(q1_choice)
n_armsTester = DomainExperimentMixin.make(n_arms)
distractorTester = DomainExperimentMixin.make(distractor)
gauss_waveTester = DomainExperimentMixin.make(gauss_wave)
gauss_wave2Tester = DomainExperimentMixin.make(gauss_wave2, n_steps=5000)
many_distsTester = DomainExperimentMixin.make(many_dists)
braninTester = DomainExperimentMixin.make(branin)
| DomainExperimentMixin |
python | bokeh__bokeh | src/bokeh/models/widgets/groups.py | {
"start": 2007,
"end": 2444
} | class ____(AbstractGroup, ButtonLike):
''' Abstract base class for groups with items rendered as buttons.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
orientation = Enum("horizontal", "vertical", help="""
Orient the button group either horizontally (default) or vertically.
""")
@abstract
| ToggleButtonGroup |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 628460,
"end": 629103
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("SponsorableItemEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("SponsorableItem"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| SponsorableItemConnection |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-oci-genai/tests/test_oci_genai.py | {
"start": 386,
"end": 18641
} | class ____(dict):
def __getattr__(self, val) -> Any: # type: ignore[no-untyped-def]
return self[val]
@pytest.mark.parametrize("test_model_id", [])
def test_llm_complete(monkeypatch: MonkeyPatch, test_model_id: str) -> None:
"""Test valid completion call to OCI Generative AI LLM service."""
oci_gen_ai_client = MagicMock()
llm = OCIGenAI(model=test_model_id, client=oci_gen_ai_client)
provider = llm._provider.__class__.__name__
def mocked_response(*args): # type: ignore[no-untyped-def]
response_text = "This is the completion."
if provider == "CohereProvider":
return MockResponseDict(
{
"status": 200,
"data": MockResponseDict(
{
"inference_response": MockResponseDict(
{
"generated_texts": [
MockResponseDict(
{
"text": response_text,
}
)
]
}
)
}
),
}
)
elif provider == "MetaProvider" or provider == "XAIProvider":
return MockResponseDict(
{
"status": 200,
"data": MockResponseDict(
{
"inference_response": MockResponseDict(
{
"choices": [
MockResponseDict(
{
"text": response_text,
}
)
]
}
)
}
),
}
)
else:
return None
monkeypatch.setattr(llm._client, "generate_text", mocked_response)
output = llm.complete("This is a prompt.", temperature=0.2)
assert output.text == "This is the completion."
@pytest.mark.parametrize(
"test_model_id",
[
"cohere.command-r-16k",
"cohere.command-r-plus",
"meta.llama-3-70b-instruct",
"meta.llama-3.1-70b-instruct",
"xai.grok-3-mini",
"xai.grok-4-fast-non-reasoning",
],
)
def test_llm_chat(monkeypatch: MonkeyPatch, test_model_id: str) -> None:
"""Test valid chat call to OCI Generative AI LLM service."""
oci_gen_ai_client = MagicMock()
llm = OCIGenAI(model=test_model_id, client=oci_gen_ai_client)
provider = llm._provider.__class__.__name__
def mocked_response(*args): # type: ignore[no-untyped-def]
response_text = "Assistant chat reply."
response = None
if provider == "CohereProvider":
response = MockResponseDict(
{
"status": 200,
"data": MockResponseDict(
{
"chat_response": MockResponseDict(
{
"text": response_text,
"finish_reason": "stop",
"documents": [],
"citations": [],
"search_queries": [],
"is_search_required": False,
"tool_calls": None,
}
),
"model_id": "cohere.command-r-16k",
"model_version": "1.0",
}
),
"request_id": "req-1234567890",
"headers": {"content-length": "1234"},
}
)
elif provider == "MetaProvider" or provider == "XAIProvider":
response = MockResponseDict(
{
"status": 200,
"data": MockResponseDict(
{
"chat_response": MockResponseDict(
{
"choices": [
MockResponseDict(
{
"message": MockResponseDict(
{
"content": [
MockResponseDict(
{
"text": response_text,
}
)
]
}
),
"finish_reason": "stop",
}
)
],
"time_created": "2024-11-03T12:00:00Z",
}
),
"model_id": "meta.llama-3-70b-instruct",
"model_version": "1.0",
}
),
"request_id": "req-0987654321",
"headers": {"content-length": "1234"},
}
)
return response
monkeypatch.setattr(llm._client, "chat", mocked_response)
messages = [
ChatMessage(role="user", content="User message"),
]
# For Meta provider, we expect fewer fields in additional_kwargs
if provider == "MetaProvider" or provider == "XAIProvider":
additional_kwargs = {
"finish_reason": "stop",
"time_created": "2024-11-03T12:00:00Z",
}
else:
additional_kwargs = {
"finish_reason": "stop",
"documents": [],
"citations": [],
"search_queries": [],
"is_search_required": False,
}
expected = ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content="Assistant chat reply.",
additional_kwargs=additional_kwargs,
),
raw={}, # Mocked raw data
additional_kwargs={
"model_id": test_model_id,
"model_version": "1.0",
"request_id": "req-1234567890"
if test_model_id == "cohere.command-r-16k"
else "req-0987654321",
"content-length": "1234",
},
)
actual = llm.chat(messages, temperature=0.2)
assert actual.message.content == expected.message.content
@pytest.mark.parametrize(
"test_model_id", ["cohere.command-r-16k", "cohere.command-r-plus", "xai.grok-4"]
)
def test_llm_chat_with_tools(monkeypatch: MonkeyPatch, test_model_id: str) -> None:
"""Test chat_with_tools call to OCI Generative AI LLM service with tool calling."""
oci_gen_ai_client = MagicMock()
llm = OCIGenAI(model=test_model_id, client=oci_gen_ai_client)
provider = llm._provider.__class__.__name__
def mock_tool_function(param1: str) -> str:
"""Mock tool function that takes a string parameter."""
return f"Mock tool function called with {param1}"
# Create proper FunctionTool
mock_tool = FunctionTool.from_defaults(fn=mock_tool_function)
tools = [mock_tool]
messages = [
ChatMessage(role="user", content="User message"),
]
# Mock the client response
def mocked_response(*args, **kwargs):
response_text = "Assistant chat reply."
tool_calls = []
if provider == "CohereProvider":
tool_calls = [
MockResponseDict(
{
"name": "mock_tool_function",
"parameters": {"param1": "test"},
}
)
]
elif provider == "XAIProvider":
tool_calls = [
MockResponseDict(
{
"arguments": '{"param1": "test"}',
"name": "mock_tool_function",
"id": "call_38131587",
}
)
]
response = None
if provider == "CohereProvider":
response = MockResponseDict(
{
"status": 200,
"data": MockResponseDict(
{
"chat_response": MockResponseDict(
{
"text": response_text,
"finish_reason": "stop",
"documents": [],
"citations": [],
"search_queries": [],
"is_search_required": False,
"tool_calls": tool_calls,
}
),
"model_id": test_model_id,
"model_version": "1.0",
}
),
"request_id": "req-1234567890",
"headers": {"content-length": "1234"},
}
)
elif provider == "XAIProvider":
response = MockResponseDict(
{
"status": 200,
"data": MockResponseDict(
{
"chat_response": MockResponseDict(
{
"choices": [
MockResponseDict(
{
"message": MockResponseDict(
{
"content": [
MockResponseDict(
{
"text": "",
}
)
],
"role": "ASSISTANT",
"tool_calls": tool_calls,
}
),
"finish_reason": "tool_calls",
}
)
],
"time_created": "2024-11-03T12:00:00Z",
}
),
"model_id": test_model_id,
"model_version": "1.0",
}
),
"request_id": "req-0987654321",
"headers": {"content-length": "1234"},
}
)
else:
# MetaProvider does not support tools
raise NotImplementedError("Tools not supported for this provider.")
return response
monkeypatch.setattr(llm._client, "chat", mocked_response)
actual_response = llm.chat(
messages=messages,
tools=tools,
)
# Expected response structure
expected_tool_calls = []
if provider == "CohereProvider":
expected_tool_calls = [
{
"name": "mock_tool_function",
"toolUseId": actual_response.message.additional_kwargs["tool_calls"][0][
"toolUseId"
],
"input": json.dumps({"param1": "test"}),
}
]
elif provider == "XAIProvider":
expected_tool_calls = [
{
"name": "mock_tool_function",
"toolUseId": "1234",
"input": json.dumps({"param1": "test"}),
}
]
expected_response = None
if provider == "CohereProvider":
expected_response = ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content="Assistant chat reply.",
additional_kwargs={
"finish_reason": "stop",
"documents": [],
"citations": [],
"search_queries": [],
"is_search_required": False,
"tool_calls": expected_tool_calls,
},
),
raw={},
)
elif provider == "XAIProvider":
expected_response = ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content="",
additional_kwargs={
"finish_reason": "tool_calls",
"tool_calls": expected_tool_calls,
},
),
raw={},
)
# Compare everything except the toolUseId which is randomly generated
assert actual_response.message.role == expected_response.message.role
assert actual_response.message.content == expected_response.message.content
actual_kwargs = actual_response.message.additional_kwargs
expected_kwargs = expected_response.message.additional_kwargs
# Check all non-tool_calls fields
for key in [k for k in expected_kwargs if k != "tool_calls"]:
assert actual_kwargs[key] == expected_kwargs[key]
# Check tool calls separately
actual_tool_calls = actual_kwargs["tool_calls"]
assert len(actual_tool_calls) == len(expected_tool_calls)
for actual_tc, expected_tc in zip(actual_tool_calls, expected_tool_calls):
assert actual_tc["name"] == expected_tc["name"]
assert actual_tc["input"] == expected_tc["input"]
assert "toolUseId" in actual_tc
assert isinstance(actual_tc["toolUseId"], str)
assert len(actual_tc["toolUseId"]) > 0
# Check additional_kwargs
assert actual_response.additional_kwargs == expected_response.additional_kwargs
@pytest.mark.parametrize(
"test_model_id",
["meta.llama-3-70b-instruct", "meta.llama-3.1-70b-instruct", "xai.grok-4"],
)
def test_llm_multimodal_chat_with_image(
monkeypatch: MonkeyPatch, test_model_id: str
) -> None:
"""Test multimodal chat call to OCI Generative AI LLM service with image input."""
oci_gen_ai_client = MagicMock()
llm = OCIGenAI(model=test_model_id, client=oci_gen_ai_client)
def mocked_response(*args, **kwargs):
response_text = "The image contains the OCI logo."
return MockResponseDict(
{
"status": 200,
"data": MockResponseDict(
{
"chat_response": MockResponseDict(
{
"choices": [
MockResponseDict(
{
"message": MockResponseDict(
{
"content": [
MockResponseDict(
{"text": response_text}
)
]
}
),
"finish_reason": "stop",
}
)
],
"time_created": "2024-07-02T12:00:00Z",
}
),
"model_id": test_model_id,
"model_version": "1.0",
}
),
"request_id": "req-0987654321",
"headers": {"content-length": "1234"},
}
)
monkeypatch.setattr(llm._client, "chat", mocked_response)
image_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAIAQMAAAD+wSzIAAAABlBMVEX///+/v7+jQ3Y5AAAADklEQVQI12P4AIX8EAgALgAD/aNpbtEAAAAASUVORK5CYII="
messages = [
ChatMessage(
role="user",
content=[
TextBlock(text="What is in this image?"),
ImageBlock(image_url=image_url),
],
)
]
expected = ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content="The image contains the OCI logo.",
additional_kwargs={
"finish_reason": "stop",
"time_created": "2024-07-02T12:00:00Z",
},
),
raw={},
additional_kwargs={
"model_id": test_model_id,
"model_version": "1.0",
"request_id": "req-0987654321",
"content-length": "1234",
},
)
actual = llm.chat(messages)
assert actual.message.content == expected.message.content
| MockResponseDict |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 33292,
"end": 33471
} | class ____:
xlAutomaticScale = -4105 # from enum XlCategoryType
xlCategoryScale = 2 # from enum XlCategoryType
xlTimeScale = 3 # from enum XlCategoryType
| CategoryType |
python | PyCQA__pylint | tests/functional/u/useless/useless_parent_delegation.py | {
"start": 11926,
"end": 12098
} | class ____(Base):
@trigger_something("value1")
def method_decorated(self):
super(NotUselessSuperDecorators, self).method_decorated()
| NotUselessSuperDecorators |
python | sympy__sympy | sympy/codegen/pynodes.py | {
"start": 74,
"end": 111
} | class ____(AbstractList):
pass
| List |
python | getsentry__sentry | tests/sentry/db/models/fields/test_encryption.py | {
"start": 15832,
"end": 18897
} | class ____(models.Model):
id = models.AutoField(primary_key=True)
data = EncryptedCharField(null=True, blank=True)
class Meta:
app_label = "fixtures"
@pytest.mark.django_db
def test_encrypted_char_field_fernet_end_to_end(fernet_keys_store):
"""Test complete save/retrieve cycle with EncryptedField."""
key_id, _fernet_key = fernet_keys_store
with (
override_settings(DATABASE_ENCRYPTION_SETTINGS={"fernet_primary_key_id": key_id}),
override_options({"database.encryption.method": "fernet"}),
):
test_data = "This is sensitive data that should be encrypted"
model_instance = EncryptedFieldModel.objects.create(data=test_data)
assert model_instance.id is not None
# Verify the data was correctly encrypted and decrypted
retrieved_instance = EncryptedFieldModel.objects.get(id=model_instance.id)
assert retrieved_instance.data == test_data
with connection.cursor() as cursor:
cursor.execute(
"SELECT data FROM fixtures_encryptedfieldmodel WHERE id = %s",
[model_instance.id],
)
raw_value = cursor.fetchone()[0]
# Should be in fernet format: enc:fernet:key_id:base64data
assert raw_value.startswith(f"{MARKER_FERNET}:")
assert test_data not in raw_value
@pytest.mark.django_db
def test_encrypted_char_field_plaintext_end_to_end():
"""Test complete save/retrieve cycle with EncryptedCharField."""
with override_options({"database.encryption.method": "plaintext"}):
test_data = "This is plain text data"
model_instance = EncryptedFieldModel.objects.create(data=test_data)
assert model_instance.id is not None
# Verify the data was correctly encrypted and decrypted
retrieved_instance = EncryptedFieldModel.objects.get(id=model_instance.id)
assert retrieved_instance.data == test_data
with connection.cursor() as cursor:
cursor.execute(
"SELECT data FROM fixtures_encryptedfieldmodel WHERE id = %s",
[model_instance.id],
)
# Should be in a format enc:plaintext:base64data
raw_value = cursor.fetchone()[0]
assert raw_value.startswith(f"{MARKER_PLAINTEXT}:")
assert test_data not in raw_value
@pytest.mark.django_db
def test_encrypted_char_field_null_value():
with override_options({"database.encryption.method": "plaintext"}):
model_instance = EncryptedFieldModel.objects.create(data=None)
assert model_instance.id is not None
retrieved_instance = EncryptedFieldModel.objects.get(id=model_instance.id)
assert retrieved_instance.data is None
with connection.cursor() as cursor:
cursor.execute(
"SELECT data FROM fixtures_encryptedfieldmodel WHERE id = %s",
[model_instance.id],
)
raw_value = cursor.fetchone()[0]
assert raw_value is None
| EncryptedFieldModel |
python | encode__starlette | starlette/middleware/base.py | {
"start": 8877,
"end": 10333
} | class ____(Response):
def __init__(
self,
content: AsyncContentStream,
status_code: int = 200,
headers: Mapping[str, str] | None = None,
media_type: str | None = None,
info: Mapping[str, Any] | None = None,
) -> None:
self.info = info
self.body_iterator = content
self.status_code = status_code
self.media_type = media_type
self.init_headers(headers)
self.background = None
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if self.info is not None:
await send({"type": "http.response.debug", "info": self.info})
await send(
{
"type": "http.response.start",
"status": self.status_code,
"headers": self.raw_headers,
}
)
should_close_body = True
async for chunk in self.body_iterator:
if isinstance(chunk, dict):
# We got an ASGI message which is not response body (eg: pathsend)
should_close_body = False
await send(chunk)
continue
await send({"type": "http.response.body", "body": chunk, "more_body": True})
if should_close_body:
await send({"type": "http.response.body", "body": b"", "more_body": False})
if self.background:
await self.background()
| _StreamingResponse |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_glacier.py | {
"start": 1130,
"end": 4494
} | class ____:
def setup_method(self):
with mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.__init__", return_value=None):
self.hook = GlacierHook(aws_conn_id="aws_default")
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_retrieve_inventory_should_return_job_id(self, mock_conn):
# Given
job_id = {"jobId": "1234abcd"}
# when
mock_conn.return_value.initiate_job.return_value = job_id
result = self.hook.retrieve_inventory(VAULT_NAME)
# then
mock_conn.assert_called_once_with()
assert job_id == result
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_retrieve_inventory_should_log_mgs(self, mock_conn, caplog):
# given
job_id = {"jobId": "1234abcd"}
# when
with caplog.at_level(logging.INFO, logger=self.hook.log.name):
caplog.clear()
mock_conn.return_value.initiate_job.return_value = job_id
self.hook.retrieve_inventory(VAULT_NAME)
# then
assert caplog.messages == [
f"Retrieving inventory for vault: {VAULT_NAME}",
f"Initiated inventory-retrieval job for: {VAULT_NAME}",
f"Retrieval Job ID: {job_id.get('jobId')}",
]
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_retrieve_inventory_results_should_return_response(self, mock_conn):
# when
mock_conn.return_value.get_job_output.return_value = RESPONSE_BODY
response = self.hook.retrieve_inventory_results(VAULT_NAME, JOB_ID)
# then
mock_conn.assert_called_once_with()
assert response == RESPONSE_BODY
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_retrieve_inventory_results_should_log_mgs(self, mock_conn, caplog):
# when
with caplog.at_level(logging.INFO, logger=self.hook.log.name):
caplog.clear()
mock_conn.return_value.get_job_output.return_value = REQUEST_RESULT
self.hook.retrieve_inventory_results(VAULT_NAME, JOB_ID)
# then
assert caplog.messages == [f"Retrieving the job results for vault: {VAULT_NAME}..."]
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_describe_job_should_return_status_succeeded(self, mock_conn):
# when
mock_conn.return_value.describe_job.return_value = JOB_STATUS
response = self.hook.describe_job(VAULT_NAME, JOB_ID)
# then
mock_conn.assert_called_once_with()
assert response == JOB_STATUS
@mock.patch("airflow.providers.amazon.aws.hooks.glacier.GlacierHook.get_conn")
def test_describe_job_should_log_mgs(self, mock_conn, caplog):
# when
with caplog.at_level(logging.INFO, logger=self.hook.log.name):
caplog.clear()
mock_conn.return_value.describe_job.return_value = JOB_STATUS
self.hook.describe_job(VAULT_NAME, JOB_ID)
# then
assert caplog.messages == [
f"Retrieving status for vault: {VAULT_NAME} and job {JOB_ID}",
f"Job status: {JOB_STATUS.get('Action')}, code status: {JOB_STATUS.get('StatusCode')}",
]
| TestAmazonGlacierHook |
python | doocs__leetcode | lcp/LCP 19. 秋叶收藏集/Solution.py | {
"start": 0,
"end": 595
} | class ____:
def minimumOperations(self, leaves: str) -> int:
n = len(leaves)
f = [[inf] * 3 for _ in range(n)]
f[0][0] = int(leaves[0] == "y")
for i in range(1, n):
if leaves[i] == "r":
f[i][0] = f[i - 1][0]
f[i][1] = min(f[i - 1][0], f[i - 1][1]) + 1
f[i][2] = min(f[i - 1][2], f[i - 1][1])
else:
f[i][0] = f[i - 1][0] + 1
f[i][1] = min(f[i - 1][0], f[i - 1][1])
f[i][2] = min(f[i - 1][2], f[i - 1][1]) + 1
return f[n - 1][2]
| Solution |
python | tornadoweb__tornado | tornado/test/websocket_test.py | {
"start": 6879,
"end": 20904
} | class ____(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future() # type: Future[None]
return Application(
[
("/echo", EchoHandler, dict(close_future=self.close_future)),
("/non_ws", NonWebSocketHandler),
("/redirect", RedirectHandler),
("/header", HeaderHandler, dict(close_future=self.close_future)),
(
"/header_echo",
HeaderEchoHandler,
dict(close_future=self.close_future),
),
(
"/close_reason",
CloseReasonHandler,
dict(close_future=self.close_future),
),
(
"/error_in_on_message",
ErrorInOnMessageHandler,
dict(close_future=self.close_future),
),
(
"/async_prepare",
AsyncPrepareHandler,
dict(close_future=self.close_future),
),
(
"/path_args/(.*)",
PathArgsHandler,
dict(close_future=self.close_future),
),
(
"/coroutine",
CoroutineOnMessageHandler,
dict(close_future=self.close_future),
),
("/render", RenderMessageHandler, dict(close_future=self.close_future)),
(
"/subprotocol",
SubprotocolHandler,
dict(close_future=self.close_future),
),
(
"/open_coroutine",
OpenCoroutineHandler,
dict(close_future=self.close_future, test=self),
),
("/error_in_open", ErrorInOpenHandler),
("/error_in_async_open", ErrorInAsyncOpenHandler),
("/nodelay", NoDelayHandler),
],
template_loader=DictLoader({"message.html": "<b>{{ message }}</b>"}),
)
def get_http_client(self):
# These tests require HTTP/1; force the use of SimpleAsyncHTTPClient.
return SimpleAsyncHTTPClient()
def tearDown(self):
super().tearDown()
RequestHandler._template_loaders.clear()
def test_http_request(self):
# WS server, HTTP client.
response = self.fetch("/echo")
self.assertEqual(response.code, 400)
def test_missing_websocket_key(self):
response = self.fetch(
"/echo",
headers={
"Connection": "Upgrade",
"Upgrade": "WebSocket",
"Sec-WebSocket-Version": "13",
},
)
self.assertEqual(response.code, 400)
def test_bad_websocket_version(self):
response = self.fetch(
"/echo",
headers={
"Connection": "Upgrade",
"Upgrade": "WebSocket",
"Sec-WebSocket-Version": "12",
},
)
self.assertEqual(response.code, 426)
@gen_test
def test_websocket_gen(self):
ws = yield self.ws_connect("/echo")
yield ws.write_message("hello")
response = yield ws.read_message()
self.assertEqual(response, "hello")
def test_websocket_callbacks(self):
with ignore_deprecation():
websocket_connect(
"ws://127.0.0.1:%d/echo" % self.get_http_port(), callback=self.stop
)
ws = self.wait().result()
ws.write_message("hello")
ws.read_message(self.stop)
response = self.wait().result()
self.assertEqual(response, "hello")
self.close_future.add_done_callback(lambda f: self.stop())
ws.close()
self.wait()
@gen_test
def test_binary_message(self):
ws = yield self.ws_connect("/echo")
ws.write_message(b"hello \xe9", binary=True)
response = yield ws.read_message()
self.assertEqual(response, b"hello \xe9")
@gen_test
def test_unicode_message(self):
ws = yield self.ws_connect("/echo")
ws.write_message("hello \u00e9")
response = yield ws.read_message()
self.assertEqual(response, "hello \u00e9")
@gen_test
def test_error_in_closed_client_write_message(self):
ws = yield self.ws_connect("/echo")
ws.close()
with self.assertRaises(WebSocketClosedError):
ws.write_message("hello \u00e9")
@gen_test
def test_render_message(self):
ws = yield self.ws_connect("/render")
ws.write_message("hello")
response = yield ws.read_message()
self.assertEqual(response, "<b>hello</b>")
@gen_test
def test_error_in_on_message(self):
ws = yield self.ws_connect("/error_in_on_message")
ws.write_message("hello")
with ExpectLog(app_log, "Uncaught exception"):
response = yield ws.read_message()
self.assertIsNone(response)
@gen_test
def test_websocket_http_fail(self):
with self.assertRaises(HTTPError) as cm:
yield self.ws_connect("/notfound")
self.assertEqual(cm.exception.code, 404)
@gen_test
def test_websocket_http_success(self):
with self.assertRaises(WebSocketError):
yield self.ws_connect("/non_ws")
@gen_test
def test_websocket_http_redirect(self):
with self.assertRaises(HTTPError):
yield self.ws_connect("/redirect")
@gen_test
def test_websocket_network_fail(self):
sock, port = bind_unused_port()
sock.close()
with self.assertRaises(IOError):
with ExpectLog(gen_log, ".*", required=False):
yield websocket_connect(
"ws://127.0.0.1:%d/" % port, connect_timeout=3600
)
@gen_test
def test_websocket_close_buffered_data(self):
with contextlib.closing(
(yield websocket_connect("ws://127.0.0.1:%d/echo" % self.get_http_port()))
) as ws:
ws.write_message("hello")
ws.write_message("world")
# Close the underlying stream.
ws.stream.close()
@gen_test
def test_websocket_headers(self):
# Ensure that arbitrary headers can be passed through websocket_connect.
with contextlib.closing(
(
yield websocket_connect(
HTTPRequest(
"ws://127.0.0.1:%d/header" % self.get_http_port(),
headers={"X-Test": "hello"},
)
)
)
) as ws:
response = yield ws.read_message()
self.assertEqual(response, "hello")
@gen_test
def test_websocket_header_echo(self):
# Ensure that headers can be returned in the response.
# Specifically, that arbitrary headers passed through websocket_connect
# can be returned.
with contextlib.closing(
(
yield websocket_connect(
HTTPRequest(
"ws://127.0.0.1:%d/header_echo" % self.get_http_port(),
headers={"X-Test-Hello": "hello"},
)
)
)
) as ws:
self.assertEqual(ws.headers.get("X-Test-Hello"), "hello")
self.assertEqual(
ws.headers.get("X-Extra-Response-Header"), "Extra-Response-Value"
)
@gen_test
def test_server_close_reason(self):
ws = yield self.ws_connect("/close_reason")
msg = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(msg, None)
self.assertEqual(ws.close_code, 1001)
self.assertEqual(ws.close_reason, "goodbye")
# The on_close callback is called no matter which side closed.
code, reason = yield self.close_future
# The client echoed the close code it received to the server,
# so the server's close code (returned via close_future) is
# the same.
self.assertEqual(code, 1001)
@gen_test
def test_client_close_reason(self):
ws = yield self.ws_connect("/echo")
ws.close(1001, "goodbye")
code, reason = yield self.close_future
self.assertEqual(code, 1001)
self.assertEqual(reason, "goodbye")
@gen_test
def test_write_after_close(self):
ws = yield self.ws_connect("/close_reason")
msg = yield ws.read_message()
self.assertIs(msg, None)
with self.assertRaises(WebSocketClosedError):
ws.write_message("hello")
@gen_test
def test_async_prepare(self):
# Previously, an async prepare method triggered a bug that would
# result in a timeout on test shutdown (and a memory leak).
ws = yield self.ws_connect("/async_prepare")
ws.write_message("hello")
res = yield ws.read_message()
self.assertEqual(res, "hello")
@gen_test
def test_path_args(self):
ws = yield self.ws_connect("/path_args/hello")
res = yield ws.read_message()
self.assertEqual(res, "hello")
@gen_test
def test_coroutine(self):
ws = yield self.ws_connect("/coroutine")
# Send both messages immediately, coroutine must process one at a time.
yield ws.write_message("hello1")
yield ws.write_message("hello2")
res = yield ws.read_message()
self.assertEqual(res, "hello1")
res = yield ws.read_message()
self.assertEqual(res, "hello2")
@gen_test
def test_check_origin_valid_no_path(self):
port = self.get_http_port()
url = "ws://127.0.0.1:%d/echo" % port
headers = {"Origin": "http://127.0.0.1:%d" % port}
with contextlib.closing(
(yield websocket_connect(HTTPRequest(url, headers=headers)))
) as ws:
ws.write_message("hello")
response = yield ws.read_message()
self.assertEqual(response, "hello")
@gen_test
def test_check_origin_valid_with_path(self):
port = self.get_http_port()
url = "ws://127.0.0.1:%d/echo" % port
headers = {"Origin": "http://127.0.0.1:%d/something" % port}
with contextlib.closing(
(yield websocket_connect(HTTPRequest(url, headers=headers)))
) as ws:
ws.write_message("hello")
response = yield ws.read_message()
self.assertEqual(response, "hello")
@gen_test
def test_check_origin_invalid_partial_url(self):
port = self.get_http_port()
url = "ws://127.0.0.1:%d/echo" % port
headers = {"Origin": "127.0.0.1:%d" % port}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid(self):
port = self.get_http_port()
url = "ws://127.0.0.1:%d/echo" % port
# Host is 127.0.0.1, which should not be accessible from some other
# domain
headers = {"Origin": "http://somewhereelse.com"}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid_subdomains(self):
port = self.get_http_port()
# CaresResolver may return ipv6-only results for localhost, but our
# server is only running on ipv4. Test for this edge case and skip
# the test if it happens.
addrinfo = yield Resolver().resolve("localhost", port)
families = {addr[0] for addr in addrinfo}
if socket.AF_INET not in families:
self.skipTest("localhost does not resolve to ipv4")
return
url = "ws://localhost:%d/echo" % port
# Subdomains should be disallowed by default. If we could pass a
# resolver to websocket_connect we could test sibling domains as well.
headers = {"Origin": "http://subtenant.localhost"}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_subprotocols(self):
ws = yield self.ws_connect(
"/subprotocol", subprotocols=["badproto", "goodproto"]
)
self.assertEqual(ws.selected_subprotocol, "goodproto")
res = yield ws.read_message()
self.assertEqual(res, "subprotocol=goodproto")
@gen_test
def test_subprotocols_not_offered(self):
ws = yield self.ws_connect("/subprotocol")
self.assertIs(ws.selected_subprotocol, None)
res = yield ws.read_message()
self.assertEqual(res, "subprotocol=None")
@gen_test
def test_open_coroutine(self):
self.message_sent = Event()
ws = yield self.ws_connect("/open_coroutine")
yield ws.write_message("hello")
self.message_sent.set()
res = yield ws.read_message()
self.assertEqual(res, "ok")
@gen_test
def test_error_in_open(self):
with ExpectLog(app_log, "Uncaught exception"):
ws = yield self.ws_connect("/error_in_open")
res = yield ws.read_message()
self.assertIsNone(res)
@gen_test
def test_error_in_async_open(self):
with ExpectLog(app_log, "Uncaught exception"):
ws = yield self.ws_connect("/error_in_async_open")
res = yield ws.read_message()
self.assertIsNone(res)
@gen_test
def test_nodelay(self):
ws = yield self.ws_connect("/nodelay")
res = yield ws.read_message()
self.assertEqual(res, "hello")
| WebSocketTest |
python | google__pytype | pytype/tests/test_type_comments1.py | {
"start": 8405,
"end": 20212
} | class ____(test_base.BaseTest):
"""Tests for type comments applied to assignments."""
def test_class_attribute_comment(self):
ty = self.Infer("""
class Foo:
s = None # type: str
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo:
s = ... # type: str
""",
)
def test_instance_attribute_comment(self):
ty = self.Infer("""
class Foo:
def __init__(self):
self.s = None # type: str
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo:
s = ... # type: str
def __init__(self) -> None: ...
""",
)
def test_global_comment(self):
ty = self.Infer("""
X = None # type: str
""")
self.assertTypesMatchPytd(
ty,
"""
X = ... # type: str
""",
)
def test_global_comment2(self):
ty = self.Infer("""
X = None # type: str
def f(): global X
""")
self.assertTypesMatchPytd(
ty,
"""
X = ... # type: str
def f() -> None: ...
""",
)
def test_local_comment(self):
ty = self.Infer("""
X = None
def foo():
x = X # type: str
return x
""")
self.assertTypesMatchPytd(
ty,
"""
X = ... # type: None
def foo() -> str: ...
""",
)
def test_cellvar_comment(self):
"""Type comment on an assignment generating the STORE_DEREF opcode."""
ty = self.Infer("""
from typing import Mapping
def f():
map = dict() # type: Mapping
return (map, {x: map.get(y) for x, y in __any_object__})
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Mapping, Tuple
def f() -> Tuple[Mapping, dict]: ...
""",
)
def test_bad_comment(self):
ty, errors = self.InferWithErrors("""
X = None # type: abc def # invalid-annotation[e]
""")
if self.python_version >= (3, 10):
error_reason = "invalid syntax"
else:
error_reason = "unexpected EOF"
self.assertErrorRegexes(errors, {"e": rf"abc def.*{error_reason}"})
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
X = ... # type: Any
""",
)
def test_conversion_error(self):
ty, errors = self.InferWithErrors("""
X = None # type: 1 if __random__ else 2 # invalid-annotation[e]
""")
self.assertErrorRegexes(errors, {"e": r"X.*Must be constant"})
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
X = ... # type: Any
""",
)
def test_name_error_inside_comment(self):
errors = self.CheckWithErrors("""
X = None # type: Foo # name-error[e]
""")
self.assertErrorRegexes(errors, {"e": r"Foo"})
def test_warn_on_ignored_type_comment(self):
errors = self.CheckWithErrors("""
X = []
X[0] = None # type: str # ignored-type-comment[e1]
# type: int # ignored-type-comment[e2]
""")
self.assertErrorRegexes(errors, {"e1": r"str", "e2": r"int"})
def test_attribute_initialization(self):
ty = self.Infer("""
class A:
def __init__(self):
self.x = 42
a = None # type: A
x = a.x
""")
self.assertTypesMatchPytd(
ty,
"""
class A:
x = ... # type: int
def __init__(self) -> None: ...
a = ... # type: A
x = ... # type: int
""",
)
def test_none_to_none_type(self):
ty = self.Infer("""
x = None # type: None
""")
self.assertTypesMatchPytd(
ty,
"""
x = ... # type: None
""",
)
def test_module_instance_as_bad_type_comment(self):
errors = self.CheckWithErrors("""
import sys
x = None # type: sys # invalid-annotation[e]
""")
self.assertErrorRegexes(errors, {"e": r"instance of module.*x"})
def test_forward_reference(self):
ty, errors = self.InferWithErrors("""
a = None # type: "A"
b = None # type: "Nonexistent" # name-error[e]
class A:
def __init__(self):
self.x = 42
def f(self):
return a.x
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
class A:
x = ... # type: int
def __init__(self) -> None: ...
def f(self) -> int: ...
a = ... # type: A
b = ... # type: Any
""",
)
self.assertErrorRegexes(errors, {"e": r"Nonexistent"})
def test_class_variable_forward_reference(self):
ty = self.Infer("""
class A:
a = None # type: 'A'
def __init__(self):
self.x = 42
""")
self.assertTypesMatchPytd(
ty,
"""
class A:
a: A
x: int
def __init__(self) -> None: ...
""",
)
def test_use_forward_reference(self):
ty = self.Infer("""
a = None # type: "A"
x = a.x
class A:
def __init__(self):
self.x = 42
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
class A:
x = ... # type: int
def __init__(self) -> None: ...
a = ... # type: A
x = ... # type: Any
""",
)
def test_use_class_variable_forward_reference(self):
# Attribute accesses for A().a all get resolved to Any (b/134706992)
ty = self.Infer("""
class A:
a = None # type: 'A'
def f(self):
return self.a
x = A().a
def g():
return A().a
y = g()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, TypeVar
_TA = TypeVar('_TA', bound=A)
class A:
a: A
def f(self: _TA) -> _TA: ...
x: A
y: A
def g() -> A: ...
""",
)
def test_class_variable_forward_reference_error(self):
self.InferWithErrors("""
class A:
a = None # type: 'A'
g = A().a.foo() # attribute-error
""")
def test_multiline_value(self):
ty, errors = self.InferWithErrors("""
v = [
{
"a": 1 # type: complex # ignored-type-comment[e1]
} # type: dict[str, int] # ignored-type-comment[e2]
] # type: list[dict[str, float]]
""")
self.assertTypesMatchPytd(
ty,
"""
v = ... # type: list[dict[str, float]]
""",
)
self.assertErrorRegexes(
errors,
{
"e1": r"Stray type comment: complex",
"e2": r"Stray type comment: dict\[str, int\]",
},
)
def test_multiline_value_with_blank_lines(self):
ty = self.Infer("""
a = [[
]
] # type: list[list[int]]
""")
self.assertTypesMatchPytd(
ty,
"""
a = ... # type: list[list[int]]
""",
)
def test_type_comment_name_error(self):
errors = self.CheckWithErrors("""
def f():
x = None # type: Any # invalid-annotation[e]
""")
self.assertErrorRegexes(errors, {"e": r"not defined$"})
def test_type_comment_invalid_syntax(self):
errors = self.CheckWithErrors("""
def f():
x = None # type: y = 1 # invalid-annotation[e]
""")
self.assertErrorRegexes(errors, {"e": r"invalid syntax$"})
def test_discarded_type_comment(self):
"""Discard the first whole-line comment, keep the second."""
ty = self.Infer("""
# We want either # type: ignore or # type: int
def hello_world():
# type: () -> str
return 'hello world'
""")
self.assertTypesMatchPytd(
ty,
"""
def hello_world() -> str: ...
""",
)
def test_multiple_type_comments(self):
"""We should not allow multiple type comments on one line."""
errors = self.CheckWithErrors("""
a = 42 # type: int # type: float # invalid-directive[e]
""")
self.assertErrorRegexes(errors, {"e": r"Multiple"})
def test_nested_comment_alias(self):
ty = self.Infer("""
class A: pass
class B:
C = A
x = None # type: C
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Type
class A: pass
class B:
C: Type[A]
x: A
""",
)
def test_nested_classes_comments(self):
ty = self.Infer("""
class A:
class B: pass
x = None # type: B
""")
self.assertTypesMatchPytd(
ty,
"""
class A:
class B: ...
x: A.B
""",
)
def test_list_comprehension_comments(self):
ty, errors = self.InferWithErrors("""
from typing import List
def f(x):
# type: (str) -> None
pass
def g(xs):
# type: (List[str]) -> List[str]
ys = [f(x) for x in xs] # type: List[str] # annotation-type-mismatch[e]
return ys
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
def f(x: str) -> None: ...
def g(xs: List[str]) -> List[str]: ...
""",
)
self.assertErrorRegexes(
errors, {"e": r"Annotation: list\[str\].*Assignment: list\[None\]"}
)
def test_multiple_assignments(self):
ty = self.Infer("""
a = 1; b = 2; c = 4 # type: float
""")
self.assertTypesMatchPytd(
ty,
"""
a = ... # type: int
b = ... # type: int
c = ... # type: float
""",
)
def test_instantiate_fully_quoted_type(self):
ty, errors = self.InferWithErrors("""
from typing import Optional
x = None # type: "Optional[A]"
class A:
a = 0
y = x.a # attribute-error[e]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Optional
x: Optional[A]
class A:
a: int
y: int
""",
)
self.assertErrorRegexes(errors, {"e": r"a.*None"})
def test_do_not_resolve_late_type_to_function(self):
ty = self.Infer("""
v = None # type: "A"
class A:
def A(self):
pass
""")
self.assertTypesMatchPytd(
ty,
"""
v: A
class A:
def A(self) -> None: ...
""",
)
def test_illegal_function_late_type(self):
self.CheckWithErrors("""
v = None # type: "F" # invalid-annotation
def F(): pass
""")
def test_bad_type_comment_in_constructor(self):
self.CheckWithErrors("""
class Foo:
def __init__(self):
self.x = None # type: "Bar" # name-error
""")
def test_dict_type_comment(self):
self.Check("""
from typing import Any, Callable, Dict, Tuple
d = {
'a': 'long'
'string'
'value'
} # type: Dict[str, str]
""")
def test_break_on_period(self):
self.Check("""
really_really_really_long_module_name = None # type: module
d = {}
v = d.get('key', (really_really_really_long_module_name.
also_long_attribute_name)) # type: int
""")
def test_assignment_between_functions(self):
ty = self.Infer("""
def f(): pass
x = 0 # type: int
def g():
'''Docstring.'''
""")
self.assertTypesMatchPytd(
ty,
"""
def f() -> None: ...
x: int
def g() -> None: ...
""",
)
def test_type_comment_on_class(self):
# What error is reported differs depending on whether directors.py is using
# libcst (host 3.8-) or ast (host 3.9+) and the target version. All we care
# about is that the type comment is not ignored.
self.CheckWithErrors("""
class Foo( # annotation-type-mismatch<3.9
int): # type: str # ignored-type-comment>=3.9
pass
""")
if __name__ == "__main__":
test_base.main()
| AssignmentCommentTest |
python | realpython__materials | wordcount/tests/task_04.py | {
"start": 505,
"end": 1764
} | class ____:
def test_long_word_without_trailing_newline(self, wc):
assert b" 0 1 29\n" == wc(stdin=b"floccinaucinihilipilification")
def test_long_word_with_trailing_newline(self, wc):
assert b" 1 1 30\n" == wc(stdin=b"floccinaucinihilipilification\n")
def test_multiple_words_without_trailing_newline(self, wc):
assert b" 0 5 26\n" == wc(stdin=b"Lorem ipsum dolor sit amet")
def test_multiple_words_with_trailing_newline(self, wc):
assert b" 1 5 27\n" == wc(stdin=b"Lorem ipsum dolor sit amet\n")
def test_long_text_multiple_lines(self, wc):
assert b" 6 69 447\n" == wc(
stdin=(
b"Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\n"
b"tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\n"
b"quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\n"
b"consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\n"
b"cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\n"
b"proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n"
)
)
| Test |
python | PrefectHQ__prefect | tests/cli/test_api_command.py | {
"start": 10195,
"end": 11006
} | class ____:
"""Test different input sources for request body."""
def test_data_from_file(self, respx_mock: MockRouter, tmp_path) -> None:
"""Test reading data from file with @filename syntax."""
route = respx_mock.post("http://localhost:4200/api/flows/filter").mock(
return_value=httpx.Response(200, json={})
)
data_file = tmp_path / "data.json"
data_file.write_text('{"test": "value"}')
with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
invoke_and_assert(
["api", "POST", "/flows/filter", "--data", f"@{data_file}"],
expected_code=0,
)
assert route.called
assert json.loads(route.calls.last.request.content) == {"test": "value"}
| TestInputSources |
python | xlwings__xlwings | tests/test_app.py | {
"start": 119,
"end": 769
} | class ____(TestBase):
def test_active(self):
self.assertTrue(xw.apps.active in [self.app1, self.app2])
def test_len(self):
n_original = len(xw.apps)
app = xw.App(spec=SPEC)
app.books.add()
self.assertEqual(n_original + 1, len(xw.apps))
app.quit()
def test_count(self):
self.assertEqual(xw.apps.count, len(xw.apps))
def test_iter(self):
for app in xw.apps:
if app == (self.app1 or self.app2):
self.assertEqual(len(app.books), 2)
def test_keys(self):
k = xw.apps.keys()[0]
self.assertEqual(xw.apps[k], xw.apps(k))
| TestApps |
python | bokeh__bokeh | src/bokeh/models/comparisons.py | {
"start": 3566,
"end": 4530
} | class ____(Comparison):
''' A client-side comparison that can sort NaN values first or last. This
comparison can be useful for DataTable columns.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
ascending_first = Bool(default=False, help="""
Whether NaN values should appear first or last in an ascending sort.
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| NanCompare |
python | walkccc__LeetCode | solutions/2293. Min Max Game/2293.py | {
"start": 0,
"end": 332
} | class ____:
def minMaxGame(self, nums: list[int]) -> int:
if len(nums) == 1:
return nums[0]
nextNums = []
for i in range(len(nums) // 2):
nextNums.append(min(nums[2 * i], nums[2 * i + 1]) if i % 2 == 0 else
max(nums[2 * i], nums[2 * i + 1]))
return self.minMaxGame(nextNums)
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVar7.py | {
"start": 723,
"end": 2526
} | class ____(Generic[_T1, _T2]):
async def func1(self, a: _T1) -> _T1:
_ = a.var1
# This should generate an error.
_ = a.var2
# This should generate an error.
_ = a(3.3)
# This should generate two errors.
_ = a[0]
# This should generate an error.
_ = a + 1
_ = a + a
a += a
# This should generate an error.
_ = -a
# This should generate an error.
a += 3
# This should generate an error.
_ = await a
# This should generate two errors.
for _ in a:
pass
a.do_stuff()
# This should generate an error.
a.do_other_stuff()
_ = a.__class__
_ = a.__doc__
return a
async def func2(self, a: _T2, b: _T1) -> _T1:
# This should generate two errors.
_ = a.var2
# This should generate an error.
_ = a(3.3)
# This should generate two errors.
_ = a[0]
# This should generate an error.
_ = a + 1
_ = a + a
a += a
# This should generate an error.
_ = a + b
# This should generate an error.
_ = -a
# This should generate an error.
a += 3
# This should generate an error.
_ = await a
# This should generate an error.
for _ in a:
pass
# This should generate an error.
a.do_other_stuff()
_ = a.__class__
_ = a.__doc__
return b
_T3 = TypeVar("_T3", float, int, str)
_T4 = TypeVar("_T4", float, int)
def custom_add(a: _T3, b: _T4) -> float:
if isinstance(a, str):
return 0
c = a + b
reveal_type(c, expected_text="float* | int*")
return c
| ClassA |
python | openai__openai-python | tests/test_transform.py | {
"start": 4022,
"end": 4323
} | class ____(TypedDict):
foo: str
@parametrize
@pytest.mark.asyncio
async def test_ignores_invalid_input(use_async: bool) -> None:
assert await transform({"bar": "<foo>"}, Foo7, use_async) == {"bAr": "<foo>"}
assert await transform({"foo": "<foo>"}, Foo7, use_async) == {"foo": "<foo>"}
| Bar7 |
python | spyder-ide__spyder | spyder/api/widgets/dialogs.py | {
"start": 1182,
"end": 2786
} | class ____(QDialogButtonBox):
"""QDialogButtonBox widget for Spyder."""
def __init__(self, buttons=None, orientation=Qt.Horizontal, parent=None):
if buttons:
super().__init__(buttons, orientation, parent)
elif orientation:
super().__init__(orientation=orientation, parent=parent)
else:
super().__init__(parent=parent)
# Don't display icons on standard buttons. This is a problem on Linux
button_constants = [
QDialogButtonBox.Ok,
QDialogButtonBox.Open,
QDialogButtonBox.Save,
QDialogButtonBox.Cancel,
QDialogButtonBox.Close,
QDialogButtonBox.Discard,
QDialogButtonBox.Apply,
QDialogButtonBox.Reset,
QDialogButtonBox.RestoreDefaults,
QDialogButtonBox.Help,
QDialogButtonBox.SaveAll,
QDialogButtonBox.Yes,
QDialogButtonBox.YesToAll,
QDialogButtonBox.No,
QDialogButtonBox.NoToAll,
QDialogButtonBox.Abort,
QDialogButtonBox.Retry,
QDialogButtonBox.Ignore,
]
for constant in button_constants:
button = self.button(constant)
if button is not None:
button.setIcon(QIcon())
# Set a reasonable spacing between buttons. This is a problem on Mac
self.layout().setSpacing(2 * AppStyle.MarginSize)
# Set style
style = _SpyderButtonsProxyStyle(None)
style.setParent(self)
self.setStyle(style)
| SpyderDialogButtonBox |
python | getsentry__sentry-python | tests/test_basics.py | {
"start": 28645,
"end": 29903
} | class ____:
def __init__(self, word):
self.word = word
def greet(self, new_word=None):
return "Hello, {}".format(new_word if new_word else self.word)
def test_functions_to_trace_with_class(sentry_init, capture_events):
functions_to_trace = [
{"qualified_name": "tests.test_basics.WorldGreeter.greet"},
]
sentry_init(
traces_sample_rate=1.0,
functions_to_trace=functions_to_trace,
)
events = capture_events()
with start_transaction(name="something"):
wg = WorldGreeter("World")
wg.greet()
wg.greet("You")
assert len(events) == 1
(event,) = events
assert len(event["spans"]) == 2
assert event["spans"][0]["description"] == "tests.test_basics.WorldGreeter.greet"
assert event["spans"][1]["description"] == "tests.test_basics.WorldGreeter.greet"
def test_multiple_setup_integrations_calls():
first_call_return = setup_integrations([NoOpIntegration()], with_defaults=False)
assert first_call_return == {NoOpIntegration.identifier: NoOpIntegration()}
second_call_return = setup_integrations([NoOpIntegration()], with_defaults=False)
assert second_call_return == {NoOpIntegration.identifier: NoOpIntegration()}
| WorldGreeter |
python | graphql-python__graphene | graphene/types/datetime.py | {
"start": 181,
"end": 1327
} | class ____(Scalar):
"""
The `Date` scalar type represents a Date
value as specified by
[iso8601](https://en.wikipedia.org/wiki/ISO_8601).
"""
@staticmethod
def serialize(date):
if isinstance(date, datetime.datetime):
date = date.date()
if not isinstance(date, datetime.date):
raise GraphQLError(f"Date cannot represent value: {repr(date)}")
return date.isoformat()
@classmethod
def parse_literal(cls, node, _variables=None):
if not isinstance(node, StringValueNode):
raise GraphQLError(
f"Date cannot represent non-string value: {print_ast(node)}"
)
return cls.parse_value(node.value)
@staticmethod
def parse_value(value):
if isinstance(value, datetime.date):
return value
if not isinstance(value, str):
raise GraphQLError(f"Date cannot represent non-string value: {repr(value)}")
try:
return datetime.date.fromisoformat(value)
except ValueError:
raise GraphQLError(f"Date cannot represent value: {repr(value)}")
| Date |
python | cherrypy__cherrypy | cherrypy/lib/locking.py | {
"start": 239,
"end": 926
} | class ____(object):
"""A simple timer that will indicate when an expiration time has passed."""
def __init__(self, expiration):
"""Create a timer that expires at `expiration` (UTC datetime)."""
self.expiration = expiration
@classmethod
def after(cls, elapsed):
"""Return a timer that will expire after `elapsed` passes."""
return cls(
datetime.datetime.now(datetime.timezone.utc) + elapsed,
)
def expired(self):
"""Check whether the timer has expired."""
return (
datetime.datetime.now(
datetime.timezone.utc,
)
>= self.expiration
)
| Timer |
python | pytorch__pytorch | test/test_cuda_sanitizer.py | {
"start": 5076,
"end": 16552
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.handler = csan.EventHandler()
def kernel_launch(
self,
stream: StreamId,
read_only: Optional[list[DataPtr]] = None,
read_write: Optional[list[DataPtr]] = None,
) -> list[csan.SynchronizationError]:
if read_only is None:
read_only = []
if read_write is None:
read_write = []
return self.handler._handle_kernel_launch(
stream,
read_only,
read_write,
{},
"",
{k: [""] for k in read_only + read_write},
)
def assert_good_kernel_launch(
self,
stream: StreamId,
read_only: Optional[list[DataPtr]] = None,
read_write: Optional[list[DataPtr]] = None,
) -> None:
self.assertEqual(self.kernel_launch(stream, read_only, read_write), [])
def assert_bad_kernel_launch(
self,
number_of_errors: int,
stream: StreamId,
read_only: Optional[list[DataPtr]] = None,
read_write: Optional[list[DataPtr]] = None,
) -> None:
errors = self.kernel_launch(stream, read_only, read_write)
self.assertEqual(len(errors), number_of_errors)
def test_empty_kernel_launch(self):
self.assert_good_kernel_launch(stream_id(0))
def test_simple_passing(self):
self.assert_good_kernel_launch(stream_id(1), read_only=[tensor_id(1)])
self.assert_good_kernel_launch(stream_id(2), read_only=[tensor_id(1)])
def test_simple_error(self):
self.assert_good_kernel_launch(stream_id(1), read_only=[tensor_id(1)])
self.assert_bad_kernel_launch(1, stream_id(2), read_write=[tensor_id(1)])
def test_simple_sync(self):
self.assert_good_kernel_launch(stream_id(1), read_only=[tensor_id(1)])
self.handler._handle_event_record(event_id(0), stream_id(1))
self.handler._handle_event_wait(event_id(0), stream_id(2))
self.assert_good_kernel_launch(stream_id(2), read_write=[tensor_id(1)])
def test_reads_check_last_write(self):
# Tests that not only the first read operation checks if it is in conflict
# with the last write operation, but all read operations do.
self.assert_good_kernel_launch(stream_id(1), read_write=[tensor_id(1)])
self.handler._handle_event_record(event_id(0), stream_id(1))
self.handler._handle_event_wait(event_id(0), stream_id(2))
self.assert_good_kernel_launch(stream_id(2), read_only=[tensor_id(1)])
self.assert_bad_kernel_launch(1, stream_id(3), read_only=[tensor_id(1)])
def test_branch_sync(self):
# Tests that two streams can read after both waiting for a third, but they
# cannot write without further synchronization.
self.assert_good_kernel_launch(stream_id(1), read_write=[tensor_id(1)])
self.handler._handle_event_record(event_id(0), stream_id(1))
self.handler._handle_event_wait(event_id(0), stream_id(2))
self.handler._handle_event_wait(event_id(0), stream_id(3))
self.assert_good_kernel_launch(stream_id(2), read_only=[tensor_id(1)])
self.assert_good_kernel_launch(stream_id(3), read_only=[tensor_id(1)])
self.assert_bad_kernel_launch(1, stream_id(2), read_write=[tensor_id(1)])
def test_chain_sync(self):
iterations = 10
self.assert_good_kernel_launch(stream_id(0), read_only=[tensor_id(1)])
for i in range(iterations):
self.handler._handle_event_record(event_id(i), stream_id(i))
self.handler._handle_event_wait(event_id(i), stream_id(i + 1))
self.assert_good_kernel_launch(stream_id(iterations), read_write=[tensor_id(1)])
def test_expired_record(self):
self.assert_good_kernel_launch(stream_id(1), read_only=[tensor_id(1)])
self.handler._handle_event_record(event_id(0), stream_id(1))
self.assert_good_kernel_launch(stream_id(1), read_only=[tensor_id(1)])
self.handler._handle_event_wait(event_id(0), stream_id(2))
self.assert_bad_kernel_launch(1, stream_id(2), read_write=[tensor_id(1)])
def test_deleted_record(self):
for should_delete, should_create in [
(True, True),
(True, False),
(False, True),
]:
self.setUp()
with self.subTest(should_delete=should_delete, should_create=should_create):
self.assert_good_kernel_launch(stream_id(1), read_only=[tensor_id(1)])
self.handler._handle_event_record(event_id(0), stream_id(1))
if should_delete:
self.handler._handle_event_deletion(event_id(0))
if should_create:
self.handler._handle_event_creation(event_id(0))
self.handler._handle_event_wait(event_id(0), stream_id(2))
self.assert_bad_kernel_launch(
1, stream_id(2), read_write=[tensor_id(1)]
)
def test_all_reads_checked_failing(self):
iterations = 10
for i in range(1, iterations):
self.assert_good_kernel_launch(stream_id(i), read_only=[tensor_id(1)])
self.handler._handle_event_record(event_id(i), stream_id(i))
for i in range(1, iterations):
self.handler._handle_event_wait(event_id(i), stream_id(0))
self.assert_good_kernel_launch(stream_id(iterations), read_only=[tensor_id(1)])
self.handler._handle_event_record(event_id(iterations), stream_id(i))
# Does not synchronize with the last read.
self.assert_bad_kernel_launch(1, stream_id(0), read_write=[tensor_id(1)])
def test_all_reads_checked_passing(self):
iterations = 10
for i in range(1, iterations):
self.assert_good_kernel_launch(stream_id(i), read_only=[tensor_id(1)])
self.handler._handle_event_record(event_id(i), stream_id(i))
for i in range(1, iterations):
self.handler._handle_event_wait(event_id(i), stream_id(0))
self.assert_good_kernel_launch(stream_id(0), read_write=[tensor_id(1)])
def test_multiple_errors(self):
iterations = 10
self.assert_good_kernel_launch(
stream_id(0), read_write=[tensor_id(i) for i in range(iterations)]
)
self.assert_bad_kernel_launch(
iterations,
stream_id(1),
read_write=[tensor_id(i) for i in range(iterations)],
)
def test_correct_state_merging(self):
# Tests that after waiting for an event, a stream's state is indeed set
# to the pointwise maximum of its old state and the recorded state.
self.assert_good_kernel_launch(stream_id(1), read_write=[tensor_id(1)])
self.assert_good_kernel_launch(stream_id(2), read_write=[tensor_id(2)])
self.handler._handle_event_record(event_id(1), stream_id(1))
self.handler._handle_event_record(event_id(2), stream_id(2))
self.assert_good_kernel_launch(stream_id(1), read_write=[tensor_id(1)])
self.assert_good_kernel_launch(stream_id(2), read_write=[tensor_id(2)])
self.handler._handle_event_wait(event_id(1), stream_id(2))
self.handler._handle_event_wait(event_id(2), stream_id(1))
self.handler._handle_event_record(event_id(3), stream_id(2))
self.handler._handle_event_wait(event_id(3), stream_id(1))
self.assert_good_kernel_launch(
stream_id(1), read_write=[tensor_id(1), tensor_id(2)]
)
def test_record_override(self):
self.assert_good_kernel_launch(stream_id(1), read_only=[tensor_id(1)])
self.assert_good_kernel_launch(stream_id(2), read_only=[tensor_id(2)])
self.handler._handle_event_record(event_id(1), stream_id(1))
self.handler._handle_event_record(event_id(1), stream_id(2))
self.handler._handle_event_wait(event_id(1), stream_id(3))
self.assert_bad_kernel_launch(1, stream_id(3), read_write=[tensor_id(1)])
def test_multiple_wait(self):
# Tests that a wait operation can be performed multiple times on the same event
# by different streams.
self.assert_good_kernel_launch(stream_id(1), read_write=[tensor_id(1)])
self.handler._handle_event_record(event_id(1), stream_id(1))
self.handler._handle_event_wait(event_id(1), stream_id(2))
self.handler._handle_event_wait(event_id(1), stream_id(3))
self.assert_good_kernel_launch(stream_id(2), read_only=[tensor_id(1)])
self.assert_good_kernel_launch(stream_id(3), read_only=[tensor_id(1)])
def test_device_synchronize(self):
# Tests that a device synchronization does correctly cause all streams
# to synchronize with each other.
iterations = 10
for i in range(1, iterations):
self.assert_good_kernel_launch(stream_id(i), read_write=[tensor_id(i)])
self.handler._handle_device_synchronization()
self.assert_good_kernel_launch(
stream_id(0), read_write=[tensor_id(i) for i in range(1, iterations)]
)
def test_device_synchronization_expired(self):
# Tests that a device synchronization is a one-time synchronization.
self.assert_good_kernel_launch(stream_id(1), read_write=[tensor_id(1)])
self.handler._handle_device_synchronization()
self.assert_good_kernel_launch(stream_id(1), read_write=[tensor_id(1)])
self.assert_bad_kernel_launch(1, stream_id(2), read_write=[tensor_id(1)])
def test_new_stream_is_synchronized(self):
# Tests that after synchronizing operations with the host, any newly created
# stream is guaranteed to be synchronized with them as well.
self.assert_good_kernel_launch(stream_id(1), read_write=[tensor_id(1)])
self.handler._handle_device_synchronization()
self.handler._handle_stream_creation(stream_id(2))
self.assert_good_kernel_launch(stream_id(2), read_write=[tensor_id(1)])
def test_stream_synchronize(self):
# Tests that a stream synchronization does correctly cause all streams to wait
# for one specific stream, but does not synchronize all streams with each other.
self.assert_good_kernel_launch(stream_id(0), read_write=[tensor_id(1)])
self.assert_good_kernel_launch(stream_id(1), read_write=[tensor_id(2)])
self.handler._handle_stream_synchronization(stream_id(0))
self.assert_good_kernel_launch(stream_id(2), read_only=[tensor_id(1)])
self.assert_good_kernel_launch(stream_id(3), read_only=[tensor_id(1)])
self.assert_bad_kernel_launch(1, stream_id(4), read_only=[tensor_id(2)])
def test_event_synchronize(self):
# Tests that an event synchronization does correctly cause all streams to wait
# for a recorded event, but does not guarantee synchronization with the current
# state of the stream that recorded the event.
self.assert_good_kernel_launch(stream_id(1), read_write=[tensor_id(1)])
self.handler._handle_event_record(event_id(1), stream_id(1))
self.assert_good_kernel_launch(stream_id(1), read_write=[tensor_id(2)])
self.handler._handle_event_synchronization(event_id(1))
self.assert_good_kernel_launch(stream_id(2), read_write=[tensor_id(1)])
self.assert_bad_kernel_launch(1, stream_id(2), read_write=[tensor_id(2)])
| TestEventHandler |
python | great-expectations__great_expectations | great_expectations/execution_engine/sqlalchemy_execution_engine.py | {
"start": 5664,
"end": 7253
} | class ____(ValueError):
def __init__(self, filter_clause: Any) -> None:
super().__init__(f"Invalid filter clause: {type(filter_clause)}")
def _dialect_requires_persisted_connection(
connection_string: str | None = None,
credentials: dict | None = None,
url: str | None = None,
) -> bool:
"""Determine if the dialect needs a persisted connection.
dialect_name isn't available yet since the engine isn't yet created when we call this method,
so we determine the dialect from the creds/url/params.
Args:
connection_string: Database connection string to check
credentials: Dictionary of database connection credentials. Only `drivername` is checked.
url: Database connection URL to parse and check.
Returns:
Boolean indicating whether the dialect requires a persisted connection.
"""
if sum(bool(x) for x in [connection_string, credentials, url is not None]) != 1:
raise ValueError("Exactly one of connection_string, credentials, url must be specified") # noqa: TRY003 # FIXME CoP
return_val = False
if connection_string is not None:
str_to_check = connection_string
elif credentials is not None:
drivername = credentials.get("drivername", "")
str_to_check = drivername
else:
parsed_url = make_url(url)
str_to_check = parsed_url.drivername
if any(
str_to_check.startswith(dialect_name.value)
for dialect_name in _PERSISTED_CONNECTION_DIALECTS
):
return_val = True
return return_val
| InvalidFilterClause |
python | huggingface__transformers | examples/pytorch/text-classification/run_xnli.py | {
"start": 2035,
"end": 3935
} | class ____:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
max_seq_length: Optional[int] = field(
default=128,
metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
},
)
@dataclass
| DataTrainingArguments |
python | ray-project__ray | python/ray/util/dask/callbacks.py | {
"start": 7732,
"end": 10511
} | class ____(RayDaskCallback):
def __init__(self):
@ray.remote
class ProgressBarActor:
def __init__(self):
self._init()
def submit(self, key, deps, now):
for dep in deps.keys():
self.deps[key].add(dep)
self.submitted[key] = now
self.submission_queue.append((key, now))
def task_scheduled(self, key, now):
self.scheduled[key] = now
def finish(self, key, now):
self.finished[key] = now
def result(self):
return len(self.submitted), len(self.finished)
def report(self):
result = defaultdict(dict)
for key, finished in self.finished.items():
submitted = self.submitted[key]
scheduled = self.scheduled[key]
# deps = self.deps[key]
result[key]["execution_time"] = (
finished - scheduled
).total_seconds()
# Calculate the scheduling time.
# This is inaccurate.
# We should subtract scheduled - (last dep completed).
# But currently it is not easy because
# of how getitem is implemented in dask on ray sort.
result[key]["scheduling_time"] = (
scheduled - submitted
).total_seconds()
result["submission_order"] = self.submission_queue
return result
def ready(self):
pass
def reset(self):
self._init()
def _init(self):
self.submission_queue = []
self.submitted = defaultdict(None)
self.scheduled = defaultdict(None)
self.finished = defaultdict(None)
self.deps = defaultdict(set)
try:
self.pb = ray.get_actor("_dask_on_ray_pb")
ray.get(self.pb.reset.remote())
except ValueError:
self.pb = ProgressBarActor.options(name="_dask_on_ray_pb").remote()
ray.get(self.pb.ready.remote())
def _ray_postsubmit(self, task, key, deps, object_ref):
# Indicate the dask task is submitted.
self.pb.submit.remote(key, deps, datetime.now())
def _ray_pretask(self, key, object_refs):
self.pb.task_scheduled.remote(key, datetime.now())
def _ray_posttask(self, key, result, pre_state):
# Indicate the dask task is finished.
self.pb.finish.remote(key, datetime.now())
def _ray_finish(self, result):
print("All tasks are completed.")
| ProgressBarCallback |
python | readthedocs__readthedocs.org | readthedocs/oauth/admin.py | {
"start": 1200,
"end": 1702
} | class ____(admin.ModelAdmin):
"""Admin configuration for the RemoteOrganization model."""
readonly_fields = (
"created",
"modified",
)
search_fields = (
"name",
"slug",
"email",
"url",
"remote_id",
)
list_filter = ("vcs_provider",)
list_display = (
"id",
"name",
"slug",
"email",
"get_vcs_provider_display",
)
@admin.register(RemoteRepositoryRelation)
| RemoteOrganizationAdmin |
python | getsentry__sentry | tests/sentry/web/frontend/test_oauth_token.py | {
"start": 17600,
"end": 21097
} | class ____(TestCase):
@cached_property
def path(self) -> str:
return "/oauth/token/"
def setUp(self) -> None:
super().setUp()
self.application = ApiApplication.objects.create(
owner=self.user, redirect_uris="https://example.com"
)
self.client_secret = self.application.client_secret
self.grant = ApiGrant.objects.create(
user=self.user, application=self.application, redirect_uri="https://example.com"
)
self.token = ApiToken.objects.create(
application=self.application, user=self.user, expires_at=timezone.now()
)
def test_missing_client_id(self) -> None:
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "refresh_token",
"refresh_token": self.token.refresh_token,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 401
assert json.loads(resp.content) == {"error": "invalid_client"}
def test_invalid_client_id(self) -> None:
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "refresh_token",
"client_id": "abc",
"refresh_token": self.token.refresh_token,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 401
assert json.loads(resp.content) == {"error": "invalid_client"}
def test_missing_refresh_token(self) -> None:
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "refresh_token",
"client_id": self.application.client_id,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 400
assert json.loads(resp.content) == {"error": "invalid_request"}
def test_invalid_refresh_token(self) -> None:
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "refresh_token",
"client_id": self.application.client_id,
"refresh_token": "foo",
"client_secret": self.client_secret,
},
)
assert resp.status_code == 400
assert json.loads(resp.content) == {"error": "invalid_grant"}
def test_valid_params(self) -> None:
self.login_as(self.user)
resp = self.client.post(
self.path,
{
"grant_type": "refresh_token",
"client_id": self.application.client_id,
"refresh_token": self.token.refresh_token,
"client_secret": self.client_secret,
},
)
assert resp.status_code == 200
token2 = ApiToken.objects.get(id=self.token.id)
assert token2.application == self.token.application
assert token2.user == self.token.user
assert token2.get_scopes() == self.token.get_scopes()
assert self.token.expires_at is not None
assert token2.expires_at is not None
assert token2.expires_at > self.token.expires_at
assert token2.token != self.token.token
assert token2.refresh_token != self.token.refresh_token
assert token2.refresh_token
@control_silo_test
| OAuthTokenRefreshTokenTest |
python | gevent__gevent | src/greentest/3.14/test_urllib2_localnet.py | {
"start": 644,
"end": 1544
} | class ____(http.server.HTTPServer):
"""HTTP server w/ a few modifications that make it useful for
loopback testing purposes.
"""
def __init__(self, server_address, RequestHandlerClass):
http.server.HTTPServer.__init__(self,
server_address,
RequestHandlerClass)
# Set the timeout of our listening socket really low so
# that we can stop the server easily.
self.socket.settimeout(0.1)
def get_request(self):
"""HTTPServer method, overridden."""
request, client_address = self.socket.accept()
# It's a loopback connection, so setting the timeout
# really low shouldn't affect anything, but should make
# deadlocks less likely to occur.
request.settimeout(10.0)
return (request, client_address)
| LoopbackHttpServer |
python | scrapy__scrapy | scrapy/crawler.py | {
"start": 13854,
"end": 17082
} | class ____(CrawlerRunnerBase):
"""
This is a convenient helper class that keeps track of, manages and runs
crawlers inside an already setup :mod:`~twisted.internet.reactor`.
The CrawlerRunner object must be instantiated with a
:class:`~scrapy.settings.Settings` object.
This class shouldn't be needed (since Scrapy is responsible of using it
accordingly) unless writing scripts that manually handle the crawling
process. See :ref:`run-from-script` for an example.
This class provides Deferred-based APIs. Use :class:`AsyncCrawlerRunner`
for modern coroutine APIs.
"""
def __init__(self, settings: dict[str, Any] | Settings | None = None):
super().__init__(settings)
self._active: set[Deferred[None]] = set()
def crawl(
self,
crawler_or_spidercls: type[Spider] | str | Crawler,
*args: Any,
**kwargs: Any,
) -> Deferred[None]:
"""
Run a crawler with the provided arguments.
It will call the given Crawler's :meth:`~Crawler.crawl` method, while
keeping track of it so it can be stopped later.
If ``crawler_or_spidercls`` isn't a :class:`~scrapy.crawler.Crawler`
instance, this method will try to create one using this parameter as
the spider class given to it.
Returns a deferred that is fired when the crawling is finished.
:param crawler_or_spidercls: already created crawler, or a spider class
or spider's name inside the project to create it
:type crawler_or_spidercls: :class:`~scrapy.crawler.Crawler` instance,
:class:`~scrapy.spiders.Spider` subclass or string
:param args: arguments to initialize the spider
:param kwargs: keyword arguments to initialize the spider
"""
if isinstance(crawler_or_spidercls, Spider):
raise ValueError(
"The crawler_or_spidercls argument cannot be a spider object, "
"it must be a spider class (or a Crawler object)"
)
crawler = self.create_crawler(crawler_or_spidercls)
return self._crawl(crawler, *args, **kwargs)
@inlineCallbacks
def _crawl(
self, crawler: Crawler, *args: Any, **kwargs: Any
) -> Generator[Deferred[Any], Any, None]:
self.crawlers.add(crawler)
d = crawler.crawl(*args, **kwargs)
self._active.add(d)
try:
yield d
finally:
self.crawlers.discard(crawler)
self._active.discard(d)
self.bootstrap_failed |= not getattr(crawler, "spider", None)
def stop(self) -> Deferred[Any]:
"""
Stops simultaneously all the crawling jobs taking place.
Returns a deferred that is fired when they all have ended.
"""
return DeferredList(deferred_from_coro(c.stop_async()) for c in self.crawlers)
@inlineCallbacks
def join(self) -> Generator[Deferred[Any], Any, None]:
"""
join()
Returns a deferred that is fired when all managed :attr:`crawlers` have
completed their executions.
"""
while self._active:
yield DeferredList(self._active)
| CrawlerRunner |
python | PrefectHQ__prefect | src/integrations/prefect-azure/prefect_azure/container_instance.py | {
"start": 2921,
"end": 3074
} | class ____(str, Enum):
"""
Terminal run states for ACI containers.
"""
RUNNING = "Running"
TERMINATED = "Terminated"
| ContainerRunState |
python | getsentry__sentry | src/sentry/api/endpoints/organization_profiling_functions.py | {
"start": 2098,
"end": 2407
} | class ____(serializers.Serializer):
function = serializers.CharField(max_length=10)
trend = TrendTypeField()
query = serializers.CharField(required=False)
threshold = serializers.IntegerField(min_value=0, max_value=1000, default=16, required=False)
@region_silo_endpoint
| FunctionTrendsSerializer |
python | astropy__astropy | astropy/utils/metadata/tests/test_metadata.py | {
"start": 2101,
"end": 2221
} | class ____(MetaBaseTest):
test_class = ExampleDataclass
args = ()
@dataclass(frozen=True)
| TestMetaExampleDataclass |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-markitdown/llama_index/readers/markitdown/base.py | {
"start": 3110,
"end": 4572
} | class ____(BaseReader):
"""
MarkItDownReader is a document reader that utilizes the MarkItDown parser to convert files or collections of files into Document objects.
Methods
-------
load_data(file_path: str | Path | Iterable[str] | Iterable[Path]) -> List[Document]
Loads and parses a directory (if `file_path` is `str` or `Path`) or a list of files specified by `file_path` using the MarkItDown parser.
Returns a list of Document objects, each containing the text content and metadata such as file path, file type, and content length.
"""
_reader: MarkItDown = MarkItDown()
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "MarkItDownReader"
def load_data(
self,
file_path: Union[str, Path, List[str], List[Path]],
**kwargs,
) -> List[Document]:
docs: List[Document] = []
fl_pt = ValidFilePath(file_path=file_path)
fs = fl_pt.file_path
for f in fs:
res = self._reader.convert(f)
docs.append(
Document(
text=res.text_content,
metadata={
"file_path": f.__str__(),
"file_type": os.path.splitext(f)[1],
"content_length": len(res.text_content),
},
)
)
return docs
| MarkItDownReader |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 9470,
"end": 9542
} | class ____(SignedIntegerType):
"""16-bit signed integer type."""
| Int16 |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/json_viewer.py | {
"start": 303,
"end": 3478
} | class ____(MetaflowCardComponent):
"""
A component for displaying JSON data with syntax highlighting and collapsible sections.
This component provides a rich view of JSON data with proper formatting, syntax highlighting,
and the ability to collapse/expand sections for better readability.
Example:
```python
from metaflow.cards import JSONViewer
from metaflow import current
data = {
"user": {"name": "Alice", "age": 30},
"items": [{"id": 1, "name": "Item 1"}, {"id": 2, "name": "Item 2"}],
"metadata": {"created": "2024-01-01", "version": "1.0"}
}
json_viewer = JSONViewer(data, collapsible=True, max_height="400px")
current.card.append(json_viewer)
```
Parameters
----------
data : Any
The data to display as JSON. Will be serialized using json.dumps().
collapsible : bool, default True
Whether to make the JSON viewer collapsible.
max_height : str, optional
Maximum height for the viewer (CSS value like "300px" or "20rem").
show_copy_button : bool, default True
Whether to show a copy-to-clipboard button.
"""
type = "jsonViewer"
REALTIME_UPDATABLE = True
def __init__(
self,
data: Any,
collapsible: bool = True,
max_height: Optional[str] = None,
show_copy_button: bool = True,
title: Optional[str] = None,
):
self._data = data
self._collapsible = collapsible
self._max_height = max_height
self._show_copy_button = show_copy_button
self._title = title
def update(self, data: Any):
"""
Update the JSON data.
Parameters
----------
data : Any
New data to display as JSON.
"""
self._data = data
@with_default_component_id
@render_safely
def render(self):
# Serialize data to JSON string
try:
if isinstance(self._data, str):
# If already a string, try to parse and re-serialize for formatting
try:
parsed = json.loads(self._data)
json_string = json.dumps(parsed, indent=2, ensure_ascii=False)
except json.JSONDecodeError:
# If not valid JSON, treat as plain string
json_string = json.dumps(self._data, indent=2, ensure_ascii=False)
else:
json_string = json.dumps(
self._data, indent=2, ensure_ascii=False, default=str
)
except Exception as e:
# Fallback for non-serializable objects
json_string = json.dumps(
{"error": f"Could not serialize data: {str(e)}"}, indent=2
)
data = {
"type": self.type,
"id": self.component_id,
"json_string": json_string,
"collapsible": self._collapsible,
"show_copy_button": self._show_copy_button,
"title": self._title or "JSON",
}
if self._max_height:
data["max_height"] = self._max_height
return data
| JSONViewer |
python | pytorch__pytorch | test/dynamo/cpython/3_13/typinganndata/_typed_dict_helper.py | {
"start": 684,
"end": 745
} | class ____(TypedDict, Generic[T]):
a: Optional[T]
| FooGeneric |
python | django__django | django/db/models/functions/text.py | {
"start": 9973,
"end": 10418
} | class ____(Func):
"""
Return a positive integer corresponding to the 1-indexed position of the
first occurrence of a substring inside another string, or 0 if the
substring is not found.
"""
function = "INSTR"
arity = 2
output_field = IntegerField()
def as_postgresql(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function="STRPOS", **extra_context)
| StrIndex |
python | PyCQA__pylint | pylint/typing.py | {
"start": 3205,
"end": 3374
} | class ____(Protocol):
def __call__(
self, config: PyreverseConfig | None = None, args: Sequence[str] | None = None
) -> DiaDefGenerator: ...
| GeneratorFactory |
python | celery__celery | t/unit/worker/test_components.py | {
"start": 2257,
"end": 2461
} | class ____:
def test_create__green(self):
w = Mock(name='w')
w.pool_cls.__module__ = 'foo_gevent'
with pytest.raises(ImproperlyConfigured):
Beat(w).create(w)
| test_Beat |
python | streamlit__streamlit | lib/streamlit/elements/plotly_chart.py | {
"start": 2847,
"end": 5497
} | class ____(TypedDict, total=False):
"""
The schema for the Plotly chart selection state.
The selection state is stored in a dictionary-like object that supports both
key and attribute notation. Selection states cannot be programmatically
changed or set through Session State.
Attributes
----------
points : list[dict[str, Any]]
The selected data points in the chart, including the data points
selected by the box and lasso mode. The data includes the values
associated to each point and a point index used to populate
``point_indices``. If additional information has been assigned to your
points, such as size or legend group, this is also included.
point_indices : list[int]
The numerical indices of all selected data points in the chart. The
details of each identified point are included in ``points``.
box : list[dict[str, Any]]
The metadata related to the box selection. This includes the
coordinates of the selected area.
lasso : list[dict[str, Any]]
The metadata related to the lasso selection. This includes the
coordinates of the selected area.
Example
-------
When working with more complicated graphs, the ``points`` attribute
displays additional information. Try selecting points in the following
example:
>>> import plotly.express as px
>>> import streamlit as st
>>>
>>> df = px.data.iris()
>>> fig = px.scatter(
... df,
... x="sepal_width",
... y="sepal_length",
... color="species",
... size="petal_length",
... hover_data=["petal_width"],
... )
>>>
>>> event = st.plotly_chart(fig, key="iris", on_select="rerun")
>>>
>>> event.selection
.. output::
https://doc-chart-events-plotly-selection-state.streamlit.app
height: 600px
This is an example of the selection state when selecting a single point:
>>> {
>>> "points": [
>>> {
>>> "curve_number": 2,
>>> "point_number": 9,
>>> "point_index": 9,
>>> "x": 3.6,
>>> "y": 7.2,
>>> "customdata": [
>>> 2.5
>>> ],
>>> "marker_size": 6.1,
>>> "legendgroup": "virginica"
>>> }
>>> ],
>>> "point_indices": [
>>> 9
>>> ],
>>> "box": [],
>>> "lasso": []
>>> }
"""
points: Required[list[dict[str, Any]]]
point_indices: Required[list[int]]
box: Required[list[dict[str, Any]]]
lasso: Required[list[dict[str, Any]]]
| PlotlySelectionState |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0042_increase_env_variable_value_max_length.py | {
"start": 150,
"end": 534
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0041_index-repo-field"),
]
operations = [
migrations.AlterField(
model_name="environmentvariable",
name="value",
field=models.CharField(help_text="Value of the environment variable", max_length=2048),
),
]
| Migration |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-dashscope/llama_index/readers/dashscope/domain/lease_domains.py | {
"start": 10013,
"end": 10375
} | class ____(Enum):
INIT = "INIT"
PARSING = "PARSING"
PARSE_SUCCESS = "PARSE_SUCCESS"
PARSE_FAILED = "PARSE_FAILED"
@classmethod
def from_value(cls, value):
for member in cls:
if member.value == value:
return member
raise ValueError(f"No enum member found for value '{value}'")
| DatahubDataStatusEnum |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_relationship.py | {
"start": 64263,
"end": 66310
} | class ____(
fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
"""exercise issue #3611, using the test from dupe issue 3614"""
run_define_tables = None
__dialect__ = "default"
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
class UserRole(Base):
__tablename__ = "user_roles"
id = Column(Integer, primary_key=True)
row_type = Column(String(50), nullable=False)
__mapper_args__ = {"polymorphic_on": row_type}
user_id = Column(Integer, ForeignKey("users.id"), nullable=False)
user = relationship("User", lazy=False)
class Admin(UserRole):
__tablename__ = "admins"
__mapper_args__ = {"polymorphic_identity": "admin"}
id = Column(Integer, ForeignKey("user_roles.id"), primary_key=True)
class Thing(Base):
__tablename__ = "things"
id = Column(Integer, primary_key=True)
admin_id = Column(Integer, ForeignKey("admins.id"))
admin = relationship("Admin", lazy=False)
def test_query(self):
Thing = self.classes.Thing
sess = fixture_session()
self.assert_compile(
sess.query(Thing),
"SELECT things.id AS things_id, "
"things.admin_id AS things_admin_id, "
"users_1.id AS users_1_id, admins_1.id AS admins_1_id, "
"user_roles_1.id AS user_roles_1_id, "
"user_roles_1.row_type AS user_roles_1_row_type, "
"user_roles_1.user_id AS user_roles_1_user_id FROM things "
"LEFT OUTER JOIN (user_roles AS user_roles_1 JOIN admins "
"AS admins_1 ON user_roles_1.id = admins_1.id) ON "
"admins_1.id = things.admin_id "
"LEFT OUTER JOIN users AS "
"users_1 ON users_1.id = user_roles_1.user_id",
)
| JoinedloadSinglePolysubSingle |
python | astropy__astropy | astropy/cosmology/_src/tests/flrw/test_wpwazpcdm.py | {
"start": 996,
"end": 2424
} | class ____(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` wp on a Cosmology.
wp is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_wp(self, cosmo_cls, cosmo):
"""Test Parameter ``wp``."""
# on the class
wp = cosmo_cls.parameters["wp"]
assert isinstance(wp, Parameter)
assert "at the pivot" in wp.__doc__
assert wp.unit is None
assert wp.default == -1.0
# on the instance
assert cosmo.wp is cosmo.__dict__["wp"]
assert cosmo.wp == self.cls_kwargs["wp"]
def test_init_wp(self, cosmo_cls, ba):
"""Test initialization for values of ``wp``."""
# test that it works with units
ba.arguments["wp"] = ba.arguments["wp"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wp == ba.arguments["wp"]
# also without units
ba.arguments["wp"] = ba.arguments["wp"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.wp == ba.arguments["wp"]
# must be dimensionless
ba.arguments["wp"] = 10 * u.km
with pytest.raises(TypeError):
cosmo_cls(*ba.args, **ba.kwargs)
| ParameterwpTestMixin |
python | tensorflow__tensorflow | tensorflow/python/tools/api/generator/create_python_api.py | {
"start": 2210,
"end": 3355
} | class ____(Exception):
"""Raised when different symbols are exported with the same name."""
pass
def get_canonical_import(import_set):
"""Obtain one single import from a set of possible sources of a symbol.
One symbol might come from multiple places as it is being imported and
reexported. To simplify API changes, we always use the same import for the
same module, and give preference based on higher priority and alphabetical
ordering.
Args:
import_set: (set) Imports providing the same symbol. This is a set of tuples
in the form (import, priority). We want to pick an import with highest
priority.
Returns:
A module name to import
"""
# We use the fact that list sorting is stable, so first we convert the set to
# a sorted list of the names and then we resort this list to move elements
# not in core tensorflow to the end.
# Here we sort by priority (higher preferred) and then alphabetically by
# import string.
import_list = sorted(
import_set,
key=lambda imp_and_priority: (-imp_and_priority[1], imp_and_priority[0]))
return import_list[0][0]
| SymbolExposedTwiceError |
python | kubernetes-client__python | kubernetes/client/models/v1_env_var.py | {
"start": 383,
"end": 5972
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'value': 'str',
'value_from': 'V1EnvVarSource'
}
attribute_map = {
'name': 'name',
'value': 'value',
'value_from': 'valueFrom'
}
def __init__(self, name=None, value=None, value_from=None, local_vars_configuration=None): # noqa: E501
"""V1EnvVar - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._value = None
self._value_from = None
self.discriminator = None
self.name = name
if value is not None:
self.value = value
if value_from is not None:
self.value_from = value_from
@property
def name(self):
"""Gets the name of this V1EnvVar. # noqa: E501
Name of the environment variable. May consist of any printable ASCII characters except '='. # noqa: E501
:return: The name of this V1EnvVar. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1EnvVar.
Name of the environment variable. May consist of any printable ASCII characters except '='. # noqa: E501
:param name: The name of this V1EnvVar. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def value(self):
"""Gets the value of this V1EnvVar. # noqa: E501
Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\". # noqa: E501
:return: The value of this V1EnvVar. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this V1EnvVar.
Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\". # noqa: E501
:param value: The value of this V1EnvVar. # noqa: E501
:type: str
"""
self._value = value
@property
def value_from(self):
"""Gets the value_from of this V1EnvVar. # noqa: E501
:return: The value_from of this V1EnvVar. # noqa: E501
:rtype: V1EnvVarSource
"""
return self._value_from
@value_from.setter
def value_from(self, value_from):
"""Sets the value_from of this V1EnvVar.
:param value_from: The value_from of this V1EnvVar. # noqa: E501
:type: V1EnvVarSource
"""
self._value_from = value_from
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EnvVar):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EnvVar):
return True
return self.to_dict() != other.to_dict()
| V1EnvVar |
python | huggingface__transformers | tests/models/gpt_oss/test_modeling_gpt_oss.py | {
"start": 6699,
"end": 21037
} | class ____(unittest.TestCase):
input_text = [
"Roses are red, violets",
"How are you? Tell me the name of the president of",
]
@staticmethod
def generate_config_key(quantized, model, kernels, attn_impl, mode):
"""Generate a key for the restructured integration test results."""
return f"quantized={str(quantized).lower()}|model={model}|kernels={str(kernels).lower()}|attn_impl={attn_impl}|mode={mode}"
def setUp(self):
cleanup(torch_device, gc_collect=True)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
# ------------------------
# Non-distributed inference
# ------------------------
@staticmethod
def load_and_forward(model_id, attn_implementation, input_text, **pretrained_kwargs):
model = AutoModelForCausalLM.from_pretrained(
model_id,
dtype=torch.bfloat16,
device_map="auto",
attn_implementation=attn_implementation,
**pretrained_kwargs,
)
tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
inputs = tokenizer(input_text, return_tensors="pt", padding=True).to(model.device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
return output_text
# ------------------------
# Distributed inference using inspect
# ------------------------
@staticmethod
def run_distributed_test(quantized, model, kernels, attn_impl, mode):
"""Launch torchrun using a temporary worker file generated from inspect.getsource()."""
import textwrap
# Extract worker function source dynamically
worker_src = inspect.getsource(distributed_worker)
# Create a temp file that calls the worker
script_code = f"""
import sys
import json
RESULTS_PATH = "{RESULTS_PATH}"
{worker_src}
if __name__ == "__main__":
distributed_worker("{quantized}", "{model}", "{kernels}", "{attn_impl}", "{mode}")
"""
# Dedent for proper formatting
script_code = textwrap.dedent(script_code)
# Write to temp file
with tempfile.NamedTemporaryFile("w", suffix="_worker.py", delete=False) as tmp:
tmp.write(script_code)
tmp_path = tmp.name
# Launch torchrun
cmd = [
"torchrun",
f"--nproc_per_node={NUM_GPUS}",
tmp_path,
]
subprocess.run(cmd, check=True)
# Cleanup
os.remove(tmp_path)
# ------------------------
# Shared parameterization
# ------------------------
PARAMETERS = [
(False, "20b", False, "eager", "eval"),
(False, "20b", False, "eager", "train"),
(False, "20b", False, "kernels-community/vllm-flash-attn3", "eval"),
(False, "20b", False, "kernels-community/vllm-flash-attn3", "train"),
(False, "20b", True, "eager", "eval"),
(False, "20b", True, "eager", "train"),
(False, "20b", True, "kernels-community/vllm-flash-attn3", "eval"),
(False, "20b", True, "kernels-community/vllm-flash-attn3", "train"),
(True, "20b", False, "eager", "eval"),
(True, "20b", False, "eager", "train"),
(True, "20b", False, "kernels-community/vllm-flash-attn3", "eval"),
(True, "20b", False, "kernels-community/vllm-flash-attn3", "train"),
(True, "20b", True, "eager", "eval"),
(True, "20b", True, "eager", "train"),
(True, "20b", True, "kernels-community/vllm-flash-attn3", "eval"),
(True, "20b", True, "kernels-community/vllm-flash-attn3", "train"),
(False, "120b", False, "eager", "eval"),
(False, "120b", False, "eager", "train"),
(False, "120b", False, "kernels-community/vllm-flash-attn3", "eval"),
(False, "120b", False, "kernels-community/vllm-flash-attn3", "train"),
(False, "120b", True, "eager", "eval"),
(False, "120b", True, "eager", "train"),
(False, "120b", True, "kernels-community/vllm-flash-attn3", "eval"),
(False, "120b", True, "kernels-community/vllm-flash-attn3", "train"),
(True, "120b", False, "eager", "eval"),
(True, "120b", False, "eager", "train"),
(True, "120b", False, "kernels-community/vllm-flash-attn3", "eval"),
(True, "120b", False, "kernels-community/vllm-flash-attn3", "train"),
(True, "120b", True, "eager", "eval"),
(True, "120b", True, "eager", "train"),
(True, "120b", True, "kernels-community/vllm-flash-attn3", "eval"),
(True, "120b", True, "kernels-community/vllm-flash-attn3", "train"),
]
# ------------------------
# Non-distributed test
# ------------------------
@parameterized.expand(PARAMETERS)
@require_read_token
def test_model_outputs(self, quantized, model, kernels, attn_impl, mode):
model_id = f"openai/gpt-oss-{model}"
output_texts = self.load_and_forward(
model_id,
attn_impl,
self.input_text,
use_kernels=kernels,
)
# Generate key to look up expected outputs
key = self.generate_config_key(quantized, model, kernels, attn_impl, mode)
# Load expected outputs from restructured JSON
if os.path.exists(RESULTS_PATH):
with open(RESULTS_PATH, "r") as f:
expected_results = json.load(f)
# Check if we have expected results for this configuration
if key in expected_results:
expected_outputs = expected_results[key]
# Compare actual outputs with expected outputs
self.assertEqual(len(output_texts), len(expected_outputs), f"Output length mismatch for {key}")
for i, (actual, expected) in enumerate(zip(output_texts, expected_outputs)):
actual_stripped = actual.strip()
expected_stripped = expected.strip()
# Make lengths match by taking minimum length to be resilient to generation differences
min_length = min(len(actual_stripped), len(expected_stripped))
actual_truncated = actual_stripped[:min_length]
expected_truncated = expected_stripped[:min_length]
if actual_truncated != expected_truncated:
diff = "\n".join(
difflib.unified_diff(
expected_truncated.splitlines(keepends=True),
actual_truncated.splitlines(keepends=True),
fromfile=f"expected[{i}]",
tofile=f"actual[{i}]",
lineterm="",
)
)
self.fail(
f"Output mismatch at index {i} for {key}:\n"
f"Expected: '{expected_stripped}'\n"
f"Actual: '{actual_stripped}'\n"
f"Diff (truncated to min length {min_length}):\n{diff}"
)
else:
# If no expected results exist, this is a new configuration
# We could optionally add it to the results file here
print(f"Warning: No expected results found for configuration: {key}")
self.assertIsInstance(output_texts, list)
self.assertTrue(all(isinstance(x, str) for x in output_texts))
# ------------------------
# Distributed test
# ------------------------
@parameterized.expand(PARAMETERS)
@require_read_token
def test_model_outputs_distributed(self, quantized, model, kernels, attn_impl, mode):
self.run_distributed_test(quantized, model, kernels, attn_impl, mode)
# ------------------------
# Training test
# ------------------------
@parameterized.expand(PARAMETERS)
@require_read_token
def test_training_step(self, quantized, model, kernels, attn_impl, mode):
if mode != "train":
self.skipTest("This test is only for training mode.")
if quantized:
self.skipTest("Training test for quantized models is not supported.")
model_id = f"openai/gpt-oss-{model}"
model_obj = AutoModelForCausalLM.from_pretrained(
model_id,
dtype=torch.bfloat16,
device_map="auto",
attn_implementation=attn_impl,
use_kernels=kernels,
)
model_obj.train()
tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(model_obj.device)
inputs["labels"] = inputs["input_ids"].clone()
outputs = model_obj(**inputs)
loss = outputs.loss
self.assertIsNotNone(loss)
loss.backward()
# Check that gradients were computed for all parameters that have a grad field
for name, param in model_obj.named_parameters():
if param.requires_grad:
self.assertIsNotNone(param.grad, f"Parameter '{name}' did not receive a gradient.")
# Check that gradients are not all zero
self.assertTrue(
torch.sum(torch.abs(param.grad)).item() > 0, f"Gradient for parameter '{name}' is all zeros."
)
def test_model_matches_original_20b(self):
input_text = "Roses are red, violets"
original_output = "Roses are red, violets are blue, I love you, and I love you too."
original_logprobs = torch.tensor(
[
-0.037353515625,
-0.08154296875,
-1.21875,
-1.953125,
-2.234375,
-0.96875,
-1.546875,
-1.640625,
-0.93359375,
-1.609375,
-1.625,
-0.85546875,
-1.7265625,
-0.7421875,
-2.078125,
-0.006561279296875,
-0.10498046875,
-0.1767578125,
-0.1240234375,
-0.099609375,
]
)
model_id = "openai/gpt-oss-20b"
model = AutoModelForCausalLM.from_pretrained(
model_id,
dtype=torch.bfloat16,
device_map="auto",
attn_implementation="eager",
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokens = tokenizer(input_text)["input_ids"]
num_generated_tokens = 0
with torch.no_grad():
for i in range(12):
tensors = torch.as_tensor(tokens, dtype=torch.int32, device=model.device).unsqueeze(0)
logits = model(tensors).logits[0]
predicted_token = torch.argmax(logits[-1, :], dim=-1).item()
logprobs = torch.log_softmax(logits[-1, :], dim=-1)
selected_logprobs = logprobs[predicted_token]
tokens.append(predicted_token)
num_generated_tokens += 1
decoded_token = tokenizer.decode([predicted_token])
logprob_differences = selected_logprobs - original_logprobs[i]
print(
f"Generated token: {repr(decoded_token)}, logprob: {selected_logprobs}, logprob differences: {logprob_differences}"
)
torch.testing.assert_close(
selected_logprobs.cpu().to(original_logprobs.dtype), original_logprobs[i], atol=1e-1, rtol=1e-1
)
decoded_string = tokenizer.decode(tokens)
self.assertTrue(original_output.startswith(decoded_string))
def test_model_matches_original_120b(self):
input_text = "Roses are red, violets"
original_output = """Roses are red, violets are blue,
I am a language model, not a human being"""
original_logprobs = torch.tensor(
[
-0.90234375,
-0.66015625,
-1.546875,
-2.703125,
-2.078125,
-1.21875,
-2.484375,
-0.031982421875,
-0.84765625,
-1.890625,
-0.1923828125,
-2.046875,
-1.65625,
-1.3515625,
-1.1640625,
-0.3671875,
-1.9921875,
-1.5390625,
-1.46875,
-0.85546875,
]
)
model_id = "openai/gpt-oss-120b"
model = AutoModelForCausalLM.from_pretrained(
model_id,
dtype=torch.bfloat16,
device_map="auto",
attn_implementation="eager",
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokens = tokenizer(input_text)["input_ids"]
num_generated_tokens = 0
with torch.no_grad():
for i in range(12):
tensors = torch.as_tensor(tokens, dtype=torch.int32, device=model.device).unsqueeze(0)
logits = model(tensors).logits[0]
predicted_token = torch.argmax(logits[-1, :], dim=-1).item()
logprobs = torch.log_softmax(logits[-1, :], dim=-1)
selected_logprobs = logprobs[predicted_token]
tokens.append(predicted_token)
num_generated_tokens += 1
decoded_token = tokenizer.decode([predicted_token])
logprob_differences = selected_logprobs - original_logprobs[i]
print(
f"Generated token: {repr(decoded_token)}, logprob: {selected_logprobs}, logprob differences: {logprob_differences}"
)
torch.testing.assert_close(
selected_logprobs.cpu().to(original_logprobs.dtype), original_logprobs[i], atol=1e-1, rtol=1e-1
)
decoded_string = tokenizer.decode(tokens)
self.assertTrue(original_output.startswith(decoded_string))
| GptOssIntegrationTest |
python | numba__numba | numba/core/ir.py | {
"start": 22196,
"end": 22553
} | class ____(Terminator):
is_exit = True
def __init__(self, exception, loc):
assert exception is None or isinstance(exception, Var)
assert isinstance(loc, Loc)
self.exception = exception
self.loc = loc
def __str__(self):
return "raise %s" % self.exception
def get_targets(self):
return []
| Raise |
python | cython__cython | Cython/Debugger/libcython.py | {
"start": 8693,
"end": 16673
} | class ____:
@default_selected_gdb_frame(err=False)
def is_cython_function(self, frame):
return frame.name() in self.cy.functions_by_cname
@default_selected_gdb_frame(err=False)
def is_python_function(self, frame):
"""
Tells if a frame is associated with a Python function.
If we can't read the Python frame information, don't regard it as such.
"""
if frame.name() == 'PyEval_EvalFrameEx':
pyframe = libpython.Frame(frame).get_pyop()
return pyframe and not pyframe.is_optimized_out()
return False
@default_selected_gdb_frame()
def get_c_function_name(self, frame):
return frame.name()
@default_selected_gdb_frame()
def get_c_lineno(self, frame):
return frame.find_sal().line
@default_selected_gdb_frame()
def get_cython_function(self, frame):
result = self.cy.functions_by_cname.get(frame.name())
if result is None:
raise NoCythonFunctionInFrameError()
return result
@default_selected_gdb_frame()
def get_cython_lineno(self, frame):
"""
Get the current Cython line number. Returns ("<no filename>", 0) if there is no
correspondence between the C and Cython code.
"""
cyfunc = self.get_cython_function(frame)
return cyfunc.module.lineno_c2cy.get(self.get_c_lineno(frame), ("<no filename>", 0))
@default_selected_gdb_frame()
def get_source_desc(self, frame):
filename = lineno = lexer = None
if self.is_cython_function(frame):
filename = self.get_cython_function(frame).module.filename
filename_and_lineno = self.get_cython_lineno(frame)
assert filename == filename_and_lineno[0]
lineno = filename_and_lineno[1]
if pygments:
lexer = pygments.lexers.CythonLexer(stripall=False)
elif self.is_python_function(frame):
pyframeobject = libpython.Frame(frame).get_pyop()
if not pyframeobject:
raise gdb.GdbError(
'Unable to read information on python frame')
filename = pyframeobject.filename()
lineno = pyframeobject.current_line_num()
if pygments:
lexer = pygments.lexers.PythonLexer(stripall=False)
else:
symbol_and_line_obj = frame.find_sal()
if not symbol_and_line_obj or not symbol_and_line_obj.symtab:
filename = None
lineno = 0
else:
filename = symbol_and_line_obj.symtab.fullname()
lineno = symbol_and_line_obj.line
if pygments:
lexer = pygments.lexers.CLexer(stripall=False)
return SourceFileDescriptor(filename, lexer), lineno
@default_selected_gdb_frame()
def get_source_line(self, frame):
source_desc, lineno = self.get_source_desc()
return source_desc.get_source(lineno)
@default_selected_gdb_frame()
def is_relevant_function(self, frame):
"""
returns whether we care about a frame on the user-level when debugging
Cython code
"""
name = frame.name()
older_frame = frame.older()
if self.is_cython_function(frame) or self.is_python_function(frame):
return True
elif older_frame and self.is_cython_function(older_frame):
# check for direct C function call from a Cython function
cython_func = self.get_cython_function(older_frame)
return name in cython_func.step_into_functions
return False
@default_selected_gdb_frame(err=False)
def print_stackframe(self, frame, index, is_c=False):
"""
Print a C, Cython or Python stack frame and the line of source code
if available.
"""
# do this to prevent the require_cython_frame decorator from
# raising GdbError when calling self.cy.cy_cvalue.invoke()
selected_frame = gdb.selected_frame()
frame.select()
try:
source_desc, lineno = self.get_source_desc(frame)
except NoFunctionNameInFrameError:
print('#%-2d Unknown Frame (compile with -g)' % index)
return
if not is_c and self.is_python_function(frame):
pyframe = libpython.Frame(frame).get_pyop()
if pyframe is None or pyframe.is_optimized_out():
# print this python function as a C function
return self.print_stackframe(frame, index, is_c=True)
func_name = pyframe.co_name
func_cname = 'PyEval_EvalFrameEx'
func_args = []
elif self.is_cython_function(frame):
cyfunc = self.get_cython_function(frame)
f = lambda arg: self.cy.cy_cvalue.invoke(arg, frame=frame)
func_name = cyfunc.name
func_cname = cyfunc.cname
func_args = [] # [(arg, f(arg)) for arg in cyfunc.arguments]
else:
source_desc, lineno = self.get_source_desc(frame)
func_name = frame.name()
func_cname = func_name
func_args = []
try:
gdb_value = gdb.parse_and_eval(func_cname)
except RuntimeError:
func_address = 0
else:
func_address = gdb_value.address
if not isinstance(func_address, int):
# Seriously? Why is the address not an int?
if not isinstance(func_address, (str, bytes)):
func_address = str(func_address)
func_address = int(func_address.split()[0], 0)
a = ', '.join('%s=%s' % (name, val) for name, val in func_args)
sys.stdout.write('#%-2d 0x%016x in %s(%s)' % (index, func_address, func_name, a))
if source_desc.filename is not None:
sys.stdout.write(' at %s:%s' % (source_desc.filename, lineno))
sys.stdout.write('\n')
try:
sys.stdout.write(f' {source_desc.get_source(lineno)}\n')
except gdb.GdbError:
pass
selected_frame.select()
def get_remote_cython_globals_dict(self):
m = gdb.parse_and_eval('__pyx_m')
try:
PyModuleObject = gdb.lookup_type('PyModuleObject')
except RuntimeError:
raise gdb.GdbError(textwrap.dedent("""\
Unable to lookup type PyModuleObject, did you compile python
with debugging support (-g)?"""))
m = m.cast(PyModuleObject.pointer())
return m['md_dict']
def get_cython_globals_dict(self):
"""
Get the Cython globals dict where the remote names are turned into
local strings.
"""
remote_dict = self.get_remote_cython_globals_dict()
pyobject_dict = libpython.PyObjectPtr.from_pyobject_ptr(remote_dict)
result = {}
seen = set()
for k, v in pyobject_dict.iteritems():
result[k.proxyval(seen)] = v
return result
def print_gdb_value(self, name, value, max_name_length=None, prefix=''):
if libpython.pretty_printer_lookup(value):
typename = ''
else:
typename = '(%s) ' % (value.type,)
if max_name_length is None:
print('%s%s = %s%s' % (prefix, name, typename, value))
else:
print('%s%-*s = %s%s' % (prefix, max_name_length, name, typename, value))
def is_initialized(self, cython_func, local_name):
cyvar = cython_func.locals[local_name]
cur_lineno = self.get_cython_lineno()[1]
if '->' in cyvar.cname:
# Closed over free variable
if cur_lineno > cython_func.lineno:
if cyvar.type == PythonObject:
return int(gdb.parse_and_eval(cyvar.cname))
return True
return False
return cur_lineno > cyvar.lineno
| CythonBase |
python | huggingface__transformers | src/transformers/models/pvt_v2/modeling_pvt_v2.py | {
"start": 21125,
"end": 23568
} | class ____(PvtV2Model, BackboneMixin):
def __init__(self, config: PvtV2Config):
super().__init__(config)
super()._init_backbone(config)
self.num_features = config.hidden_sizes
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> BackboneOutput:
r"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0")
>>> model = AutoBackbone.from_pretrained(
... "OpenGVLab/pvt_v2_b0", out_features=["stage1", "stage2", "stage3", "stage4"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 256, 7, 7]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
outputs = self.encoder(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=True,
return_dict=return_dict,
)
hidden_states = outputs.hidden_states
feature_maps = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=feature_maps,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=None,
)
__all__ = ["PvtV2ForImageClassification", "PvtV2Model", "PvtV2PreTrainedModel", "PvtV2Backbone"]
| PvtV2Backbone |
python | huggingface__transformers | src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py | {
"start": 14113,
"end": 15660
} | class ____(Dinov2ForImageClassification):
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> ImageClassifierOutput:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs: BaseModelOutputWithPooling = self.dinov2_with_registers(pixel_values, **kwargs)
sequence_output = outputs.last_hidden_state # batch_size, sequence_length, hidden_size
cls_token = sequence_output[:, 0]
# cls and register tokens should not be included in patch tokens variable
patch_tokens = sequence_output[:, 1 + self.config.num_register_tokens :]
linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
logits = self.classifier(linear_input)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config, **kwargs)
return ImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| Dinov2WithRegistersForImageClassification |
python | dask__distributed | distributed/client.py | {
"start": 197629,
"end": 201169
} | class ____(WorkerPlugin):
"""This is used to support older setup functions as callbacks"""
def __init__(self, setup):
self._setup = setup
def setup(self, worker):
if has_keyword(self._setup, "dask_worker"):
return self._setup(dask_worker=worker)
else:
return self._setup()
def CompatibleExecutor(*args, **kwargs):
raise Exception("This has been moved to the Client.get_executor() method")
ALL_COMPLETED = "ALL_COMPLETED"
FIRST_COMPLETED = "FIRST_COMPLETED"
async def _wait(fs, timeout=None, return_when=ALL_COMPLETED):
if timeout is not None and not isinstance(timeout, Number):
raise TypeError(
"timeout= keyword received a non-numeric value.\n"
"Beware that wait expects a list of values\n"
" Bad: wait(x, y, z)\n"
" Good: wait([x, y, z])"
)
fs = futures_of(fs)
if return_when == ALL_COMPLETED:
future = distributed.utils.All({f._state.wait() for f in fs})
elif return_when == FIRST_COMPLETED:
future = distributed.utils.Any({f._state.wait() for f in fs})
else:
raise NotImplementedError(
"Only return_when='ALL_COMPLETED' and 'FIRST_COMPLETED' are supported"
)
if timeout is not None:
future = wait_for(future, timeout)
await future
done, not_done = (
{fu for fu in fs if fu.status != "pending"},
{fu for fu in fs if fu.status == "pending"},
)
cancelled_errors = defaultdict(list)
for f in done:
if not f.cancelled():
continue
exception = f._state.exception
assert isinstance(exception, FutureCancelledError)
cancelled_errors[exception.reason].append(exception)
if cancelled_errors:
groups = [
CancelledFuturesGroup(errors=errors, reason=reason)
for reason, errors in cancelled_errors.items()
]
raise FuturesCancelledError(groups)
return DoneAndNotDoneFutures(done, not_done)
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait until all/any futures are finished
Parameters
----------
fs : List[Future]
timeout : number, string, optional
Time after which to raise a ``dask.distributed.TimeoutError``.
Can be a string like ``"10 minutes"`` or a number of seconds to wait.
return_when : str, optional
One of `ALL_COMPLETED` or `FIRST_COMPLETED`
Returns
-------
Named tuple of completed, not completed
"""
if timeout is not None and isinstance(timeout, (Number, str)):
timeout = parse_timedelta(timeout, default="s")
client = default_client()
result = client.sync(_wait, fs, timeout=timeout, return_when=return_when)
return result
async def _as_completed(fs, queue):
fs = futures_of(fs)
groups = groupby(lambda f: f.key, fs)
firsts = [v[0] for v in groups.values()]
wait_iterator = gen.WaitIterator(
*map(asyncio.ensure_future, [f._state.wait() for f in firsts])
)
while not wait_iterator.done():
await wait_iterator.next()
# TODO: handle case of restarted futures
future = firsts[wait_iterator.current_index]
for f in groups[future.key]:
queue.put_nowait(f)
async def _first_completed(futures):
"""Return a single completed future
See Also:
_as_completed
"""
q = asyncio.Queue()
await _as_completed(futures, q)
result = await q.get()
return result
| _WorkerSetupPlugin |
python | ray-project__ray | python/ray/llm/_internal/serve/core/configs/llm_config.py | {
"start": 19350,
"end": 19554
} | class ____(BaseModelExtended):
model_id: str
max_total_tokens: Optional[int]
local_path: str
# this is a per process id assigned to the model
lora_assigned_int_id: int
| DiskMultiplexConfig |
python | keras-team__keras | keras/src/metrics/accuracy_metrics.py | {
"start": 8482,
"end": 11338
} | class ____(reduction_metrics.MeanMetricWrapper):
"""Calculates how often predictions match integer labels.
```python
acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1))
```
You can provide logits of classes as `y_pred`, since argmax of
logits and probabilities are same.
This metric creates two local variables, `total` and `count` that are used
to compute the frequency with which `y_pred` matches `y_true`. This
frequency is ultimately returned as `sparse categorical accuracy`: an
idempotent operation that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = keras.metrics.SparseCategoricalAccuracy()
>>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
>>> m.result()
0.5
>>> m.reset_state()
>>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result()
0.3
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=[keras.metrics.SparseCategoricalAccuracy()])
```
"""
def __init__(self, name="sparse_categorical_accuracy", dtype=None):
super().__init__(fn=sparse_categorical_accuracy, name=name, dtype=dtype)
# Metric should be maximized during optimization.
self._direction = "up"
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_export("keras.metrics.top_k_categorical_accuracy")
def top_k_categorical_accuracy(y_true, y_pred, k=5):
reshape_matches = False
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true = ops.argmax(y_true, axis=-1)
y_true_rank = len(y_true.shape)
y_pred_rank = len(y_pred.shape)
y_true_org_shape = ops.shape(y_true)
# Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None):
if y_pred_rank > 2:
y_pred = ops.reshape(y_pred, [-1, y_pred.shape[-1]])
if y_true_rank > 1:
reshape_matches = True
y_true = ops.reshape(y_true, [-1])
matches = ops.cast(
ops.in_top_k(ops.cast(y_true, "int32"), y_pred, k=k),
dtype=backend.floatx(),
)
# returned matches is expected to have same shape as y_true input
if reshape_matches:
matches = ops.reshape(matches, y_true_org_shape)
return matches
@keras_export("keras.metrics.TopKCategoricalAccuracy")
| SparseCategoricalAccuracy |
python | wandb__wandb | wandb/_pydantic/pagination.py | {
"start": 1261,
"end": 1355
} | class ____(Connection[NodeT], Generic[NodeT]):
total_count: NonNegativeInt
| ConnectionWithTotal |
python | great-expectations__great_expectations | tests/integration/fixtures/partition_and_sample_data/partitioner_test_cases_and_fixtures.py | {
"start": 8980,
"end": 9546
} | class ____(TaxiPartitioningTestCasesBase):
@override
def test_cases(self) -> List[TaxiPartitioningTestCase]:
return [
TaxiPartitioningTestCase(
table_domain_test_case=True,
num_expected_batch_definitions=1,
num_expected_rows_in_first_batch_definition=360,
expected_column_values=[],
add_batch_definition_method_name="add_batch_definition_whole_table",
add_batch_definition_kwargs={},
),
]
| TaxiPartitioningTestCasesWholeTable |
python | kamyu104__LeetCode-Solutions | Python/beautiful-arrangement-ii.py | {
"start": 29,
"end": 491
} | class ____(object):
def constructArray(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[int]
"""
result = []
left, right = 1, n
while left <= right:
if k % 2:
result.append(left)
left += 1
else:
result.append(right)
right -= 1
if k > 1:
k -= 1
return result
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec42.py | {
"start": 466,
"end": 1286
} | class ____(Generic[P, R]):
def __init__(self, func: Callable[P, R]):
self._func = func
def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
return self._func(*args, **kwargs)
def other(self, val: int, *args: P.args, **kwargs: P.kwargs) -> R: ...
decorated_func1 = DecoratorClass1(func1)
reveal_type(
decorated_func1,
expected_text="DecoratorClass1[(a: S@func1, b: T@func1), dict[S@func1, T@func1]]",
)
func1_ret = decorated_func1(1, "")
reveal_type(func1_ret, expected_text="dict[int, str]")
func1_other_ret = decorated_func1.other(0, 1, "")
reveal_type(func1_other_ret, expected_text="dict[int, str]")
def func2(func: Callable[P, R]) -> Callable[P, R]: ...
d1 = func2(func1)
d2 = func2(d1)
d3 = d2(1, "")
reveal_type(d3, expected_text="dict[int, str]")
| DecoratorClass1 |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/dagster/dagster_pipes/dagster_pipes_details_and_customization/custom_context_loader.py | {
"start": 225,
"end": 582
} | class ____(PipesContextLoader):
@contextmanager
def load_context(self, params: PipesParams) -> Iterator[PipesContextData]:
# params were yielded by the above context injector and sourced from the bootstrap payload
key = params["key"]
data = cloud_service.read(key)
yield json.loads(data)
| MyCustomCloudServiceContextLoader |
python | pytorch__pytorch | test/distributed/pipelining/schedule_registry.py | {
"start": 4983,
"end": 6239
} | class ____(_PipelineScheduleRuntime):
n_stages = 2
num_microbatches = 2
rank_stages = {
0: [0],
1: [1],
}
def __init__(
self,
stages: list[_PipelineStageBase],
n_microbatches: int,
loss_fn: Optional[Callable] = None,
scale_grads: bool = True,
):
super().__init__(
stages=stages,
n_microbatches=n_microbatches,
loss_fn=loss_fn,
scale_grads=scale_grads,
)
# Go through two microbatches
self.pipeline_order_with_comms = {
0: [
_Action(0, F, 0),
_Action(0, F, 1),
_Action(0, SEND_F, 0),
_Action(0, SEND_F, 1),
_Action(0, RECV_B, 0),
_Action(0, RECV_B, 1),
_Action(0, B, 0),
_Action(0, B, 1),
],
1: [
_Action(1, RECV_F, 0),
_Action(1, RECV_F, 1),
_Action(1, F, 0),
_Action(1, F, 1),
_Action(1, B, 0),
_Action(1, B, 1),
_Action(1, SEND_B, 0),
_Action(1, SEND_B, 1),
],
}
| ScheduleWithReorderedB |
python | realpython__materials | django-migrations/bitcoin_tracker/historical_data/models.py | {
"start": 31,
"end": 245
} | class ____(models.Model):
date = models.DateTimeField(auto_now_add=True)
price = models.DecimalField(max_digits=7, decimal_places=2)
volume = models.DecimalField(max_digits=7, decimal_places=3)
| PriceHistory |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1020946,
"end": 1021697
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of
UpdateEnterpriseMembersCanDeleteIssuesSetting
"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "enterprise", "message")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
enterprise = sgqlc.types.Field("Enterprise", graphql_name="enterprise")
"""The enterprise with the updated members can delete issues setting."""
message = sgqlc.types.Field(String, graphql_name="message")
"""A message confirming the result of updating the members can delete
issues setting.
"""
| UpdateEnterpriseMembersCanDeleteIssuesSettingPayload |
python | pytorch__pytorch | test/dynamo/test_list.py | {
"start": 229,
"end": 5248
} | class ____(torch._dynamo.test_case.TestCase):
# Tuple methods
# + count
# + index
# BinOps:
# +, <, >, <=, >=, ==, !=
# Dunder methods:
# + __getitem__
# + __contains__
# + __delitem__
thetype = tuple
def setUp(self):
self.old = torch._dynamo.config.enable_trace_unittest
torch._dynamo.config.enable_trace_unittest = True
super().setUp()
def tearDown(self):
torch._dynamo.config.enable_trace_unittest = self.old
return super().tearDown()
def assertEqual(self, a, b):
return self.assertTrue(a == b, f"{a} != {b}")
def assertNotEqual(self, x, y, msg=None, *, atol=None, rtol=None, **kwargs):
return self.assertTrue(x != y, f"{x} == {y}")
@make_dynamo_test
def test_count(self):
p = self.thetype("abcab")
self.assertEqual(p.count("a"), 2)
self.assertEqual(p.count("ab"), 0)
# Wrong number of arguments
self.assertRaises(TypeError, p.count)
self.assertRaises(TypeError, p.count, 2, 3)
@make_dynamo_test
def test_index(self):
p = self.thetype("abc")
self.assertEqual(p.index("a"), 0)
self.assertRaises(ValueError, p.index, "e")
# Wrong number of arguments
self.assertRaises(TypeError, p.index)
@make_dynamo_test
def test_binop_imul(self):
p = self.thetype([1, 2, 3])
r = p.__mul__(2)
self.assertIsInstance(r, self.thetype)
self.assertEqual(r, self.thetype([1, 2, 3, 1, 2, 3]))
self.assertEqual(p, self.thetype([1, 2, 3]))
# Wrong number of arguments
self.assertRaises(TypeError, p.__mul__)
# can only multiply list by an integer
self.assertRaises(TypeError, p.__mul__, 2.2)
@make_dynamo_test
def test_binop_add(self):
p, q = map(self.thetype, ["abc", "bcd"])
self.assertIsInstance(p + q, self.thetype)
self.assertEqual(p + q, self.thetype("abcbcd"))
self.assertEqual(p.__add__(q), self.thetype("abcbcd"))
# Wrong number of arguments
self.assertRaises(TypeError, p.__add__)
# can only concatenate items of the same type
self.assertRaises(TypeError, p.__add__, dict.fromkeys(q))
@make_dynamo_test
def test_cmp_eq(self):
p, q, r = map(self.thetype, ["ab", "abc", "ab"])
self.assertTrue(p == p)
self.assertTrue(p == r)
self.assertEqual(p, p)
self.assertEqual(p, r)
self.assertNotEqual(p, q)
self.assertTrue(p.__eq__(r))
# Wrong number of arguments
self.assertRaises(TypeError, p.__eq__)
@make_dynamo_test
def test_cmp_ne(self):
p, q = map(self.thetype, ["ab", "abc"])
self.assertTrue(p != q)
self.assertNotEqual(p, q)
self.assertTrue(p.__ne__(q))
# Wrong number of arguments
self.assertRaises(TypeError, p.__ne__)
@make_dynamo_test
def test_cmp_less_than(self):
p, q = map(self.thetype, ["ab", "abc"])
self.assertTrue(p < q)
self.assertTrue(p.__lt__(q))
self.assertFalse(q < p)
# Wrong number of arguments
self.assertRaises(TypeError, p.__lt__)
@make_dynamo_test
def test_cmp_greater_than(self):
p, q = map(self.thetype, ["ab", "abc"])
self.assertTrue(q > p)
self.assertTrue(q.__gt__(p))
self.assertFalse(p > q)
# Wrong number of arguments
self.assertRaises(TypeError, p.__gt__)
@make_dynamo_test
def test_cmp_less_than_or_equal(self):
p, q = map(self.thetype, ["ab", "abc"])
self.assertTrue(p <= q)
self.assertTrue(p.__le__(q))
self.assertFalse(q <= p)
# Wrong number of arguments
self.assertRaises(TypeError, p.__le__)
@make_dynamo_test
def test_cmp_greater_than_or_equal(self):
p, q = map(self.thetype, ["ab", "abc"])
self.assertTrue(q >= p)
self.assertTrue(q.__ge__(p))
self.assertFalse(p >= q)
# Wrong number of arguments
self.assertRaises(TypeError, p.__ge__)
@make_dynamo_test
def test___getitem__(self):
p = self.thetype("abc")
self.assertEqual(p.__getitem__(2), "c")
self.assertRaises(IndexError, p.__getitem__, 10)
# Wrong number of arguments
self.assertRaises(TypeError, p.__getitem__)
self.assertRaises(TypeError, p.__getitem__, 1, 2)
@make_dynamo_test
def test___contains__(self):
p = self.thetype("abc")
self.assertTrue(p.__contains__("a"))
self.assertIsInstance(p.__contains__("c"), bool)
# Wrong number of arguments
self.assertRaises(TypeError, p.__contains__)
self.assertRaises(TypeError, p.__contains__, 1, 2)
@make_dynamo_test
def test___iter__(self):
p = self.thetype([1])
it = p.__iter__()
self.assertEqual(next(it), 1)
it = p.__iter__().__iter__()
self.assertEqual(next(it), 1)
| TupleTests |
python | ray-project__ray | rllib/examples/envs/classes/windy_maze_env.py | {
"start": 463,
"end": 2699
} | class ____(gym.Env):
def __init__(self, env_config):
self.map = [m for m in MAP_DATA.split("\n") if m]
self.x_dim = len(self.map)
self.y_dim = len(self.map[0])
logger.info("Loaded map {} {}".format(self.x_dim, self.y_dim))
for x in range(self.x_dim):
for y in range(self.y_dim):
if self.map[x][y] == "S":
self.start_pos = (x, y)
elif self.map[x][y] == "F":
self.end_pos = (x, y)
logger.info("Start pos {} end pos {}".format(self.start_pos, self.end_pos))
self.observation_space = Tuple(
[
Box(0, 100, shape=(2,)), # (x, y)
Discrete(4), # wind direction (N, E, S, W)
]
)
self.action_space = Discrete(2) # whether to move or not
def reset(self, *, seed=None, options=None):
self.wind_direction = random.choice([0, 1, 2, 3])
self.pos = self.start_pos
self.num_steps = 0
return [[self.pos[0], self.pos[1]], self.wind_direction], {}
def step(self, action):
if action == 1:
self.pos = self._get_new_pos(self.pos, self.wind_direction)
self.num_steps += 1
self.wind_direction = random.choice([0, 1, 2, 3])
at_goal = self.pos == self.end_pos
truncated = self.num_steps >= 200
done = at_goal or truncated
return (
[[self.pos[0], self.pos[1]], self.wind_direction],
100 * int(at_goal),
done,
truncated,
{},
)
def _get_new_pos(self, pos, direction):
if direction == 0:
new_pos = (pos[0] - 1, pos[1])
elif direction == 1:
new_pos = (pos[0], pos[1] + 1)
elif direction == 2:
new_pos = (pos[0] + 1, pos[1])
elif direction == 3:
new_pos = (pos[0], pos[1] - 1)
if (
new_pos[0] >= 0
and new_pos[0] < self.x_dim
and new_pos[1] >= 0
and new_pos[1] < self.y_dim
and self.map[new_pos[0]][new_pos[1]] != "#"
):
return new_pos
else:
return pos # did not move
| WindyMazeEnv |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_compute.py | {
"start": 21994,
"end": 25518
} | class ____:
@mock.patch(COMPUTE_ENGINE_HOOK_PATH)
def test_instance_stop_should_execute_successfully(self, mock_hook):
op = ComputeEngineStopInstanceOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_RESOURCE_ID,
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
context = mock.MagicMock()
op.execute(context=context)
mock_hook.assert_called_once_with(
api_version="v1",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.stop_instance.assert_called_once_with(
zone=GCE_ZONE, resource_id=GCE_RESOURCE_ID, project_id=GCP_PROJECT_ID
)
# Setting all the operator's input parameters as templated dag_ids
# (could be anything else) just to test if the templating works for all fields
@pytest.mark.db_test
@mock.patch(COMPUTE_ENGINE_HOOK_PATH)
def test_instance_stop_with_templates(self, _, create_task_instance_of_operator, session):
dag_id = "test_instance_stop_with_templates"
ti = create_task_instance_of_operator(
ComputeEngineStopInstanceOperator,
dag_id=dag_id,
project_id="{{ dag.dag_id }}",
zone="{{ dag.dag_id }}",
resource_id="{{ dag.dag_id }}",
gcp_conn_id="{{ dag.dag_id }}",
api_version="{{ dag.dag_id }}",
task_id="id",
)
session.add(ti)
session.commit()
ti.render_templates()
assert dag_id == ti.task.project_id
assert dag_id == ti.task.zone
assert dag_id == ti.task.resource_id
assert dag_id == ti.task.gcp_conn_id
assert dag_id == ti.task.api_version
def test_instance_stop_should_throw_ex_when_missing_project_id(self):
with pytest.raises(AirflowException, match=r"The required parameter 'project_id' is missing"):
ComputeEngineStopInstanceOperator(
project_id="", zone=GCE_ZONE, resource_id=GCE_RESOURCE_ID, task_id="id"
)
@mock.patch(COMPUTE_ENGINE_HOOK_PATH)
def test_instance_stop_should_not_throw_ex_when_project_id_none(self, mock_hook):
op = ComputeEngineStopInstanceOperator(zone=GCE_ZONE, resource_id=GCE_RESOURCE_ID, task_id="id")
context = mock.MagicMock()
op.execute(context=context)
mock_hook.assert_called_once_with(
api_version="v1",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
)
mock_hook.return_value.stop_instance.assert_called_once_with(
zone=GCE_ZONE, resource_id=GCE_RESOURCE_ID, project_id=None
)
def test_instance_stop_should_throw_ex_when_missing_zone(self):
with pytest.raises(AirflowException, match=r"The required parameter 'zone' is missing"):
ComputeEngineStopInstanceOperator(
project_id=GCP_PROJECT_ID, zone="", resource_id=GCE_RESOURCE_ID, task_id="id"
)
@mock.patch(COMPUTE_ENGINE_HOOK_PATH)
def test_instance_stop_should_throw_ex_when_missing_resource_id(self, mock_hook):
with pytest.raises(AirflowException, match=r"The required parameter 'resource_id' is missing"):
ComputeEngineStopInstanceOperator(
project_id=GCP_PROJECT_ID, zone=GCE_ZONE, resource_id="", task_id="id"
)
| TestGceInstanceStop |
python | pandas-dev__pandas | pandas/plotting/_matplotlib/converter.py | {
"start": 5696,
"end": 8336
class ____(mdates.DateConverter):
    """Matplotlib unit converter mapping period-like values to Period ordinals.

    Scalars, sequences, and Index objects are converted to the integer
    ordinal of a ``Period`` at the frequency attached to the axis, so that
    matplotlib can place them on a numeric scale.
    """

    @staticmethod
    def convert(values, unit, axis: Axis):
        # Reached via e.g. `ax.set_xlim`
        # In tests as of 2025-09-24, unit is always None except for 3 tests
        # that directly call this with unit="";
        # axis is always specifically a matplotlib.axis.XAxis
        if not hasattr(axis, "freq"):
            raise TypeError("Axis must have `freq` set to convert to Periods")
        freq = to_offset(axis.freq, is_period=True)  # pyright: ignore[reportAttributeAccessIssue]
        return PeriodConverter.convert_from_freq(values, freq)

    @staticmethod
    def convert_from_freq(values, freq: BaseOffset):
        # Nested list-likes are converted element-wise; anything else is
        # handled by _convert_1d in one shot.
        if is_nested_list_like(values):
            values = [PeriodConverter._convert_1d(v, freq) for v in values]
        else:
            values = PeriodConverter._convert_1d(values, freq)
        return values

    @staticmethod
    def _convert_1d(values, freq: BaseOffset):
        # Scalar types that _get_datevalue knows how to turn into an ordinal.
        valid_types = (str, datetime, Period, pydt.date, np.datetime64)
        with warnings.catch_warnings():
            # Business-day periods are deprecated but still supported here;
            # suppress their FutureWarnings for the duration of the conversion.
            warnings.filterwarnings(
                "ignore", "Period with BDay freq is deprecated", category=FutureWarning
            )
            warnings.filterwarnings(
                "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning
            )
            if (
                isinstance(values, valid_types)
                or is_integer(values)
                or is_float(values)
            ):
                # Scalar case (numbers pass through as already-ordinal values).
                return _get_datevalue(values, freq)
            elif isinstance(values, PeriodIndex):
                # Realign to the target frequency, then take the int64 ordinals.
                return values.asfreq(freq).asi8
            elif isinstance(values, Index):
                return values.map(lambda x: _get_datevalue(x, freq))
            elif lib.infer_dtype(values, skipna=False) == "period":
                # https://github.com/pandas-dev/pandas/issues/24304
                # convert ndarray[period] -> PeriodIndex
                return PeriodIndex(values, freq=freq).asi8
            elif isinstance(values, (list, tuple, np.ndarray)):
                return [_get_datevalue(x, freq) for x in values]
        # Unrecognized input is returned unchanged.
        return values
def _get_datevalue(date, freq: BaseOffset):
if isinstance(date, Period):
return date.asfreq(freq).ordinal
elif isinstance(date, (str, datetime, pydt.date, np.datetime64)):
return Period(date, freq).ordinal # pyright: ignore[reportAttributeAccessIssue]
elif is_integer(date) or is_float(date):
return date
elif date is None:
return None
raise ValueError(f"Unrecognizable date '{date}'")
# Datetime Conversion
| PeriodConverter |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis25.py | {
"start": 314,
"end": 1456
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename("chart_axis25.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({"type": "column"})
        chart.axis_ids = [47471232, 48509696]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        # Write every data column first, then attach all series, matching
        # the order Excel used when producing the reference file.
        for anchor, column in zip(("A1", "B1", "C1"), data):
            worksheet.write_column(anchor, column)

        for series_range in (
            "=Sheet1!$A$1:$A$5",
            "=Sheet1!$B$1:$B$5",
            "=Sheet1!$C$1:$C$5",
        ):
            chart.add_series({"values": series_range})

        chart.set_x_axis({"num_format": "[$¥-411]#,##0.00"})
        chart.set_y_axis({"num_format": "0.00%"})

        worksheet.insert_chart("E9", chart)

        workbook.close()

        self.assertExcelEqual()
| TestCompareXLSXFiles |
python | huggingface__transformers | tests/quantization/autoawq/test_awq.py | {
"start": 22450,
"end": 23086
class ____(unittest.TestCase):
    # Repo id of the pre-quantized AWQ checkpoint under test.
    model_name = "TechxGenus/starcoder2-3b-AWQ"

    def test_load_quantized_model(self):
        """
        Simple test that checks if the scales have been replaced in the quantized model
        """
        # Fixes: the docstring previously sat *after* this import, making it a
        # dead string expression rather than the method docstring.
        from awq.modules.act import ScaledActivation

        # Reuse the class-level model id instead of duplicating the literal.
        quantized_model = AutoModelForCausalLM.from_pretrained(
            self.model_name, dtype=torch.float16, device_map=torch_device
        )
        self.assertTrue(isinstance(quantized_model.model.layers[0].mlp.act, ScaledActivation))
@slow
@require_auto_awq
@require_accelerate
@require_intel_extension_for_pytorch
| AwqScaleTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.