language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/array-partition-i.py | {
"start": 849,
"end": 1073
} | class ____(object):
def arrayPairSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums = sorted(nums)
return sum([nums[i] for i in range(0, len(nums), 2)])
| Solution3 |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/logging_ops_test.py | {
"start": 1457,
"end": 2464
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def testAssertDivideByZero(self):
with self.cached_session() as sess:
epsilon = ops.convert_to_tensor(1e-20)
x = ops.convert_to_tensor(0.0)
y = ops.convert_to_tensor(1.0)
z = ops.convert_to_tensor(2.0)
# assert(epsilon < y)
# z / y
with sess.graph.control_dependencies([
control_flow_assert.Assert(
math_ops.less(epsilon, y), ["Divide-by-zero"])
]):
out = math_ops.div(z, y)
self.assertAllEqual(2.0, self.evaluate(out))
# assert(epsilon < x)
# z / x
#
# This tests printing out multiple tensors
with sess.graph.control_dependencies([
control_flow_assert.Assert(
math_ops.less(epsilon, x), ["Divide-by-zero", "less than x"])
]):
out = math_ops.div(z, x)
with self.assertRaisesOpError("less than x"):
self.evaluate(out)
@test_util.run_all_in_graph_and_eager_modes
| LoggingOpsTest |
python | pytorch__pytorch | test/onnx/test_custom_ops.py | {
"start": 2631,
"end": 3876
} | class ____(pytorch_test_common.ExportTestCase):
opset_version = 14
keep_initializers_as_inputs = False
onnx_shape_inference = True
def test_contrib_op_with_loop(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.gelu = torch.nn.GELU(approximate="none")
def forward(self, x):
res = []
res2 = []
for _ in range(x.size(0)):
if len(res) > 0:
res2.append(res[0])
else:
res2.append(self.gelu(x[0]))
res.append(x[0])
return torch.stack(res), torch.stack(res2)
def symbolic_custom_gelu(g, input, approximate):
return g.op("com.microsoft::Gelu", input).setType(input.type())
from torch.onnx import register_custom_op_symbolic
register_custom_op_symbolic("::gelu", symbolic_custom_gelu, 1)
x = torch.randn(3, 3, 4, requires_grad=True)
model = torch.jit.script(M())
onnx_test_common.run_model_test(self, model, input_args=(x,))
if __name__ == "__main__":
common_utils.run_tests()
| TestExportAsContribOps |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/asset/__init__.py | {
"start": 17174,
"end": 17311
} | class ____(Asset):
"""A representation of dataset dependencies between workflows."""
asset_type: ClassVar[str] = "dataset"
| Dataset |
python | kamyu104__LeetCode-Solutions | Python/maximum-69-number.py | {
"start": 387,
"end": 566
} | class ____(object):
def maximum69Number (self, num):
"""
:type num: int
:rtype: int
"""
return int(str(num).replace('6', '9', 1))
| Solution2 |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/kinesis_analytics.py | {
"start": 1416,
"end": 3355
} | class ____(AwsBaseSensor[KinesisAnalyticsV2Hook]):
"""
General sensor behaviour for AWS Managed Service for Apache Flink.
Subclasses must set the following fields:
- ``INTERMEDIATE_STATES``
- ``FAILURE_STATES``
- ``SUCCESS_STATES``
- ``FAILURE_MESSAGE``
- ``SUCCESS_MESSAGE``
:param application_name: Application name.
:param deferrable: If True, the sensor will operate in deferrable mode. This mode requires aiobotocore
module to be installed.
(default: False, but can be overridden in config file by setting default_deferrable to True)
"""
aws_hook_class = KinesisAnalyticsV2Hook
ui_color = "#66c3ff"
INTERMEDIATE_STATES: tuple[str, ...] = ()
FAILURE_STATES: tuple[str, ...] = ()
SUCCESS_STATES: tuple[str, ...] = ()
FAILURE_MESSAGE = ""
SUCCESS_MESSAGE = ""
def __init__(
self,
application_name: str,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs: Any,
):
super().__init__(**kwargs)
self.application_name = application_name
self.deferrable = deferrable
def poke(self, context: Context, **kwargs) -> bool:
status = self.hook.conn.describe_application(ApplicationName=self.application_name)[
"ApplicationDetail"
]["ApplicationStatus"]
self.log.info(
"Poking for AWS Managed Service for Apache Flink application: %s status: %s",
self.application_name,
status,
)
if status in self.FAILURE_STATES:
raise AirflowException(self.FAILURE_MESSAGE)
if status in self.SUCCESS_STATES:
self.log.info(
"%s `%s`.",
self.SUCCESS_MESSAGE,
self.application_name,
)
return True
return False
| KinesisAnalyticsV2BaseSensor |
python | huggingface__transformers | src/transformers/models/flex_olmo/modular_flex_olmo.py | {
"start": 10584,
"end": 10636
} | class ____(Olmo2Attention):
pass
| FlexOlmoAttention |
python | tensorflow__tensorflow | tensorflow/python/framework/test_combinations.py | {
"start": 2385,
"end": 4360
} | class ____:
"""Customize the behavior of `generate()` and the tests that it executes.
Here is sequence of steps for executing a test combination:
1. The test combination is evaluated for whether it should be executed in
the given environment by calling `should_execute_combination`.
2. If the test combination is going to be executed, then the arguments for
all combined parameters are validated. Some arguments can be handled in
a special way. This is achieved by implementing that logic in
`ParameterModifier` instances that returned from `parameter_modifiers`.
3. Before executing the test, `context_managers` are installed
around it.
"""
def should_execute_combination(self, kwargs):
"""Indicates whether the combination of test arguments should be executed.
If the environment doesn't satisfy the dependencies of the test
combination, then it can be skipped.
Args:
kwargs: Arguments that are passed to the test combination.
Returns:
A tuple boolean and an optional string. The boolean False indicates
that the test should be skipped. The string would indicate a textual
description of the reason. If the test is going to be executed, then
this method returns `None` instead of the string.
"""
del kwargs
return (True, None)
def parameter_modifiers(self):
"""Returns `ParameterModifier` instances that customize the arguments."""
return []
def context_managers(self, kwargs):
"""Return context managers for running the test combination.
The test combination will run under all context managers that all
`TestCombination` instances return.
Args:
kwargs: Arguments and their values that are passed to the test
combination.
Returns:
A list of instantiated context managers.
"""
del kwargs
return []
@tf_export("__internal__.test.combinations.ParameterModifier", v1=[])
| TestCombination |
python | FactoryBoy__factory_boy | tests/alchemyapp/models.py | {
"start": 987,
"end": 1098
} | class ____(Base):
__tablename__ = 'NonIntegerPk'
id = Column(Unicode(20), primary_key=True)
| NonIntegerPk |
python | huggingface__transformers | src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py | {
"start": 3552,
"end": 9067
} | class ____(nn.Module):
"""
Construct the CLS token, mask token, register tokens, position and patch embeddings.
"""
def __init__(self, config: Dinov2WithRegistersConfig) -> None:
super().__init__()
self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
self.register_tokens = nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size))
self.patch_embeddings = Dinov2WithRegistersPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
resolution images. This implementation supports torch.jit tracing while maintaining backwards compatibility
with the original implementation.
Adapted from:
- https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
- https://github.com/facebookresearch/dinov2/blob/main/dinov2/models/vision_transformer.py
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
# Skip interpolation for matching dimensions (unless tracing)
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embeddings
# Handle class token and patch embeddings separately
class_pos_embed = self.position_embeddings[:, 0]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
# Calculate new dimensions
height = height // self.config.patch_size
width = width // self.config.patch_size
# Reshape for interpolation
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
# Store original dtype for restoration after interpolation
target_dtype = patch_pos_embed.dtype
# Interpolate at float32 precision
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.to(dtype=torch.float32),
size=(torch_int(height), torch_int(width)), # Explicit size instead of scale_factor
mode="bicubic",
align_corners=False,
antialias=True,
).to(dtype=target_dtype)
# Validate output dimensions if not tracing
if not torch.jit.is_tracing():
if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
raise ValueError("Width or height does not match with the interpolated position embeddings")
# Reshape back to original format
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
# Combine class and patch embeddings
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
target_dtype = self.patch_embeddings.projection.weight.dtype
embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
if bool_masked_pos is not None:
embeddings = torch.where(
bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
)
# add the [CLS] token to the embedded patch tokens
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
# add positional encoding to each token
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
# add register tokens
embeddings = torch.cat(
(embeddings[:, :1], self.register_tokens.expand(embeddings.shape[0], -1, -1), embeddings[:, 1:]), dim=1
)
embeddings = self.dropout(embeddings)
return embeddings
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| Dinov2WithRegistersEmbeddings |
python | sqlalchemy__sqlalchemy | test/orm/test_lockmode.py | {
"start": 401,
"end": 1860
} | class ____(_fixtures.FixtureTest):
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
cls.mapper_registry.map_imperatively(User, users)
def _assert(
self,
read=False,
nowait=False,
of=None,
key_share=None,
assert_q_of=None,
assert_sel_of=None,
):
User = self.classes.User
s = fixture_session()
q = s.query(User).with_for_update(
read=read, nowait=nowait, of=of, key_share=key_share
)
sel = q._compile_state().statement
assert q._for_update_arg.read is read
assert sel._for_update_arg.read is read
assert q._for_update_arg.nowait is nowait
assert sel._for_update_arg.nowait is nowait
assert q._for_update_arg.key_share is key_share
assert sel._for_update_arg.key_share is key_share
eq_(q._for_update_arg.of, assert_q_of)
eq_(sel._for_update_arg.of, assert_sel_of)
def test_key_share(self):
self._assert(key_share=True)
def test_read(self):
self._assert(read=True)
def test_plain(self):
self._assert()
def test_nowait(self):
self._assert(nowait=True)
def test_of_single_col(self):
User, users = self.classes.User, self.tables.users
self._assert(
of=User.id, assert_q_of=[users.c.id], assert_sel_of=[users.c.id]
)
| ForUpdateTest |
python | google__jax | jax/_src/pallas/mosaic_gpu/core.py | {
"start": 29443,
"end": 30041
} | class ____(state_types.Transform):
device_id: Any
device_id_type: pallas_primitives.DeviceIdType
def transform_shape(self, shape):
return shape
def transform_dtype(self, dtype):
return dtype
def untransform_index(
self, idxs: tuple[Index, ...]
) -> tuple[tuple[Index, ...], state_types.Transform]:
return idxs, self
def tree_flatten(self):
return (self.device_id,), (self.device_id_type,)
@classmethod
def tree_unflatten(cls, metadata, arrays):
return cls(arrays[0], metadata[0])
@tree_util.register_pytree_node_class
@dataclasses.dataclass
| PeerMemRef |
python | pappasam__jedi-language-server | jedi_language_server/notebook_utils.py | {
"start": 1626,
"end": 1765
} | class ____(NamedTuple):
"""A text edit in a document."""
uri: str
text_edit: Union[TextEdit, AnnotatedTextEdit]
| DocumentTextEdit |
python | chroma-core__chroma | chromadb/rate_limit/simple_rate_limit/__init__.py | {
"start": 283,
"end": 723
} | class ____(RateLimitEnforcer):
"""
A naive implementation of a rate limit enforcer that allows all requests.
"""
def __init__(self, system: System) -> None:
super().__init__(system)
@override
def rate_limit(self, func: T) -> T:
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
return func(*args, **kwargs)
return wrapper # type: ignore
| SimpleRateLimitEnforcer |
python | huggingface__transformers | src/transformers/models/phi3/modeling_phi3.py | {
"start": 2836,
"end": 9509
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Phi3Config, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[Phi3Config] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
dim = int(head_dim * partial_rotary_factor)
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
rotary_dim = cos.shape[-1]
q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
q_embed = torch.cat([(q_rot * cos) + (rotate_half(q_rot) * sin), q_pass], dim=-1)
k_embed = torch.cat([(k_rot * cos) + (rotate_half(k_rot) * sin), k_pass], dim=-1)
return q_embed, k_embed
| Phi3RotaryEmbedding |
python | walkccc__LeetCode | solutions/384. Shuffle an Array/384.py | {
"start": 0,
"end": 317
} | class ____:
def __init__(self, nums: list[int]):
self.nums = nums
def reset(self) -> list[int]:
return self.nums
def shuffle(self) -> list[int]:
arr = self.nums.copy()
for i in range(len(arr) - 1, 0, -1):
j = random.randint(0, i)
arr[i], arr[j] = arr[j], arr[i]
return arr
| Solution |
python | falconry__falcon | falcon/routing/converters.py | {
"start": 6752,
"end": 7816
} | class ____(BaseConverter):
"""Field converted used to match the rest of the path.
This field converter matches the remainder of the URL path,
returning it as a string.
This converter is currently supported only when used at the
end of the URL template.
The classic routing rules of falcon apply also to this converter:
considering the template ``'/foo/bar/{matched_path:path}'``, the path
``'/foo/bar'`` will *not* match the route; ``'/foo/bar/'`` will
match, producing ``matched_path=''``, when
:attr:`~falcon.RequestOptions.strip_url_path_trailing_slash` is ``False``
(the default), while it will *not* match when that option is ``True``.
(See also: :ref:`trailing_slash_in_path`)
.. versionadded:: 4.0
"""
CONSUME_MULTIPLE_SEGMENTS = True
def convert(self, value: Iterable[str]) -> str:
return '/'.join(value)
BUILTIN = (
('int', IntConverter),
('dt', DateTimeConverter),
('uuid', UUIDConverter),
('float', FloatConverter),
('path', PathConverter),
)
| PathConverter |
python | pyca__cryptography | src/cryptography/hazmat/primitives/_serialization.py | {
"start": 4648,
"end": 5123
} | class ____(KeySerializationEncryption):
def __init__(
self,
format: PrivateFormat,
password: bytes,
*,
kdf_rounds: int | None,
hmac_hash: HashAlgorithm | None,
key_cert_algorithm: PBES | None,
):
self._format = format
self.password = password
self._kdf_rounds = kdf_rounds
self._hmac_hash = hmac_hash
self._key_cert_algorithm = key_cert_algorithm
| _KeySerializationEncryption |
python | getsentry__sentry | src/sentry/migrations/0916_delete_open_period_rows.py | {
"start": 776,
"end": 2102
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("sentry", "0915_add_user_email_unique_column"),
]
operations = [
migrations.RunPython(
code=delete_open_periods,
reverse_code=migrations.RunPython.noop,
hints={"tables": ["sentry_groupopenperiod"]},
),
]
| Migration |
python | tornadoweb__tornado | tornado/options.py | {
"start": 17982,
"end": 19163
} | class ____:
"""`mock.patch` compatible wrapper for `OptionParser`.
As of ``mock`` version 1.0.1, when an object uses ``__getattr__``
hooks instead of ``__dict__``, ``patch.__exit__`` tries to delete
the attribute it set instead of setting a new one (assuming that
the object does not capture ``__setattr__``, so the patch
created a new attribute in ``__dict__``).
_Mockable's getattr and setattr pass through to the underlying
OptionParser, and delattr undoes the effect of a previous setattr.
"""
def __init__(self, options: OptionParser) -> None:
# Modify __dict__ directly to bypass __setattr__
self.__dict__["_options"] = options
self.__dict__["_originals"] = {}
def __getattr__(self, name: str) -> Any:
return getattr(self._options, name)
def __setattr__(self, name: str, value: Any) -> None:
assert name not in self._originals, "don't reuse mockable objects"
self._originals[name] = getattr(self._options, name)
setattr(self._options, name, value)
def __delattr__(self, name: str) -> None:
setattr(self._options, name, self._originals.pop(name))
| _Mockable |
python | pytorch__pytorch | torch/testing/_internal/common_dist_composable.py | {
"start": 518,
"end": 886
} | class ____(nn.Module):
def __init__(self, device: torch.device):
super().__init__()
self.l1 = nn.Linear(100, 100, device=device)
self.u1 = UnitModule(device)
self.u2 = UnitModule(device)
self.l2 = nn.Linear(100, 100, device=device)
def forward(self, x):
return self.l2(self.u2(self.u1(self.l1(x))))
| CompositeModel |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_ordered_dict.py | {
"start": 30497,
"end": 31399
} | class ____(OrderedDictTests, __TestCase):
module = py_coll
OrderedDict = py_coll.OrderedDict
def test_issue119004_attribute_error(self):
with torch._dynamo.error_on_graph_break(False):
class Key(_TriggerSideEffectOnEqual):
def side_effect(self):
del dict1[TODEL]
TODEL = Key()
dict1 = self.OrderedDict(dict.fromkeys((0, TODEL, 4.2)))
dict2 = self.OrderedDict(dict.fromkeys((0, Key(), 4.2)))
# This causes an AttributeError due to the linked list being changed
msg = re.escape("'NoneType' object has no attribute 'key'")
self.assertRaisesRegex(AttributeError, msg, operator.eq, dict1, dict2)
self.assertEqual(Key.count, 2)
self.assertDictEqual(dict1, dict.fromkeys((0, 4.2)))
self.assertDictEqual(dict2, dict.fromkeys((0, Key(), 4.2)))
| PurePythonOrderedDictTests |
python | celery__celery | t/unit/backends/test_base.py | {
"start": 1744,
"end": 1958
} | class ____:
def test_create_exception_cls(self):
assert serialization.create_exception_cls('FooError', 'm')
assert serialization.create_exception_cls('FooError', 'm', KeyError)
| test_serialization |
python | django-crispy-forms__django-crispy-forms | crispy_forms/bootstrap.py | {
"start": 325,
"end": 3789
} | class ____(Field):
"""
Layout object for rendering a field with prepended and appended text.
Attributes
----------
template : str
The default template which this Layout Object will be rendered
with.
attrs : dict
Attributes to be applied to the field. These are converted into html
attributes. e.g. ``data_id: 'test'`` in the attrs dict will become
``data-id='test'`` on the field's ``<input>``.
Parameters
----------
field : str
The name of the field to be rendered.
prepended_text : str, optional
The prepended text, can be HTML like, by default None
appended_text : str, optional
The appended text, can be HTML like, by default None
input_size : str, optional
For Bootstrap4+ additional classes to customise the input-group size
e.g. ``input-group-sm``. By default None
active : bool
For Bootstrap3, a boolean to render the text active. By default
``False``.
css_class : str, optional
CSS classes to be applied to the field. These are added to any classes
included in the ``attrs`` dict. By default ``None``.
wrapper_class: str, optional
CSS classes to be used when rendering the Field. This class is usually
applied to the ``<div>`` which wraps the Field's ``<label>`` and
``<input>`` tags. By default ``None``.
template : str, optional
Overrides the default template, if provided. By default ``None``.
**kwargs : dict, optional
Additional attributes are converted into key="value", pairs. These
attributes are added to the ``<div>``.
Examples
--------
Example::
PrependedAppendedText('amount', '$', '.00')
"""
template = "%s/layout/prepended_appended_text.html"
def __init__(
self,
field,
prepended_text=None,
appended_text=None,
input_size=None,
*,
active=False,
css_class=None,
wrapper_class=None,
template=None,
**kwargs,
):
self.field = field
self.appended_text = appended_text
self.prepended_text = prepended_text
self.active = active
self.input_size = input_size
if css_class: # Bootstrap 3
if "input-lg" in css_class:
self.input_size = "input-lg"
if "input-sm" in css_class:
self.input_size = "input-sm"
super().__init__(field, css_class=css_class, wrapper_class=wrapper_class, template=template, **kwargs)
def render(self, form, context, template_pack=TEMPLATE_PACK, extra_context=None, **kwargs):
extra_context = extra_context.copy() if extra_context is not None else {}
extra_context.update(
{
"crispy_appended_text": self.appended_text,
"crispy_prepended_text": self.prepended_text,
"input_size": self.input_size,
"active": getattr(self, "active", False),
"wrapper_class": self.wrapper_class,
}
)
template = self.get_template_name(template_pack)
return render_field(
self.field,
form,
context,
template=template,
attrs=self.attrs,
template_pack=template_pack,
extra_context=extra_context,
**kwargs,
)
| PrependedAppendedText |
python | huggingface__transformers | src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | {
"start": 11177,
"end": 12281
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: InstructBlipVideoConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = InstructBlipVideoAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = InstructBlipVideoMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
@auto_docstring
def forward(
self,
hidden_states: torch.Tensor,
**kwargs: Unpack[TransformersKwargs],
) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
**kwargs,
)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = hidden_states + residual
return hidden_states
| InstructBlipVideoEncoderLayer |
python | kamyu104__LeetCode-Solutions | Python/best-time-to-buy-and-sell-stock-v.py | {
"start": 38,
"end": 694
} | class ____(object):
def maximumProfit(self, prices, k):
"""
:type prices: List[int]
:type k: int
:rtype: int
"""
dp = [0]*(len(prices)+1)
result = 0
for i in xrange(k):
x, y = float("-inf"), float("-inf")
new_dp = [float("-inf")]*(len(prices)+1)
for j in xrange(i, len(prices)):
x, y = max(x, dp[j]-prices[j]), max(y, dp[j]+prices[j])
new_dp[j+1] = max(new_dp[j], x+prices[j], y-prices[j])
dp = new_dp
result = max(result, dp[-1])
return result
# Time: O(n * k)
# Space: O(k)
# dp
| Solution |
python | langchain-ai__langchain | libs/core/langchain_core/callbacks/base.py | {
"start": 12631,
"end": 13871
} | class ____(
LLMManagerMixin,
ChainManagerMixin,
ToolManagerMixin,
RetrieverManagerMixin,
CallbackManagerMixin,
RunManagerMixin,
):
"""Base callback handler for LangChain."""
raise_error: bool = False
"""Whether to raise an error if an exception occurs."""
run_inline: bool = False
"""Whether to run the callback inline."""
@property
def ignore_llm(self) -> bool:
"""Whether to ignore LLM callbacks."""
return False
@property
def ignore_retry(self) -> bool:
"""Whether to ignore retry callbacks."""
return False
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return False
@property
def ignore_agent(self) -> bool:
"""Whether to ignore agent callbacks."""
return False
@property
def ignore_retriever(self) -> bool:
"""Whether to ignore retriever callbacks."""
return False
@property
def ignore_chat_model(self) -> bool:
"""Whether to ignore chat model callbacks."""
return False
@property
def ignore_custom_event(self) -> bool:
"""Ignore custom event."""
return False
| BaseCallbackHandler |
python | pypa__pip | src/pip/_vendor/resolvelib/resolvers/exceptions.py | {
"start": 1336,
"end": 1599
} | class ____(ResolutionError, Generic[RT, CT]):
def __init__(self, causes: Collection[RequirementInformation[RT, CT]]):
super().__init__(causes)
# causes is a list of RequirementInformation objects
self.causes = causes
| ResolutionImpossible |
python | huggingface__transformers | src/transformers/models/donut/modeling_donut_swin.py | {
"start": 28789,
"end": 30938
} | class ____(GradientCheckpointingLayer):
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
super().__init__()
self.config = config
self.dim = dim
self.blocks = nn.ModuleList(
[
DonutSwinLayer(
config=config,
dim=dim,
input_resolution=input_resolution,
num_heads=num_heads,
drop_path_rate=drop_path[i],
shift_size=0 if (i % 2 == 0) else config.window_size // 2,
)
for i in range(depth)
]
)
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
else:
self.downsample = None
self.pointing = False
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: tuple[int, int],
output_attentions: Optional[bool] = False,
always_partition: Optional[bool] = False,
) -> tuple[torch.Tensor]:
height, width = input_dimensions
for i, layer_module in enumerate(self.blocks):
layer_outputs = layer_module(hidden_states, input_dimensions, output_attentions, always_partition)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = hidden_states
if self.downsample is not None:
height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
output_dimensions = (height, width, height_downsampled, width_downsampled)
hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
else:
output_dimensions = (height, width, height, width)
stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
if output_attentions:
stage_outputs += layer_outputs[1:]
return stage_outputs
# Copied from transformers.models.swin.modeling_swin.SwinEncoder with Swin->DonutSwin
| DonutSwinStage |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/dist_autograd_test.py | {
"start": 39643,
"end": 48237
} | class ____(CommonDistAutogradTest):
# Sparse tests only work with TensorPipeAgent.
@dist_init
def test_graph_for_builtin_call_sparse(self):
self._test_graph(torch.add, ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_python_call_sparse(self):
self._test_graph(my_py_add, ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_builtin_remote_call_sparse(self):
self._test_graph(torch.add, ExecMode.REMOTE, True)
@dist_init
def test_graph_for_python_remote_call_sparse(self):
self._test_graph(my_py_add, ExecMode.REMOTE, True)
@dist_init
def test_graph_for_py_nested_call_sparse(self):
self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_py_nested_remote_call_sparse(self):
self._test_graph_for_py_nested_call(ExecMode.REMOTE, True)
@dist_init
def test_graph_for_py_nested_call_itself_sparse(self):
self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_py_nested_remote_call_itself_sparse(self):
self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, True)
@dist_init
def test_no_graph_with_tensors_not_require_grad_sparse(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, True)
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote_sparse(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, True)
@dist_init
def test_rpc_complex_args_sparse(self):
self._test_rpc_complex_args(ExecMode.RPC_SYNC, True)
@dist_init
def test_remote_complex_args_sparse(self):
self._test_rpc_complex_args(ExecMode.REMOTE, True)
@dist_init
def test_context_cleanup_tensor_with_grad_sparse(self):
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad_sparse(self):
t1 = build_sparse_tensor(requires_grad=False)
self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add)
@dist_init
def test_context_cleanup_nested_rpc_sparse(self):
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
dst_rank = (self.rank + 1) % self.world_size
args = (t1, t2, dst_rank, self.world_size, 0)
self.context_cleanup_test_helper(
rpc_args=args, func=my_py_nested_call, nested=True
)
@dist_init
def test_backward_no_grad_on_tensor_sparse(self):
self._backward_no_grad_on_tensor(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True,
)
@dist_init
def test_backward_simple_sparse(self):
self._backward_simple(
self._next_rank(),
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True,
)
@dist_init
def test_backward_simple_self_sparse(self):
self._backward_simple(
self.rank,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True,
)
@dist_init
def test_backward_rref_multi_sparse(self):
if self.rank > 0:
callee = "worker0"
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True,
)
@dist_init
def test_backward_rref_sparse(self):
callee = worker_name(self._next_rank())
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True,
)
@dist_init
def test_backward_rref_nested_sparse(self):
callee = worker_name((self.rank + 1) % self.world_size)
rref_owner = worker_name((self.rank + 2) % self.world_size)
self._backward_rref(
callee,
rref_owner,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True,
)
@dist_init
def test_trainer_ps_sparse(self):
self._test_trainer_ps(build_sparse_tensor, _run_trainer, True)
@dist_init
def test_backward_multiple_round_trips_sparse(self):
self._backward_multiple_round_trips(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=False),
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=False),
build_sparse_tensor(requires_grad=True),
None,
True,
)
@dist_init
def test_backward_different_dtypes_sparse(self):
self._backward_different_dtypes(
build_sparse_tensor(requires_grad=True, dtype=torch.float32),
build_sparse_tensor(requires_grad=True, dtype=torch.float64),
True,
)
@dist_init
def test_backward_simple_python_udf_sparse(self):
self._backward_simple_python_udf(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True,
)
@dist_init
def test_backward_simple_script_call_sparse(self):
self._backward_simple_script_call(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True,
)
@dist_init
def test_nested_backward_accumulate_grads_sparse(self):
self._nested_backward_accumulate_grads(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True,
)
@dist_init
def test_backwards_nested_python_udf_sparse(self):
# Run equivalent of _nested_python_udf locally.
self._backwards_nested_python_udf(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True,
)
@dist_init
def test_mixed_requires_grad_sparse(self):
self._mixed_requires_grad(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=False),
True,
)
@dist_init
def test_multiple_backward_sparse(self):
self._multiple_backward(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True,
)
@dist_init
def test_embedding_bag_with_no_grad_tensors(self):
dst = self._next_rank()
remote_embedding = rpc.remote(
worker_name(dst),
torch.nn.EmbeddingBag,
args=(16, 16),
kwargs={"mode": "sum", "sparse": True},
)
local_embedding = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True)
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
# requires_grad = True to record send/recv functions
per_sample_weights = torch.rand((8), requires_grad=True)
offsets = torch.LongTensor([0, 4])
local_res = local_embedding(input, offsets, per_sample_weights)
# Run backward twice.
torch.autograd.backward([local_res.sum()], retain_graph=True)
torch.autograd.backward([local_res.sum()])
local_grad = local_embedding.weight.grad
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._call_remote_embedding,
args=(remote_embedding, input, offsets, per_sample_weights),
)
# Run backward twice to test accumulation of sparse gradients.
dist_autograd.backward(context_id, [res.sum()], retain_graph=True)
dist_autograd.backward(context_id, [res.sum()])
remote_grad = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._get_grad,
args=(remote_embedding, context_id),
)
self.assertEqual(local_grad, remote_grad)
| TensorPipeAgentDistAutogradTest |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 4316,
"end": 4786
} | class ____(StreamlitAPIException, Warning):
"""Used to display a warning.
Note that this should not be "raised", but passed to st.exception
instead.
"""
def __init__(self, *args: Any) -> None:
super().__init__(*args)
import inspect
import traceback
f = inspect.currentframe()
self.tacked_on_stack = traceback.extract_stack(f)
def __repr__(self) -> str:
return util.repr_(self)
| StreamlitAPIWarning |
python | pytorch__pytorch | torch/utils/tensorboard/_pytorch_graph.py | {
"start": 3676,
"end": 13908
} | class ____:
"""Helper class to convert torch.nn.Module to GraphDef proto and visualization with TensorBoard.
GraphDef generation operates in two passes:
In the first pass, all nodes are read and saved to two lists.
One list is for input/output nodes (nodes_io), which only have inbound
or outbound connections, but not both. Another list is for internal
operator nodes (nodes_op). The first pass also saves all scope name
appeared in the nodes in scope_name_appeared list for later processing.
In the second pass, scope names are fully applied to all nodes.
debugNameToScopedName is a mapping from a node's ID to its fully qualified
scope name. e.g. Net1/Linear[0]/1. Unfortunately torch.jit doesn't have
totally correct scope output, so this is nontrivial. The function
populate_namespace_from_OP_to_IO and find_common_root are used to
assign scope name to a node based on the connection between nodes
in a heuristic kind of way. Bookkeeping is done with shallowest_scope_name
and scope_name_appeared.
"""
def __init__(self) -> None:
self.nodes_op = []
self.nodes_io = OrderedDict()
self.unique_name_to_scoped_name = {}
self.shallowest_scope_name = "default"
self.scope_name_appeared = []
def append(self, x) -> None:
if isinstance(x, NodePyIO):
self.nodes_io[x.debugName] = x
if isinstance(x, NodePyOP):
self.nodes_op.append(x)
def printall(self) -> None:
print("all nodes")
for node in self.nodes_op:
print(node)
for key in self.nodes_io:
print(self.nodes_io[key])
def find_common_root(self) -> None:
for fullscope in self.scope_name_appeared:
if fullscope:
self.shallowest_scope_name = fullscope.split("/")[0]
def populate_namespace_from_OP_to_IO(self) -> None:
for node in self.nodes_op:
for node_output, outputSize in zip(node.outputs, node.outputstensor_size, strict=True):
self.scope_name_appeared.append(node.scopeName)
self.nodes_io[node_output] = NodeBase(
node_output,
node.inputs,
node.scopeName,
outputSize,
op_type=node.kind,
attributes=node.attributes,
)
self.find_common_root()
for node in self.nodes_op:
for input_node_id in node.inputs:
self.unique_name_to_scoped_name[input_node_id] = (
node.scopeName + "/" + input_node_id
)
for key, node in self.nodes_io.items():
if type(node) is NodeBase:
# pyrefly: ignore [unsupported-operation]
self.unique_name_to_scoped_name[key] = node.scope + "/" + node.debugName
if hasattr(node, "input_or_output"):
self.unique_name_to_scoped_name[key] = (
node.input_or_output + "/" + node.debugName
)
if hasattr(node, "scope") and node.scope is not None:
self.unique_name_to_scoped_name[key] = node.scope + "/" + node.debugName
if node.scope == "" and self.shallowest_scope_name:
self.unique_name_to_scoped_name[node.debugName] = (
# pyrefly: ignore [unsupported-operation]
self.shallowest_scope_name + "/" + node.debugName
)
# replace name
for key, node in self.nodes_io.items():
self.nodes_io[key].inputs = [
self.unique_name_to_scoped_name[node_input_id]
for node_input_id in node.inputs
]
if node.debugName in self.unique_name_to_scoped_name:
self.nodes_io[key].debugName = self.unique_name_to_scoped_name[
node.debugName
]
def to_proto(self):
"""Convert graph representation of GraphPy object to TensorBoard required format."""
# TODO: compute correct memory usage and CPU time once
# PyTorch supports it
nodes = [
node_proto(
v.debugName,
input=v.inputs,
outputsize=v.tensor_size,
op=v.kind,
attributes=v.attributes,
)
for v in self.nodes_io.values()
]
return nodes
def parse(graph, trace, args=None, omit_useless_nodes=True):
"""Parse an optimized PyTorch model graph and produces a list of nodes and node stats.
Useful for eventual conversion to TensorBoard protobuf format.
Args:
graph (PyTorch module): The model graph to be parsed.
trace (PyTorch JIT TracedModule): The model trace to be parsed.
args (tuple): input tensor[s] for the model.
omit_useless_nodes (boolean): Whether to remove nodes from the graph.
"""
nodes_py = GraphPy()
for node in graph.inputs():
if omit_useless_nodes:
if (
len(node.uses()) == 0
): # number of user of the node (= number of outputs/ fanout)
continue
if node.type().kind() != CLASSTYPE_KIND:
nodes_py.append(NodePyIO(node, "input"))
attr_to_scope: dict[Any, str] = {}
for node in graph.nodes():
if node.kind() == GETATTR_KIND:
attr_name = node.s("name")
attr_key = node.output().debugName()
parent = node.input().node()
if (
parent.kind() == GETATTR_KIND
): # If the parent node is not the top-level "self" node
parent_attr_key = parent.output().debugName()
parent_scope = attr_to_scope[parent_attr_key]
attr_scope = parent_scope.split("/")[-1]
attr_to_scope[attr_key] = f"{parent_scope}/{attr_scope}.{attr_name}"
else:
attr_to_scope[attr_key] = f"__module.{attr_name}"
# We don't need classtype nodes; scope will provide this information
if node.output().type().kind() != CLASSTYPE_KIND:
node_py = NodePyOP(node)
node_py.scopeName = attr_to_scope[attr_key] # type: ignore[attr-defined]
nodes_py.append(node_py)
else:
nodes_py.append(NodePyOP(node))
for i, node in enumerate(graph.outputs()): # Create sink nodes for output ops
node_pyio = NodePyIO(node, "output")
node_pyio.debugName = f"output.{i + 1}"
node_pyio.inputs = [node.debugName()]
nodes_py.append(node_pyio)
def parse_traced_name(module):
if isinstance(module, torch.jit.TracedModule):
module_name = module._name
else:
module_name = getattr(module, "original_name", "Module")
return module_name
alias_to_name = {}
base_name = parse_traced_name(trace)
for name, module in trace.named_modules(prefix="__module"):
mod_name = parse_traced_name(module)
attr_name = name.split(".")[-1]
alias_to_name[name] = f"{mod_name}[{attr_name}]"
for node in nodes_py.nodes_op:
module_aliases = node.scopeName.split("/")
replacements = [
alias_to_name[alias] if alias in alias_to_name else alias.split(".")[-1]
for alias in module_aliases
]
node.scopeName = base_name
if any(replacements):
node.scopeName += "/" + "/".join(replacements)
nodes_py.populate_namespace_from_OP_to_IO()
return nodes_py.to_proto()
def graph(model, args, verbose=False, use_strict_trace=True):
"""
Process a PyTorch model and produces a `GraphDef` proto that can be logged to TensorBoard.
Args:
model (PyTorch module): The model to be parsed.
args (tuple): input tensor[s] for the model.
verbose (bool): Whether to print out verbose information while
processing.
use_strict_trace (bool): Whether to pass keyword argument `strict` to
`torch.jit.trace`. Pass False when you want the tracer to
record your mutable container types (list, dict)
"""
with _set_model_to_eval(model):
try:
trace = torch.jit.trace(model, args, strict=use_strict_trace)
graph = trace.graph
torch._C._jit_pass_inline(graph)
except RuntimeError as e:
print(e)
print("Error occurs, No graph saved")
raise e
if verbose:
print(graph)
list_of_nodes = parse(graph, trace, args)
# We are hardcoding that this was run on CPU even though it might have actually
# run on GPU. Note this is what is shown in TensorBoard and has no bearing
# on actual execution.
# TODO: See if we can extract GPU vs CPU information from the PyTorch model
# and pass it correctly to TensorBoard.
#
# Definition of StepStats and DeviceStepStats can be found at
# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/graph/tf_graph_common/proto.ts
# and
# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/step_stats.proto
stepstats = RunMetadata(
step_stats=StepStats(dev_stats=[DeviceStepStats(device="/device:CPU:0")])
)
return GraphDef(node=list_of_nodes, versions=VersionDef(producer=22)), stepstats
# The producer version has been reverse engineered from standard
# TensorBoard logged data.
@contextlib.contextmanager
def _set_model_to_eval(model):
"""Context manager to temporarily set the training mode of ``model`` to eval."""
if not isinstance(model, torch.jit.ScriptFunction):
originally_training = model.training
model.train(False)
try:
yield
finally:
model.train(originally_training)
else:
# Do nothing for ScriptFunction
try:
yield
finally:
pass
def _node_get(node: torch._C.Node, key: str):
"""Get attributes of a node which is polymorphic over return type."""
sel = node.kindOf(key)
return getattr(node, sel)(key)
| GraphPy |
python | gevent__gevent | src/gevent/tests/test__pywsgi.py | {
"start": 22927,
"end": 22997
} | class ____(TestChunkedApp):
chunks = [b'a' * 8192] * 3
| TestBigChunks |
python | pytorch__pytorch | torch/testing/_internal/common_distributed.py | {
"start": 24075,
"end": 40117
} | class ____(TestCase):
MAIN_PROCESS_RANK = -1
# This exit code is used to indicate that the test code had an error and
# exited abnormally. There are certain tests that might use sys.exit() to
# simulate failures and in those cases, we can't have an exit code of 0,
# but we still want to ensure we didn't run into any other errors.
TEST_ERROR_EXIT_CODE = 10
# do not early terminate for distributed tests.
def _should_stop_test_suite(self) -> bool:
return False
# Many test cases init a process group but do not destroy it. This property
# determines whether this base test class should call
# `destroy_process_group` on behalf of the test. Its value is customizable
# by derived TestCase's but it is a pan-TestCase value (cannot be customized
# for each test).
@property
def destroy_pg_upon_exit(self) -> bool:
return True
@property
def world_size(self) -> int:
return DEFAULT_WORLD_SIZE
def join_or_run(self, fn):
@wraps(fn)
def wrapper(self):
if self.rank == self.MAIN_PROCESS_RANK:
self._join_processes(fn)
else:
fn()
return types.MethodType(wrapper, self)
# The main process spawns N subprocesses that run the test.
# Constructor patches current instance test method to
# assume the role of the main process and join its subprocesses,
# or run the underlying test function.
def __init__(
self, method_name: str = "runTest", methodName: str = "runTest"
) -> None:
# methodName is the correct naming in unittest and testslide uses keyword arguments.
# So we need to use both to 1) not break BC and, 2) support testslide.
if methodName != "runTest":
method_name = methodName
super().__init__(method_name)
try:
fn = getattr(self, method_name)
setattr(self, method_name, self.join_or_run(fn))
except AttributeError as e:
if methodName != "runTest":
# we allow instantiation with no explicit method name
# but not an *incorrect* or missing method name
raise ValueError(
f"no such test method in {self.__class__}: {methodName}"
) from e
def setUp(self) -> None:
super().setUp()
# Used for tests that are expected to return a non-0 exit code, such as
# SIGABRT thrown by watchdog.
self.special_return_code_checks: dict = {}
# Used for tests that may return any exit code, which makes it hard to
# check. This is rare, use with caution.
self.skip_return_code_checks: list = []
self.processes = [] # type: ignore[var-annotated]
self.rank = self.MAIN_PROCESS_RANK
self.file_name = tempfile.NamedTemporaryFile(delete=False).name
# pid to pipe consisting of error message from process.
self.pid_to_pipe = {} # type: ignore[var-annotated]
def tearDown(self) -> None:
super().tearDown()
for p in self.processes:
p.terminate()
# Each Process instance holds a few open file descriptors. The unittest
# runner creates a new TestCase instance for each test method and keeps
# it alive until the end of the entire suite. We must thus reset the
# processes to prevent an effective file descriptor leak.
self.processes = []
def _current_test_name(self) -> str:
# self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
return self.id().split(".")[-1]
def _start_processes(self, proc) -> None:
self.processes = []
for rank in range(int(self.world_size)):
parent_conn, child_conn = torch.multiprocessing.Pipe()
process = proc(
target=self.__class__._run,
name="process " + str(rank),
args=(
rank,
self._current_test_name(),
self.file_name,
child_conn,
),
kwargs={
"fake_pg": getattr(self, "fake_pg", False),
},
)
process.start()
logger.info("Started process %s with pid %s", rank, process.pid)
self.pid_to_pipe[process.pid] = parent_conn
self.processes.append(process)
def _spawn_processes(self) -> None:
try:
torch.multiprocessing.set_start_method("spawn")
except RuntimeError:
pass
proc = torch.multiprocessing.get_context("spawn").Process
self._start_processes(proc)
class Event(Enum):
GET_TRACEBACK = 1
@staticmethod
def _event_listener(parent_pipe, signal_pipe, rank: int):
logger.debug("Starting event listener thread for rank %s", rank)
while True:
ready_pipes = multiprocessing.connection.wait([parent_pipe, signal_pipe])
if parent_pipe in ready_pipes:
if parent_pipe.closed:
logger.debug(
"Pipe closed for process %s, stopping event listener thread",
rank,
)
return
event = parent_pipe.recv()
logger.info("Received event %s on process %s", event, rank)
if event == MultiProcessTestCase.Event.GET_TRACEBACK:
# Return traceback to the parent process.
with tempfile.NamedTemporaryFile(mode="r+") as tmp_file:
faulthandler.dump_traceback(tmp_file)
# Flush buffers and seek to read from the beginning
tmp_file.flush()
tmp_file.seek(0)
parent_pipe.send(tmp_file.read())
logger.info("Process %s sent traceback", rank)
if signal_pipe in ready_pipes:
return
@classmethod
def _run(
cls, rank: int, test_name: str, file_name: str, parent_pipe, **kwargs
) -> None:
self = cls(test_name)
self.rank = rank
self.file_name = file_name
self.run_test(test_name, parent_pipe)
def run_test(self, test_name: str, parent_pipe) -> None:
# Start event listener thread.
signal_recv_pipe, signal_send_pipe = torch.multiprocessing.Pipe(duplex=False)
event_listener_thread = threading.Thread(
target=MultiProcessTestCase._event_listener,
args=(parent_pipe, signal_recv_pipe, self.rank),
daemon=True,
)
event_listener_thread.start()
if sys.platform != "win32" and sys.platform != "darwin":
# Register signal handler to dump stack traces on FATALs.
# Windows and MacOS do not support the signal handlers.
torch._C._set_print_stack_traces_on_fatal_signal(True)
# Show full C++ stacktraces when a Python error originating from C++ is raised.
os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1"
common_utils.set_rng_seed()
# self.id() == e.g. '__main__.TestDistributed.test_get_rank'
# We're retrieving a corresponding test and executing it.
try:
getattr(self, test_name)()
except unittest.SkipTest as se:
logger.info( # noqa: G200
"Process %s skipping test %s for following reason: %s",
self.rank,
test_name,
str(se),
)
sys.exit(TEST_SKIPS["generic"].exit_code)
except Exception:
logger.error(
"Caught exception: \n%s exiting process %s with exit code: %s",
traceback.format_exc(),
self.rank,
MultiProcessTestCase.TEST_ERROR_EXIT_CODE,
)
# Send error to parent process.
parent_pipe.send(traceback.format_exc())
sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE)
finally:
if signal_send_pipe is not None:
signal_send_pipe.send(None)
assert event_listener_thread is not None
event_listener_thread.join()
# Close pipe after done with test.
parent_pipe.close()
if self.destroy_pg_upon_exit:
try:
# Some tests do destroy the pgs, and destroy can't be called twice.
# This avoids spewing warnings about improperly shutting down.
c10d.destroy_process_group()
except (AssertionError, ValueError):
pass
def _get_timedout_process_traceback(self) -> None:
pipes = []
for i, process in enumerate(self.processes):
if process.exitcode is None:
pipe = self.pid_to_pipe[process.pid]
try:
pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK)
pipes.append((i, pipe))
except ConnectionError:
logger.exception(
"Encountered error while trying to get traceback for process %s",
i,
)
# Wait for results.
for rank, pipe in pipes:
try:
# Wait for traceback
if pipe.poll(5):
if pipe.closed:
logger.info(
"Pipe closed for process %s, cannot retrieve traceback",
rank,
)
continue
traceback = pipe.recv()
logger.error(
"Process %s timed out with traceback: \n\n%s", rank, traceback
)
else:
logger.error(
"Could not retrieve traceback for timed out process: %s", rank
)
except ConnectionError:
logger.exception(
"Encountered error while trying to get traceback for process %s",
rank,
)
def _join_processes(self, fn) -> None:
timeout = get_timeout(self.id())
start_time = time.time()
subprocess_error = False
try:
while True:
# check to see if any subprocess exited with an error early.
for i, p in enumerate(self.processes):
# This is the exit code processes exit with if they
# encountered an exception.
if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE:
print(
f"Process {i} terminated with exit code {p.exitcode}, terminating remaining processes."
)
active_children = torch.multiprocessing.active_children()
for ac in active_children:
ac.terminate()
subprocess_error = True
break
if subprocess_error:
break
# All processes have joined cleanly if they all a valid exitcode
if all(p.exitcode is not None for p in self.processes):
break
# Check if we should time out the test. If so, we terminate each process.
elapsed = time.time() - start_time
if elapsed > timeout:
self._get_timedout_process_traceback()
print(
f"Timing out after {timeout} seconds and killing subprocesses."
)
for p in self.processes:
p.terminate()
break
# Sleep to avoid excessive busy polling.
time.sleep(0.1)
elapsed_time = time.time() - start_time
self._check_return_codes(fn, elapsed_time)
finally:
# Close all pipes
for pipe in self.pid_to_pipe.values():
pipe.close()
def _check_return_codes(self, fn, elapsed_time) -> None:
"""
Checks that the return codes of all spawned processes match, and skips
tests if they returned a return code indicating a skipping condition.
"""
# If no processes are spawned, there is nothing to check.
if not self.processes:
logger.warning(
"Note: no subprocesses were spawned, test was likely skipped."
)
return
first_process = self.processes[0]
# first, we check if there are errors in actual processes
# (via TEST_ERROR_EXIT CODE), and raise an exception for those.
# the reason we do this is to attempt to raise a more helpful error
# message than "Process x terminated/timed out"
# TODO: we should pipe the exception of the failed subprocess here.
# Currently, the actual exception is displayed as a logging output.
errored_processes = [
(i, p)
for i, p in enumerate(self.processes)
if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE
]
if errored_processes:
error = ""
for i, process in errored_processes:
# Get error from pipe.
error_message = self.pid_to_pipe[process.pid].recv()
error += (
f"Process {i} exited with error code {MultiProcessTestCase.TEST_ERROR_EXIT_CODE} "
f"and exception:\n{error_message}\n"
)
raise RuntimeError(error)
# If no process exited uncleanly, we check for timeouts, and then ensure
# each process exited cleanly.
for i, p in enumerate(self.processes):
if p.exitcode is None:
raise RuntimeError(
f"Process {i} terminated or timed out after {elapsed_time} seconds"
)
# Skip the test return code check
if fn in self.skip_return_code_checks:
return
for skip in TEST_SKIPS.values():
if first_process.exitcode == skip.exit_code:
if IS_SANDCASTLE:
# Don't use unittest.skip to skip the test on sandcastle
# since it creates tasks for skipped tests assuming there
# is some follow-up needed. Instead just "pass" the test
# with an appropriate message.
logger.info(
"Skipping %s on sandcastle for the following reason: %s",
self.id(),
skip.message,
)
return
else:
raise unittest.SkipTest(skip.message)
# In most cases, we expect test to return exit code 0, standing for success.
expected_return_code = 0
# In some negative tests, we expect test to return non-zero exit code,
# such as watchdog throwing SIGABRT.
if fn in self.special_return_code_checks:
expected_return_code = self.special_return_code_checks[fn]
self.assertEqual(
first_process.exitcode,
expected_return_code,
msg=f"Expected exit code {expected_return_code} but got {first_process.exitcode} for pid: {first_process.pid}",
)
@property
def is_master(self) -> bool:
return self.rank == 0
# Utility base class for distributed Multi Process Test cases
# This abstracts the PG creation and deletion, the backends are selected based
# on device type. The tests functions can be instantiated per device type using
# common_device_type.instantiate_device_type_tests
# other backends can add entry in backend() function
| MultiProcessTestCase |
python | sqlalchemy__sqlalchemy | test/orm/test_selectin_relations.py | {
"start": 94303,
"end": 97325
} | class ____(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey("b.id"))
a2_id = Column(ForeignKey("a2.id"))
a2 = relationship("A2")
b = relationship("B")
class A2(Base):
__tablename__ = "a2"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey("b.id"))
b = relationship("B")
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
c1_m2o_id = Column(ForeignKey("c1_m2o.id"))
c2_m2o_id = Column(ForeignKey("c2_m2o.id"))
c1_o2m = relationship("C1o2m")
c2_o2m = relationship("C2o2m")
c1_m2o = relationship("C1m2o")
c2_m2o = relationship("C2m2o")
class C1o2m(Base):
__tablename__ = "c1_o2m"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey("b.id"))
class C2o2m(Base):
__tablename__ = "c2_o2m"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey("b.id"))
class C1m2o(Base):
__tablename__ = "c1_m2o"
id = Column(Integer, primary_key=True)
class C2m2o(Base):
__tablename__ = "c2_m2o"
id = Column(Integer, primary_key=True)
@classmethod
def insert_data(cls, connection):
A, A2, B, C1o2m, C2o2m, C1m2o, C2m2o = cls.classes(
"A", "A2", "B", "C1o2m", "C2o2m", "C1m2o", "C2m2o"
)
s = Session(connection)
b = B(
c1_o2m=[C1o2m()], c2_o2m=[C2o2m()], c1_m2o=C1m2o(), c2_m2o=C2m2o()
)
s.add(A(b=b, a2=A2(b=b)))
s.commit()
def test_o2m(self):
A, A2, B, C1o2m, C2o2m = self.classes("A", "A2", "B", "C1o2m", "C2o2m")
s = fixture_session()
# A -J-> B -L-> C1
# A -J-> B -S-> C2
# A -J-> A2 -J-> B -S-> C1
# A -J-> A2 -J-> B -L-> C2
q = s.query(A).options(
joinedload(A.b).selectinload(B.c2_o2m),
joinedload(A.a2).joinedload(A2.b).selectinload(B.c1_o2m),
)
a1 = q.all()[0]
is_true("c1_o2m" in a1.b.__dict__)
is_true("c2_o2m" in a1.b.__dict__)
def test_m2o(self):
A, A2, B, C1m2o, C2m2o = self.classes("A", "A2", "B", "C1m2o", "C2m2o")
s = fixture_session()
# A -J-> B -L-> C1
# A -J-> B -S-> C2
# A -J-> A2 -J-> B -S-> C1
# A -J-> A2 -J-> B -L-> C2
q = s.query(A).options(
joinedload(A.b).selectinload(B.c2_m2o),
joinedload(A.a2).joinedload(A2.b).selectinload(B.c1_m2o),
)
a1 = q.all()[0]
is_true("c1_m2o" in a1.b.__dict__)
is_true("c2_m2o" in a1.b.__dict__)
| TestExistingRowPopulation |
python | tensorflow__tensorflow | tensorflow/python/distribute/strategy_common_test.py | {
"start": 17105,
"end": 22930
} | class ____(test.TestCase, parameterized.TestCase):
def testDense(self, strategy, tf_function):
if (strategy_test_lib.is_tpu_strategy(strategy) and
tf_function is combinations.no_tf_function):
self.skipTest('Skip TPUStrategy + eager combination.')
@tf_function
def fn():
def replica_fn():
value = array_ops.identity(1.0)
rep_ctx = distribute_lib.get_replica_context()
reduced = rep_ctx.all_reduce(reduce_util.ReduceOp.SUM, value)
return reduced
return strategy.experimental_local_results(strategy.run(replica_fn))
got = fn()[0]
self.assertEqual(got, 1.0 * strategy.num_replicas_in_sync)
def testSparse(self, strategy, tf_function):
if tf_function is combinations.no_tf_function:
self.skipTest('Skip IndexedSlices + eager combination.')
@tf_function
def fn():
def replica_fn():
value = indexed_slices.IndexedSlices(
values=array_ops.identity([[1.0]]),
indices=array_ops.identity([0]),
dense_shape=array_ops.identity([5, 1]))
rep_ctx = distribute_lib.get_replica_context()
reduced = rep_ctx.all_reduce(reduce_util.ReduceOp.MEAN, value)
return reduced
return strategy.experimental_local_results(strategy.run(replica_fn))
got = fn()[0]
if not strategy_test_lib.is_tpu_strategy(strategy):
self.assertIsInstance(got, indexed_slices.IndexedSlices)
expect = indexed_slices.IndexedSlices(
values=array_ops.identity([[1.0]]),
indices=array_ops.identity([0]),
dense_shape=array_ops.identity([5, 1]))
self.assertAllEqual(
ops.convert_to_tensor(got), ops.convert_to_tensor(expect))
def testSparseTuple(self, strategy, tf_function):
if tf_function is combinations.no_tf_function:
self.skipTest('Skip IndexedSlices + eager combination.')
@tf_function
def fn():
def replica_fn():
value1 = indexed_slices.IndexedSlices(
values=array_ops.identity([[1.0]]),
indices=array_ops.identity([0]),
dense_shape=array_ops.identity([5, 1]))
value2 = indexed_slices.IndexedSlices(
values=array_ops.identity([[2.0]]),
indices=array_ops.identity([0]),
dense_shape=array_ops.identity([5, 1]))
rep_ctx = distribute_lib.get_replica_context()
reduced = rep_ctx.all_reduce(reduce_util.ReduceOp.SUM, [value1, value2])
return reduced
return strategy.experimental_local_results(strategy.run(replica_fn))
got = fn()[0]
if not strategy_test_lib.is_tpu_strategy(strategy):
for g in got:
self.assertIsInstance(g, indexed_slices.IndexedSlices)
expect = [
indexed_slices.IndexedSlices(
values=array_ops.identity([[1.0 * strategy.num_replicas_in_sync]]),
indices=array_ops.identity([0]),
dense_shape=array_ops.identity([5, 1])),
indexed_slices.IndexedSlices(
values=array_ops.identity([[2.0 * strategy.num_replicas_in_sync]]),
indices=array_ops.identity([0]),
dense_shape=array_ops.identity([5, 1]))
]
self.assertAllEqual(
nest.map_structure(ops.convert_to_tensor, got),
nest.map_structure(ops.convert_to_tensor, expect))
def testNestedInput(self, strategy, tf_function):
if tf_function is combinations.no_tf_function:
self.skipTest('Skip IndexedSlices + eager combination.')
@tf_function
def fn():
def replica_fn():
value = (array_ops.identity(1.0),
indexed_slices.IndexedSlices(
values=array_ops.identity([[1.0]]),
indices=array_ops.identity([0]),
dense_shape=array_ops.identity([5, 1])),
array_ops.identity(2.0),
indexed_slices.IndexedSlices(
values=array_ops.identity([[2.0]]),
indices=array_ops.identity([1]),
dense_shape=array_ops.identity([5, 1])))
rep_ctx = distribute_lib.get_replica_context()
reduced = rep_ctx.all_reduce(reduce_util.ReduceOp.SUM, value)
return reduced
return strategy.experimental_local_results(strategy.run(replica_fn))
got = fn()[0]
expect = (1.0 * strategy.num_replicas_in_sync,
indexed_slices.IndexedSlices(
values=array_ops.identity(
[[1.0 * strategy.num_replicas_in_sync]]),
indices=array_ops.identity([0]),
dense_shape=array_ops.identity([5, 1])),
2.0 * strategy.num_replicas_in_sync,
indexed_slices.IndexedSlices(
values=array_ops.identity(
[[2.0 * strategy.num_replicas_in_sync]]),
indices=array_ops.identity([1]),
dense_shape=array_ops.identity([5, 1])))
self.assertAllClose(
nest.map_structure(ops.convert_to_tensor, got),
nest.map_structure(ops.convert_to_tensor, expect))
def _make_indexed_slices(values, indices, dense_shape):
tensor = indexed_slices.IndexedSlices(
values=constant_op.constant(values),
indices=constant_op.constant(indices),
dense_shape=constant_op.constant(dense_shape))
return tensor
def _get_num_replicas_per_client(strategy):
if isinstance(strategy, CollectiveAllReduceStrategy):
resolver = strategy.cluster_resolver
return max(nest.flatten(resolver.num_accelerators())[0], 1)
else:
return strategy.num_replicas_in_sync
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
mode=['eager']))
| AllReduceTest |
python | django__django | tests/generic_relations/test_forms.py | {
"start": 590,
"end": 14931
} | class ____(TestCase):
def test_output(self):
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
formset = GenericFormSet()
self.assertHTMLEqual(
"".join(form.as_p() for form in formset.forms),
"""
<p><label
for="id_generic_relations-taggeditem-content_type-object_id-0-tag">
Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-0-tag"
type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag"
maxlength="50"></p>
<p><label
for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">
Delete:</label>
<input type="checkbox"
name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">
<input type="hidden"
name="generic_relations-taggeditem-content_type-object_id-0-id"
id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p>
""",
)
formset = GenericFormSet(instance=Animal())
self.assertHTMLEqual(
"".join(form.as_p() for form in formset.forms),
"""
<p><label
for="id_generic_relations-taggeditem-content_type-object_id-0-tag">
Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-0-tag"
type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag"
maxlength="50"></p>
<p><label
for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">
Delete:</label>
<input type="checkbox"
name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">
<input type="hidden"
name="generic_relations-taggeditem-content_type-object_id-0-id"
id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p>
""",
)
platypus = Animal.objects.create(
common_name="Platypus",
latin_name="Ornithorhynchus anatinus",
)
platypus.tags.create(tag="shiny")
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
formset = GenericFormSet(instance=platypus)
tagged_item_id = TaggedItem.objects.get(tag="shiny", object_id=platypus.id).id
self.assertHTMLEqual(
"".join(form.as_p() for form in formset.forms),
"""
<p><label
for="id_generic_relations-taggeditem-content_type-object_id-0-tag">
Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-0-tag"
type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag"
value="shiny" maxlength="50"></p>
<p><label
for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">
Delete:</label>
<input type="checkbox"
name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">
<input type="hidden"
name="generic_relations-taggeditem-content_type-object_id-0-id"
value="%s"
id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p>
<p><label
for="id_generic_relations-taggeditem-content_type-object_id-1-tag">
Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-1-tag"
type="text"
name="generic_relations-taggeditem-content_type-object_id-1-tag"
maxlength="50"></p>
<p><label
for="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">
Delete:</label>
<input type="checkbox"
name="generic_relations-taggeditem-content_type-object_id-1-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">
<input type="hidden"
name="generic_relations-taggeditem-content_type-object_id-1-id"
id="id_generic_relations-taggeditem-content_type-object_id-1-id"></p>
"""
% tagged_item_id,
)
lion = Animal.objects.create(common_name="Lion", latin_name="Panthera leo")
formset = GenericFormSet(instance=lion, prefix="x")
self.assertHTMLEqual(
"".join(form.as_p() for form in formset.forms),
"""
<p><label for="id_x-0-tag">Tag:</label>
<input id="id_x-0-tag" type="text" name="x-0-tag" maxlength="50"></p>
<p><label for="id_x-0-DELETE">Delete:</label>
<input type="checkbox" name="x-0-DELETE" id="id_x-0-DELETE">
<input type="hidden" name="x-0-id" id="id_x-0-id"></p>
""",
)
def test_options(self):
TaggedItemFormSet = generic_inlineformset_factory(
TaggedItem,
can_delete=False,
exclude=["tag"],
extra=3,
)
platypus = Animal.objects.create(
common_name="Platypus", latin_name="Ornithorhynchus anatinus"
)
harmless = platypus.tags.create(tag="harmless")
mammal = platypus.tags.create(tag="mammal")
# Works without a queryset.
formset = TaggedItemFormSet(instance=platypus)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<input type="hidden" '
'name="generic_relations-taggeditem-content_type-object_id-0-id" '
'value="%s" '
'id="id_generic_relations-taggeditem-content_type-object_id-0-id">'
% harmless.pk,
)
self.assertEqual(formset.forms[0].instance, harmless)
self.assertEqual(formset.forms[1].instance, mammal)
self.assertIsNone(formset.forms[2].instance.pk)
# A queryset can be used to alter display ordering.
formset = TaggedItemFormSet(
instance=platypus, queryset=TaggedItem.objects.order_by("-tag")
)
self.assertEqual(len(formset.forms), 5)
self.assertEqual(formset.forms[0].instance, mammal)
self.assertEqual(formset.forms[1].instance, harmless)
self.assertIsNone(formset.forms[2].instance.pk)
# A queryset that omits items.
formset = TaggedItemFormSet(
instance=platypus,
queryset=TaggedItem.objects.filter(tag__startswith="harm"),
)
self.assertEqual(len(formset.forms), 4)
self.assertEqual(formset.forms[0].instance, harmless)
self.assertIsNone(formset.forms[1].instance.pk)
def test_get_queryset_ordering(self):
"""
BaseGenericInlineFormSet.get_queryset() adds default ordering, if
needed.
"""
inline_formset = generic_inlineformset_factory(TaggedItem, exclude=("tag",))
formset = inline_formset(instance=Gecko.objects.create())
self.assertIs(formset.get_queryset().ordered, True)
def test_initial(self):
quartz = Mineral.objects.create(name="Quartz", hardness=7)
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
ctype = ContentType.objects.get_for_model(quartz)
initial_data = [
{
"tag": "lizard",
"content_type": ctype.pk,
"object_id": quartz.pk,
}
]
formset = GenericFormSet(initial=initial_data)
self.assertEqual(formset.forms[0].initial, initial_data[0])
def test_meta_widgets(self):
"""TaggedItemForm has a widget defined in Meta."""
Formset = generic_inlineformset_factory(TaggedItem, TaggedItemForm)
form = Formset().forms[0]
self.assertIsInstance(form["tag"].field.widget, CustomWidget)
@isolate_apps("generic_relations")
def test_incorrect_content_type(self):
class BadModel(models.Model):
content_type = models.PositiveIntegerField()
msg = (
"fk_name 'generic_relations.BadModel.content_type' is not a ForeignKey to "
"ContentType"
)
with self.assertRaisesMessage(Exception, msg):
generic_inlineformset_factory(BadModel, TaggedItemForm)
def test_save_new_uses_form_save(self):
class SaveTestForm(forms.ModelForm):
def save(self, *args, **kwargs):
self.instance.saved_by = "custom method"
return super().save(*args, **kwargs)
Formset = generic_inlineformset_factory(
ForProxyModelModel, fields="__all__", form=SaveTestForm
)
instance = ProxyRelatedModel.objects.create()
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "",
"form-0-title": "foo",
}
formset = Formset(data, instance=instance, prefix="form")
self.assertTrue(formset.is_valid())
new_obj = formset.save()[0]
self.assertEqual(new_obj.saved_by, "custom method")
def test_save_new_for_proxy(self):
Formset = generic_inlineformset_factory(
ForProxyModelModel, fields="__all__", for_concrete_model=False
)
instance = ProxyRelatedModel.objects.create()
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "",
"form-0-title": "foo",
}
formset = Formset(data, instance=instance, prefix="form")
self.assertTrue(formset.is_valid())
(new_obj,) = formset.save()
self.assertEqual(new_obj.obj, instance)
def test_save_new_for_concrete(self):
Formset = generic_inlineformset_factory(
ForProxyModelModel, fields="__all__", for_concrete_model=True
)
instance = ProxyRelatedModel.objects.create()
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "",
"form-0-title": "foo",
}
formset = Formset(data, instance=instance, prefix="form")
self.assertTrue(formset.is_valid())
(new_obj,) = formset.save()
self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel)
def test_initial_count(self):
GenericFormSet = generic_inlineformset_factory(TaggedItem)
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "",
}
formset = GenericFormSet(data=data, prefix="form")
self.assertEqual(formset.initial_form_count(), 3)
formset = GenericFormSet(data=data, prefix="form", save_as_new=True)
self.assertEqual(formset.initial_form_count(), 0)
def test_save_as_new(self):
"""
The save_as_new parameter creates new items that are associated with
the object.
"""
lion = Animal.objects.create(common_name="Lion", latin_name="Panthera leo")
yellow = lion.tags.create(tag="yellow")
hairy = lion.tags.create(tag="hairy")
GenericFormSet = generic_inlineformset_factory(TaggedItem)
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "2",
"form-MAX_NUM_FORMS": "",
"form-0-id": str(yellow.pk),
"form-0-tag": "hunts",
"form-1-id": str(hairy.pk),
"form-1-tag": "roars",
}
formset = GenericFormSet(data, instance=lion, prefix="form", save_as_new=True)
self.assertTrue(formset.is_valid())
tags = formset.save()
self.assertEqual([tag.tag for tag in tags], ["hunts", "roars"])
hunts, roars = tags
self.assertSequenceEqual(
lion.tags.order_by("tag"), [hairy, hunts, roars, yellow]
)
def test_absolute_max(self):
GenericFormSet = generic_inlineformset_factory(TaggedItem, absolute_max=1500)
data = {
"form-TOTAL_FORMS": "1501",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "0",
}
formset = GenericFormSet(data=data, prefix="form")
self.assertIs(formset.is_valid(), False)
self.assertEqual(len(formset.forms), 1500)
self.assertEqual(
formset.non_form_errors(),
["Please submit at most 1000 forms."],
)
def test_absolute_max_with_max_num(self):
GenericFormSet = generic_inlineformset_factory(
TaggedItem,
max_num=20,
absolute_max=100,
)
data = {
"form-TOTAL_FORMS": "101",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "0",
}
formset = GenericFormSet(data=data, prefix="form")
self.assertIs(formset.is_valid(), False)
self.assertEqual(len(formset.forms), 100)
self.assertEqual(
formset.non_form_errors(),
["Please submit at most 20 forms."],
)
def test_can_delete_extra(self):
GenericFormSet = generic_inlineformset_factory(
TaggedItem,
can_delete=True,
can_delete_extra=True,
extra=2,
)
formset = GenericFormSet()
self.assertEqual(len(formset), 2)
self.assertIn("DELETE", formset.forms[0].fields)
self.assertIn("DELETE", formset.forms[1].fields)
def test_disable_delete_extra(self):
GenericFormSet = generic_inlineformset_factory(
TaggedItem,
can_delete=True,
can_delete_extra=False,
extra=2,
)
formset = GenericFormSet()
self.assertEqual(len(formset), 2)
self.assertNotIn("DELETE", formset.forms[0].fields)
self.assertNotIn("DELETE", formset.forms[1].fields)
| GenericInlineFormsetTests |
python | huggingface__transformers | src/transformers/models/sam3/configuration_sam3.py | {
"start": 4686,
"end": 7796
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Sam3VisionModel`]. It is used to instantiate a SAM
vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
defaults will yield a similar configuration to that of SAM 3
[facebook/sam3](https://huggingface.co/facebook/sam3) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*):
Configuration for the vision backbone. This is used to instantiate the backbone using
`AutoModel.from_config`.
fpn_hidden_size (`int`, *optional*, defaults to 256):
The hidden dimension of the FPN.
backbone_feature_sizes (`List[List[int]]`, *optional*, defaults to `[[288, 288], [144, 144], [72, 72]]`):
The spatial sizes (height, width) of the feature maps from the backbone at different scales.
scale_factors (`list[float]`, *optional*, defaults to `[4.0, 2.0, 1.0, 0.5]`):
Scale factors for FPN multi-scale features. List of scaling factors for each FPN level.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the neck.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon for the layer normalization.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
"""
base_config_key = "vision_config"
model_type = "sam3_vision_model"
sub_configs = {
"backbone_config": AutoConfig,
}
def __init__(
self,
backbone_config=None,
fpn_hidden_size=256,
backbone_feature_sizes=None,
scale_factors=None,
hidden_act="gelu",
layer_norm_eps=1e-6,
initializer_range=0.02,
**kwargs,
):
scale_factors = [4.0, 2.0, 1.0, 0.5] if scale_factors is None else scale_factors
if backbone_feature_sizes is None:
backbone_feature_sizes = [[288, 288], [144, 144], [72, 72]]
if isinstance(backbone_config, dict):
backbone_config["model_type"] = backbone_config.get("model_type", "sam3_vit_model")
backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config)
elif backbone_config is None:
backbone_config = CONFIG_MAPPING["sam3_vit_model"]()
self.backbone_config = backbone_config
# Neck
self.fpn_hidden_size = fpn_hidden_size
self.scale_factors = scale_factors
self.backbone_feature_sizes = backbone_feature_sizes
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
super().__init__(**kwargs)
| Sam3VisionConfig |
python | getsentry__sentry | tests/sentry/rules/actions/test_notify_event.py | {
"start": 294,
"end": 769
} | class ____(RuleTestCase):
rule_cls = NotifyEventAction
def test_applies_correctly(self) -> None:
event = self.get_event()
plugin = MagicMock()
rule = self.get_rule()
rule.get_plugins = lambda: (LegacyPluginService(plugin),)
results = list(rule.after(event=event))
assert len(results) == 1
assert plugin.should_notify.call_count == 1
assert results[0].callback is plugin.rule_notify
| NotifyEventActionTest |
python | pennersr__django-allauth | allauth/socialaccount/providers/odnoklassniki/provider.py | {
"start": 242,
"end": 857
} | class ____(ProviderAccount):
def get_profile_url(self):
return "https://ok.ru/profile/" + self.account.extra_data["uid"]
def get_avatar_url(self):
ret = None
pic_big_url = self.account.extra_data.get("pic1024x768")
pic_medium_url = self.account.extra_data.get("pic640x480")
pic_small_url = self.account.extra_data.get("pic190x190")
if pic_big_url:
return pic_big_url
elif pic_medium_url:
return pic_medium_url
elif pic_small_url:
return pic_small_url
else:
return ret
| OdnoklassnikiAccount |
python | django__django | tests/user_commands/management/commands/subparser_dest.py | {
"start": 54,
"end": 373
} | class ____(BaseCommand):
def add_arguments(self, parser):
subparsers = parser.add_subparsers(dest="subcommand", required=True)
parser_foo = subparsers.add_parser("foo")
parser_foo.add_argument("--bar")
def handle(self, *args, **options):
self.stdout.write(",".join(options))
| Command |
python | aimacode__aima-python | probability.py | {
"start": 593,
"end": 2352
} | class ____:
"""A discrete probability distribution. You name the random variable
in the constructor, then assign and query probability of values.
>>> P = ProbDist('Flip'); P['H'], P['T'] = 0.25, 0.75; P['H']
0.25
>>> P = ProbDist('X', {'lo': 125, 'med': 375, 'hi': 500})
>>> P['lo'], P['med'], P['hi']
(0.125, 0.375, 0.5)
"""
def __init__(self, var_name='?', freq=None):
"""If freq is given, it is a dictionary of values - frequency pairs,
then ProbDist is normalized."""
self.prob = {}
self.var_name = var_name
self.values = []
if freq:
for (v, p) in freq.items():
self[v] = p
self.normalize()
def __getitem__(self, val):
"""Given a value, return P(value)."""
try:
return self.prob[val]
except KeyError:
return 0
def __setitem__(self, val, p):
"""Set P(val) = p."""
if val not in self.values:
self.values.append(val)
self.prob[val] = p
def normalize(self):
"""Make sure the probabilities of all values sum to 1.
Returns the normalized distribution.
Raises a ZeroDivisionError if the sum of the values is 0."""
total = sum(self.prob.values())
if not np.isclose(total, 1.0):
for val in self.prob:
self.prob[val] /= total
return self
def show_approx(self, numfmt='{:.3g}'):
"""Show the probabilities rounded and sorted by key, for the
sake of portable doctests."""
return ', '.join([('{}: ' + numfmt).format(v, p) for (v, p) in sorted(self.prob.items())])
def __repr__(self):
return "P({})".format(self.var_name)
| ProbDist |
python | pytorch__pytorch | torch/testing/_internal/autograd_function_db.py | {
"start": 13346,
"end": 14528
} | class ____(torch.autograd.Function):
generate_vmap_rule = True
@staticmethod
def forward(x, y):
return x.clone(), y.clone()
@staticmethod
def setup_context(ctx, inputs, outputs):
pass
@staticmethod
def backward(ctx, gx, gy):
# Intentionally returning torch.zeros instead of zeros_like or new_zeros.
# Also intentionally not None.
return (
# Intentionally too-large gradient
torch.zeros(3, 4, *gx.shape, dtype=gx.dtype, device=gx.device),
torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device),
)
@staticmethod
def jvp(ctx, gx, gy):
# Intentionally returning torch.zeros instead of zeros_like or new_zeros.
# Also intentionally not None.
return (
torch.zeros(gx.shape, dtype=gx.dtype, device=gx.device),
torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device),
)
def sample_inputs_forward_default_args(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(make_arg(3, 5))
| ZeroGradientsGenVmap |
python | aimacode__aima-python | planning.py | {
"start": 31156,
"end": 36267
} | class ____:
"""
Contains the state of the planning problem
and exhaustive list of actions which use the
states as pre-condition.
"""
def __init__(self, kb):
"""Initializes variables to hold state and action details of a level"""
self.kb = kb
# current state
self.current_state = kb.clauses
# current action to state link
self.current_action_links = {}
# current state to action link
self.current_state_links = {}
# current action to next state link
self.next_action_links = {}
# next state to current action link
self.next_state_links = {}
# mutually exclusive actions
self.mutex = []
def __call__(self, actions, objects):
self.build(actions, objects)
self.find_mutex()
def separate(self, e):
"""Separates an iterable of elements into positive and negative parts"""
positive = []
negative = []
for clause in e:
if clause.op[:3] == 'Not':
negative.append(clause)
else:
positive.append(clause)
return positive, negative
def find_mutex(self):
"""Finds mutually exclusive actions"""
# Inconsistent effects
pos_nsl, neg_nsl = self.separate(self.next_state_links)
for negeff in neg_nsl:
new_negeff = Expr(negeff.op[3:], *negeff.args)
for poseff in pos_nsl:
if new_negeff == poseff:
for a in self.next_state_links[poseff]:
for b in self.next_state_links[negeff]:
if {a, b} not in self.mutex:
self.mutex.append({a, b})
# Interference will be calculated with the last step
pos_csl, neg_csl = self.separate(self.current_state_links)
# Competing needs
for pos_precond in pos_csl:
for neg_precond in neg_csl:
new_neg_precond = Expr(neg_precond.op[3:], *neg_precond.args)
if new_neg_precond == pos_precond:
for a in self.current_state_links[pos_precond]:
for b in self.current_state_links[neg_precond]:
if {a, b} not in self.mutex:
self.mutex.append({a, b})
# Inconsistent support
state_mutex = []
for pair in self.mutex:
next_state_0 = self.next_action_links[list(pair)[0]]
if len(pair) == 2:
next_state_1 = self.next_action_links[list(pair)[1]]
else:
next_state_1 = self.next_action_links[list(pair)[0]]
if (len(next_state_0) == 1) and (len(next_state_1) == 1):
state_mutex.append({next_state_0[0], next_state_1[0]})
self.mutex = self.mutex + state_mutex
def build(self, actions, objects):
"""Populates the lists and dictionaries containing the state action dependencies"""
for clause in self.current_state:
p_expr = Expr('P' + clause.op, *clause.args)
self.current_action_links[p_expr] = [clause]
self.next_action_links[p_expr] = [clause]
self.current_state_links[clause] = [p_expr]
self.next_state_links[clause] = [p_expr]
for a in actions:
num_args = len(a.args)
possible_args = tuple(itertools.permutations(objects, num_args))
for arg in possible_args:
if a.check_precond(self.kb, arg):
for num, symbol in enumerate(a.args):
if not symbol.op.islower():
arg = list(arg)
arg[num] = symbol
arg = tuple(arg)
new_action = a.substitute(Expr(a.name, *a.args), arg)
self.current_action_links[new_action] = []
for clause in a.precond:
new_clause = a.substitute(clause, arg)
self.current_action_links[new_action].append(new_clause)
if new_clause in self.current_state_links:
self.current_state_links[new_clause].append(new_action)
else:
self.current_state_links[new_clause] = [new_action]
self.next_action_links[new_action] = []
for clause in a.effect:
new_clause = a.substitute(clause, arg)
self.next_action_links[new_action].append(new_clause)
if new_clause in self.next_state_links:
self.next_state_links[new_clause].append(new_action)
else:
self.next_state_links[new_clause] = [new_action]
def perform_actions(self):
"""Performs the necessary actions and returns a new Level"""
new_kb = FolKB(list(set(self.next_state_links.keys())))
return Level(new_kb)
| Level |
python | jazzband__django-polymorphic | src/polymorphic/admin/parentadmin.py | {
"start": 845,
"end": 952
} | class ____(RuntimeError):
"The admin model can't be registered anymore at this point."
| RegistrationClosed |
python | tensorflow__tensorflow | tensorflow/python/trackable/data_structures.py | {
"start": 24567,
"end": 26693
} | class ____(TrackableDataStructure, collections_abc.Mapping):
"""An append-only trackable mapping data structure with string keys.
Maintains checkpoint dependencies on its contents (which must also be
trackable), named based on its keys.
Note that once a key has been added, it may not be deleted or replaced.
"""
def __init__(self, *args, **kwargs):
"""Construct a new sequence. Arguments are passed to `dict()`."""
super().__init__()
self._storage = self._make_storage(*args, **kwargs)
self._storage.update(
{key: self._track_value(
value, name=self._name_element(key))
for key, value in self._storage.items()})
def __copy__(self):
return type(self)(copy.copy(self._storage))
def __deepcopy__(self, memo):
return type(self)(copy.deepcopy(self._storage, memo))
def _make_storage(self, *args, **kwargs):
return dict(*args, **kwargs)
@property
def _values(self):
"""Collect values for TrackableDataStructure."""
# Sort items deterministically by key
ordered = list(zip(*sorted(self.items(), key=lambda it: it[0])))
if ordered:
return ordered[1]
return []
def _name_element(self, key):
if not isinstance(key, str):
raise TypeError(
f"Mapping accepts only string keys, but got a key {repr(key)}.")
return str(key)
def __setitem__(self, key, value):
name = self._name_element(key)
value = self._track_value(value, name=name)
current_value = self._storage.setdefault(key, value)
if current_value is not value:
raise ValueError(
"Mappings are an append-only data structure. Tried to overwrite the "
f"key '{key}' with value {value}, but it already contains "
f"{current_value}")
def update(self, *args, **kwargs):
for key, value in dict(*args, **kwargs).items():
self[key] = value
def __getitem__(self, key):
return self._storage[key]
def __len__(self):
return len(self._storage)
def __repr__(self):
return "Mapping(%s)" % (repr(self._storage),)
def __iter__(self):
return iter(self._storage)
| Mapping |
python | huggingface__transformers | src/transformers/trainer.py | {
"start": 8465,
"end": 258416
} | class ____:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*):
The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed.
<Tip>
[`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use
your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers
models.
</Tip>
args ([`TrainingArguments`], *optional*):
The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the
`output_dir` set to a directory named *tmp_trainer* in the current directory if not provided.
data_collator (`DataCollator`, *optional*):
The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will
default to [`default_data_collator`] if no `processing_class` is provided, an instance of
[`DataCollatorWithPadding`] otherwise if the processing_class is a feature extractor or tokenizer.
train_dataset (Union[`torch.utils.data.Dataset`, `torch.utils.data.IterableDataset`, `datasets.Dataset`], *optional*):
The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed.
Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a
distributed fashion, your iterable dataset should either use a internal attribute `generator` that is a
`torch.Generator` for the randomization that must be identical on all processes (and the Trainer will
manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally
sets the seed of the RNGs used.
eval_dataset (Union[`torch.utils.data.Dataset`, dict[str, `torch.utils.data.Dataset`], `datasets.Dataset`]), *optional*):
The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each
dataset prepending the dictionary key to the metric name.
processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
Processing class used to process the data. If provided, will be used to automatically process the inputs
for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
reuse the fine-tuned model.
model_init (`Callable[[], PreTrainedModel]`, *optional*):
A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start
from a new instance of the model as given by this function.
The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to
be able to choose different architectures according to hyper parameters (such as layer count, sizes of
inner layers, dropout probabilities etc).
compute_loss_func (`Callable`, *optional*):
A function that accepts the raw model outputs, labels, and the number of items in the entire accumulated
batch (batch_size * gradient_accumulation_steps) and returns the loss. For example, see the default [loss function](https://github.com/huggingface/transformers/blob/052e652d6d53c2b26ffde87e039b723949a53493/src/transformers/trainer.py#L3618) used by [`Trainer`].
compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):
The function that will be used to compute metrics at evaluation. Must take a [`EvalPrediction`] and return
a dictionary string to metric values. *Note* When passing TrainingArgs with `batch_eval_metrics` set to
`True`, your compute_metrics function must take a boolean `compute_result` argument. This will be triggered
after the last eval batch to signal that the function needs to calculate and return the global summary
            statistics rather than accumulating the batch-level statistics.
callbacks (List of [`TrainerCallback`], *optional*):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in [here](callback).
If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method.
optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*, defaults to `(None, None)`):
A tuple containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your
model and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
optimizer_cls_and_kwargs (`tuple[Type[torch.optim.Optimizer], dict[str, Any]]`, *optional*):
A tuple containing the optimizer class and keyword arguments to use.
Overrides `optim` and `optim_args` in `args`. Incompatible with the `optimizers` argument.
Unlike `optimizers`, this argument avoids the need to place model parameters on the correct devices before initializing the Trainer.
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*):
A function that preprocess the logits right before caching them at each evaluation step. Must take two
tensors, the logits and the labels, and return the logits once processed as desired. The modifications made
by this function will be reflected in the predictions received by `compute_metrics`.
Note that the labels (second parameter) will be `None` if the dataset does not have them.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`]
subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`,
the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner
model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to `False` if model parallel or deepspeed is used, or if the default
`TrainingArguments.place_model_on_device` is overridden to return `False` .
- **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while
in `train`)
"""
# Those are used as methods of the Trainer in examples.
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
    def __init__(
        self,
        model: PreTrainedModel | nn.Module | None = None,
        args: TrainingArguments | None = None,
        data_collator: DataCollator | None = None,
        train_dataset: Union[Dataset, IterableDataset, "datasets.Dataset"] | None = None,
        eval_dataset: Union[Dataset, dict[str, Dataset], "datasets.Dataset"] | None = None,
        processing_class: PreTrainedTokenizerBase
        | BaseImageProcessor
        | FeatureExtractionMixin
        | ProcessorMixin
        | None = None,
        model_init: Callable[..., PreTrainedModel] | None = None,
        compute_loss_func: Callable | None = None,
        compute_metrics: Callable[[EvalPrediction], dict] | None = None,
        callbacks: list[TrainerCallback] | None = None,
        optimizers: tuple[torch.optim.Optimizer | None, torch.optim.lr_scheduler.LambdaLR | None] = (None, None),
        optimizer_cls_and_kwargs: tuple[type[torch.optim.Optimizer], dict[str, Any]] | None = None,
        preprocess_logits_for_metrics: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] | None = None,
    ):
        """Validate arguments and set up all trainer state (see the class docstring for argument semantics).

        Order matters throughout: the accelerator must exist before any device placement, the memory
        tracker brackets the whole setup, and the callback handler must be built before `on_init_end`.
        """
        # --- argument validation ---
        if args is None:
            output_dir = "tmp_trainer"
            logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
            args = TrainingArguments(output_dir=output_dir)
        if args.batch_eval_metrics and compute_metrics is not None:
            if "compute_result" not in inspect.signature(compute_metrics).parameters:
                raise ValueError(
                    "When using `batch_eval_metrics`, your `compute_metrics` function must take a `compute_result`"
                    " boolean argument which will be triggered after the last batch of the eval set to signal that the"
                    " summary statistics should be returned by the function."
                )
        if args.eval_strategy is not None and args.eval_strategy != "no" and eval_dataset is None:
            raise ValueError(
                f"You have set `args.eval_strategy` to {args.eval_strategy} but you didn't pass an `eval_dataset` to `Trainer`. Either set `args.eval_strategy` to `no` or pass an `eval_dataset`. "
            )
        if args.save_strategy == SaveStrategy.BEST or args.load_best_model_at_end:
            if args.metric_for_best_model is None:
                raise ValueError(
                    "`args.metric_for_best_model` must be provided when using 'best' save_strategy or if `args.load_best_model_at_end` is set to `True`."
                )
        self.args = args
        self.compute_loss_func = compute_loss_func
        # Seed must be set before instantiating the model when using model
        enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
        self.hp_name = None
        self.deepspeed = None
        self.is_in_train = False
        self.model = model
        self.create_accelerator_and_postprocess()
        # memory metrics - must set up as early as possible
        self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
        self._memory_tracker.start()
        # set the correct log level depending on the node
        log_level = args.get_process_log_level()
        logging.set_verbosity(log_level)
        # force device and distributed setup init explicitly
        args._setup_devices
        # --- model / model_init resolution ---
        if model is None:
            if model_init is not None:
                self.model_init = model_init
                model = self.call_model_init()
            else:
                raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
        else:
            if model_init is not None:
                warnings.warn(
                    "`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will"
                    " overwrite your model when calling the `train` method. This will become a fatal error in the next"
                    " release.",
                    FutureWarning,
                )
            self.model_init = model_init
        if model.__class__.__name__ in MODEL_MAPPING_NAMES:
            raise ValueError(
                f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only "
                "computes hidden states and does not accept any labels. You should choose a model with a head "
                "suitable for your task like any of the `AutoModelForXxx` listed at "
                "https://huggingface.co/docs/transformers/model_doc/auto"
            )
        self.is_model_parallel = False
        if getattr(model, "hf_device_map", None) is not None:
            devices = [device for device in set(model.hf_device_map.values()) if device not in ["cpu", "disk"]]
            if len(devices) > 1:
                self.is_model_parallel = True
            elif len(devices) == 1:
                self.is_model_parallel = self.args.device != torch.device(devices[0])
        if self.args.use_liger_kernel:
            if is_liger_kernel_available():
                from liger_kernel.transformers import _apply_liger_kernel_to_instance

                # Prepare kernel config - use provided config or default (empty dict for default behavior)
                kernel_config = self.args.liger_kernel_config if self.args.liger_kernel_config is not None else {}
                if isinstance(model, PreTrainedModel):
                    # Patch the model with liger kernels. Use the specified or default kernel configurations.
                    _apply_liger_kernel_to_instance(model=model, **kernel_config)
                elif hasattr(model, "get_base_model") and isinstance(model.get_base_model(), PreTrainedModel):
                    # Patch the base model with liger kernels where model is a PeftModel. Use the specified or default kernel configurations.
                    _apply_liger_kernel_to_instance(model=model.get_base_model(), **kernel_config)
                else:
                    logger.warning(
                        "The model is not an instance of PreTrainedModel. No liger kernels will be applied."
                    )
            else:
                raise ImportError(
                    "You have set `use_liger_kernel` to `True` but liger-kernel >= 0.3.0 is not available. "
                    "Please install it with `pip install liger-kernel`"
                )
        # --- quantization sanity checks ---
        _is_quantized_and_base_model = getattr(model, "is_quantized", False) and not getattr(
            model, "_hf_peft_config_loaded", False
        )
        _quantization_method_supports_training = (
            getattr(model, "hf_quantizer", None) is not None and model.hf_quantizer.is_trainable
        )
        _is_model_quantized_and_qat_trainable = getattr(model, "hf_quantizer", None) is not None and getattr(
            model.hf_quantizer, "is_qat_trainable", False
        )
        # Filter out quantized + compiled models
        if _is_quantized_and_base_model and hasattr(model, "_orig_mod"):
            raise ValueError(
                "You cannot fine-tune quantized model with `torch.compile()` make sure to pass a non-compiled model when fine-tuning a quantized model with PEFT"
            )
        # At this stage the model is already loaded
        if _is_quantized_and_base_model and not _is_peft_model(model) and not _is_model_quantized_and_qat_trainable:
            raise ValueError(
                "You cannot perform fine-tuning on purely quantized models. Please attach trainable adapters on top of"
                " the quantized model to correctly perform fine-tuning. Please see: https://huggingface.co/docs/transformers/peft"
                " for more details"
            )
        elif _is_quantized_and_base_model and not _quantization_method_supports_training:
            raise ValueError(
                f"The model you are trying to fine-tune is quantized with {model.hf_quantizer.quantization_config.quant_method}"
                " but that quantization method do not support training. Please open an issue on GitHub: https://github.com/huggingface/transformers"
                f" to request the support for training support for {model.hf_quantizer.quantization_config.quant_method}"
            )
        self.is_fsdp_xla_enabled = args.fsdp_config["xla"]
        if len(args.fsdp) > 0:
            if self.is_deepspeed_enabled:
                raise ValueError(
                    "Using --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags."
                )
            if not args.fsdp_config["xla"] and args.parallel_mode != ParallelMode.DISTRIBUTED:
                raise ValueError("Using fsdp only works in distributed training.")
        # one place to sort out whether to place the model on device or not
        # postpone switching model to cuda when:
        # 1. MP - since we are trying to fit a much bigger than 1 gpu model
        # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
        #    and we only use deepspeed for training at the moment
        # 3. full bf16 or fp16 eval - since the model needs to be cast to the right dtype first
        # 4. FSDP - same as MP
        self.place_model_on_device = args.place_model_on_device
        if (
            self.is_model_parallel
            or self.is_deepspeed_enabled
            or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train)
            or self.is_fsdp_xla_enabled
            or self.is_fsdp_enabled
        ):
            self.place_model_on_device = False
        # --- data collator / datasets ---
        default_collator = (
            DataCollatorWithPadding(processing_class)
            if processing_class is not None
            and isinstance(processing_class, (PreTrainedTokenizerBase, SequenceFeatureExtractor))
            else default_data_collator
        )
        self.data_collator = data_collator if data_collator is not None else default_collator
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
        self.processing_class = processing_class
        # Bnb Quantized models doesn't support `.to` operation.
        if (
            self.place_model_on_device
            and getattr(model, "quantization_method", None) != QuantizationMethod.BITS_AND_BYTES
        ):
            self._move_model_to_device(model, args.device)
        # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
        if self.is_model_parallel:
            self.args._n_gpu = 1
        # later use `self.model is self.model_wrapped` to check if it's wrapped or not
        self.model_wrapped = model
        self.model = model
        # Just in case the model was wrapped outside of the `Trainer`
        unwrapped_model = self.accelerator.unwrap_model(model)
        # We also unwrap peft model
        if _is_peft_model(unwrapped_model):
            if hasattr(unwrapped_model, "get_base_model"):
                unwrapped_model = unwrapped_model.get_base_model()
            elif hasattr(unwrapped_model, "base_model") and hasattr(unwrapped_model.base_model, "model"):
                unwrapped_model = unwrapped_model.base_model.model
            else:
                raise AttributeError("Cannot extract base model safely from this PEFT wrapper.")
        # Check if the model has explicit setup for loss kwargs,
        # if not, check if `**kwargs` are in model.forward
        if hasattr(unwrapped_model, "accepts_loss_kwargs"):
            self.model_accepts_loss_kwargs = unwrapped_model.accepts_loss_kwargs
        else:
            forward_params = inspect.signature(unwrapped_model.forward).parameters
            self.model_accepts_loss_kwargs = any(
                k.kind == inspect.Parameter.VAR_KEYWORD for k in forward_params.values()
            )
        # Override for Sequence Parallelism: SP computes its own good_tokens count, so skip num_items_in_batch calculation
        pc = getattr(self.accelerator, "parallelism_config", None)
        if pc is not None and pc.sp_backend == "deepspeed" and pc.sp_enabled:
            self.model_accepts_loss_kwargs = False
        self.neftune_noise_alpha = args.neftune_noise_alpha
        self.compute_metrics = compute_metrics
        self.preprocess_logits_for_metrics = preprocess_logits_for_metrics
        # --- optimizer / scheduler compatibility checks ---
        self.optimizer, self.lr_scheduler = optimizers
        self.optimizer_cls_and_kwargs = optimizer_cls_and_kwargs
        if self.optimizer_cls_and_kwargs is not None and self.optimizer is not None:
            raise RuntimeError("Passing both `optimizers` and `optimizer_cls_and_kwargs` arguments is incompatible.")
        if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
            raise RuntimeError(
                "Passing a `model_init` is incompatible with providing the `optimizers` argument. "
                "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
            )
        if is_torch_xla_available() and self.optimizer is not None:
            for param in self.model.parameters():
                model_device = param.device
                break
            for param_group in self.optimizer.param_groups:
                if len(param_group["params"]) > 0:
                    optimizer_device = param_group["params"][0].device
                    break
            if model_device != optimizer_device:
                raise ValueError(
                    "The model and the optimizer parameters are not on the same device, which probably means you"
                    " created an optimizer around your model **before** putting on the device and passing it to the"
                    " `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and"
                    " `model.to(xm.xla_device())` is performed before the optimizer creation in your script."
                )
        if (self.is_fsdp_xla_enabled or self.is_fsdp_enabled) and (
            self.optimizer is not None or self.lr_scheduler is not None
        ):
            raise RuntimeError(
                "Passing `optimizers` is not allowed if PyTorch FSDP is enabled. "
                "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
            )
        # --- callbacks ---
        default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
        callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
        self.callback_handler = CallbackHandler(
            callbacks, self.model, self.processing_class, self.optimizer, self.lr_scheduler
        )
        self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
        # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
        self._loggers_initialized = False
        # Create distant repo and output directory if needed
        self.hub_model_id = None
        if self.args.push_to_hub:
            self.init_hf_repo()
        if self.args.should_save:
            os.makedirs(self.args.output_dir, exist_ok=True)
        if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
            raise TypeError("The `data_collator` should be a simple callable (function, class with `__call__`).")
        if args.max_steps > 0 and args.num_train_epochs > 0:
            logger.info("max_steps is given, it will override any value given in num_train_epochs")
        if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0:
            raise ValueError(
                "The train_dataset does not implement __len__, max_steps has to be specified. "
                "The number of steps needs to be known in advance for the learning rate scheduler."
            )
        if (
            train_dataset is not None
            and isinstance(train_dataset, torch.utils.data.IterableDataset)
            and args.group_by_length
        ):
            raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset")
        self._signature_columns = None
        # Mixed precision setup for SageMaker Model Parallel
        if is_sagemaker_mp_enabled():
            # BF16 + model parallelism in SageMaker: currently not supported, raise an error
            if args.bf16:
                raise ValueError("SageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead ")
            if args.fp16 != smp.state.cfg.fp16:
                logger.warning(
                    f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, "
                    f"but FP16 provided in trainer argument is {args.fp16}, "
                    f"setting to {smp.state.cfg.fp16}"
                )
                args.fp16 = smp.state.cfg.fp16
        if args.fp16 and args.device == torch.device("cpu") and not is_torch_greater_or_equal_than_2_3:
            raise ValueError("Tried to use `fp16` but it is not supported on cpu. You need to have torch>=2.3")
        # Label smoothing
        if self.args.label_smoothing_factor != 0:
            self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
        else:
            self.label_smoother = None
        # Check for multi-label classification incompatibility
        if self.args.label_smoothing_factor > 0:
            if getattr(self.model.config, "problem_type", None) == "multi_label_classification":
                warnings.warn(
                    "Label smoothing is not compatible with multi-label classification. "
                    "Disabling label smoothing for this training run.",
                    UserWarning,
                )
                self.label_smoother = None
        self.control = TrainerControl()
        self.state = TrainerState(
            is_local_process_zero=self.is_local_process_zero(),
            is_world_process_zero=self.is_world_process_zero(),
            stateful_callbacks=[
                cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState)
            ],
        )
        # Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then
        # returned to 0 every time flos need to be logged
        self.current_flos = 0
        self.hp_search_backend = None
        model_to_inspect = self.model
        if _is_peft_model(self.model):
            if hasattr(self.model, "get_base_model"):
                model_to_inspect = self.model.get_base_model()
            else:
                # PeftMixedModel do not provide a `get_base_model` method
                model_to_inspect = self.model.base_model.model
        default_label_names = find_labels(model_to_inspect.__class__)
        self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
        self.can_return_loss = can_return_loss(model_to_inspect.__class__)
        self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
        # Internal variables to help with automatic batch size reduction
        self._train_batch_size = args.train_batch_size
        self._created_lr_scheduler = False
        # Set use_cache for the model
        if getattr(self.model, "config", None) is not None:
            self.model.config.use_cache = self.args.use_cache
        # very last
        self._memory_tracker.stop_and_update_metrics()
        self.is_fsdp_xla_v2_enabled = args.fsdp_config.get("xla_fsdp_v2", False)
        if self.is_fsdp_xla_v2_enabled:
            if not IS_XLA_FSDPV2_POST_2_2:
                raise ValueError("FSDPv2 requires `torch_xla` 2.2 or higher.")
            # Prepare the SPMD mesh that is going to be used by the data loader and the FSDPv2 wrapper.
            # Tensor axis is just a placeholder where it will not be used in FSDPv2.
            num_devices = xr.global_runtime_device_count()
            xs.set_global_mesh(xs.Mesh(np.array(range(num_devices)), (num_devices, 1), axis_names=("fsdp", "tensor")))
        self.is_fsdp_xla_v1_enabled = self.is_fsdp_xla_enabled and not self.is_fsdp_xla_v2_enabled
def _activate_neftune(self, model):
r"""
Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper:
https://huggingface.co/papers/2310.05914
"""
unwrapped_model = self.accelerator.unwrap_model(model)
if _is_peft_model(unwrapped_model):
embeddings = unwrapped_model.base_model.model.get_input_embeddings()
else:
embeddings = unwrapped_model.get_input_embeddings()
del unwrapped_model
embeddings.neftune_noise_alpha = self.neftune_noise_alpha
hook_handle = embeddings.register_forward_hook(neftune_post_forward_hook)
self.neftune_hook_handle = hook_handle
return model
def _deactivate_neftune(self, model):
"""
Deactivates the neftune method. Make sure to call `_activate_neftune` first.
"""
if not hasattr(self, "neftune_hook_handle"):
raise ValueError("Neftune is not activated make sure to call `trainer._activate_neftune()` first")
unwrapped_model = self.accelerator.unwrap_model(model)
if _is_peft_model(unwrapped_model):
embeddings = unwrapped_model.base_model.model.get_input_embeddings()
else:
embeddings = unwrapped_model.get_input_embeddings()
self.neftune_hook_handle.remove()
del embeddings.neftune_noise_alpha, unwrapped_model
def add_callback(self, callback):
"""
Add a callback to the current list of [`~transformers.TrainerCallback`].
Args:
callback (`type` or [`~transformers.TrainerCallback]`):
A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of [`~transformers.TrainerCallback`] and returns it.
If the callback is not found, returns `None` (and no error is raised).
Args:
callback (`type` or [`~transformers.TrainerCallback]`):
A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
first case, will pop the first member of that class found in the list of callbacks.
Returns:
[`~transformers.TrainerCallback`]: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of [`~transformers.TrainerCallback`].
Args:
callback (`type` or [`~transformers.TrainerCallback]`):
A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _move_model_to_device(self, model, device):
if getattr(model, "hf_device_map", None) is not None:
logger.warning(
"The model is already on multiple devices. Skipping the move to device specified in `args`."
)
return
model = model.to(device)
# Moving a model to an XLA device disconnects the tied weights, so we have to retie them.
if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights"):
model.tie_weights()
    def _align_special_tokens(self):
        """
        Aligns the special tokens of the tokenizer with the model configs.

        New tokens may be defined in the tokenizer for fine-tuning purposes, e.g. an "end of turn" token may be
        added on chat models. In that case, we want the model configs to be aligned with the tokenizer, so that all
        downstream uses work as expected. This alignment should happen before training, to ensure the prediction step
        uses the new tokens as well.
        """
        # Processors wrap a tokenizer; pull the actual tokenizer out first.
        if isinstance(self.processing_class, ProcessorMixin):
            tokenizer: PreTrainedTokenizerBase = self.processing_class.tokenizer
        else:
            tokenizer = self.processing_class
        model_has_generation_config = (
            hasattr(self.model, "generation_config") and self.model.generation_config is not None
        )
        # Collects {token_name: new_id} for the warning emitted at the end.
        updated_tokens = {}
        # 1 - Align EOS token. EOS is more complex than the others, as `generation_config` may hold more than one EOS
        # token.
        tokenizer_has_new_eos = tokenizer.eos_token_id != self.model.config.eos_token_id
        if model_has_generation_config:
            # `generation_config.eos_token_id` is None: direct comparison
            if self.model.generation_config.eos_token_id is None:
                tokenizer_has_new_eos |= tokenizer.eos_token_id != self.model.generation_config.eos_token_id
            else:
                # `generation_config.eos_token_id` is an `int`: convert it to list (and continue below)
                if isinstance(self.model.generation_config.eos_token_id, int):
                    self.model.generation_config.eos_token_id = [self.model.generation_config.eos_token_id]
                # `generation_config.eos_token_id` is a `list`: check if the tokenizer's EOS token is in the list
                tokenizer_has_new_eos |= tokenizer.eos_token_id not in self.model.generation_config.eos_token_id
        if tokenizer_has_new_eos:
            updated_tokens["eos_token_id"] = tokenizer.eos_token_id
            self.model.config.eos_token_id = tokenizer.eos_token_id
            # The generation config may hold more than one EOS token. We preserve the original EOS tokens: any of the
            # EOS tokens defined here will halt generation.
            if model_has_generation_config:
                all_eos_tokens = [tokenizer.eos_token_id]
                if self.model.generation_config.eos_token_id is not None:
                    all_eos_tokens += list(self.model.generation_config.eos_token_id)
                self.model.generation_config.eos_token_id = [token for token in all_eos_tokens if token is not None]
        # 2 - Align BOS
        tokenizer_has_new_bos = tokenizer.bos_token_id != self.model.config.bos_token_id
        if model_has_generation_config:
            tokenizer_has_new_bos |= tokenizer.bos_token_id != self.model.generation_config.bos_token_id
        if tokenizer_has_new_bos:
            updated_tokens["bos_token_id"] = tokenizer.bos_token_id
            self.model.config.bos_token_id = tokenizer.bos_token_id
            if model_has_generation_config:
                self.model.generation_config.bos_token_id = tokenizer.bos_token_id
        # 3 - Align PAD
        tokenizer_has_new_pad = tokenizer.pad_token_id != self.model.config.pad_token_id
        if model_has_generation_config:
            tokenizer_has_new_pad |= tokenizer.pad_token_id != self.model.generation_config.pad_token_id
        if tokenizer_has_new_pad:
            updated_tokens["pad_token_id"] = tokenizer.pad_token_id
            self.model.config.pad_token_id = tokenizer.pad_token_id
            if model_has_generation_config:
                self.model.generation_config.pad_token_id = tokenizer.pad_token_id
        # 4 - Warn users about the changes
        if len(updated_tokens) > 0:
            logger.warning(
                "The tokenizer has new PAD/BOS/EOS tokens that differ from the model config and generation config. "
                "The model config and generation config were aligned accordingly, being updated with the tokenizer's "
                f"values. Updated tokens: {updated_tokens}."
            )
def _set_signature_columns_if_needed(self):
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
model_to_inspect = self.model
if _is_peft_model(self.model):
if hasattr(self.model, "get_base_model"):
model_to_inspect = self.model.get_base_model()
else:
# PeftMixedModel do not provide a `get_base_model` method
model_to_inspect = self.model.base_model.model
signature = inspect.signature(model_to_inspect.forward)
self._signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
self._signature_columns += list(set(["label", "label_ids"] + self.label_names))
    def _remove_unused_columns(self, dataset: "datasets.Dataset", description: str | None = None):
        """Drop dataset columns that `model.forward` does not accept, when `args.remove_unused_columns` is set.

        Args:
            dataset: A `datasets.Dataset` whose columns are checked against the model signature.
            description: Optional name of the split (e.g. "Training"), used only in the log message.

        Returns:
            The dataset restricted to columns the model accepts (or unchanged if the feature is disabled).
        """
        if not self.args.remove_unused_columns:
            return dataset
        self._set_signature_columns_if_needed()
        signature_columns = self._signature_columns
        ignored_columns = list(set(dataset.column_names) - set(signature_columns))
        if len(ignored_columns) > 0:
            dset_description = "" if description is None else f"in the {description} set"
            logger.info(
                f"The following columns {dset_description} don't have a corresponding argument in "
                f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
                f" If {', '.join(ignored_columns)} are not expected by `{self.model.__class__.__name__}.forward`, "
                " you can safely ignore this message."
            )
        columns = [k for k in signature_columns if k in dataset.column_names]
        if len(columns) == 0:
            raise ValueError(
                f"No columns in the dataset match the model's forward method signature: ({', '.join(signature_columns)}). "
                f"The following columns have been ignored: [{', '.join(ignored_columns)}]. "
                "Please check the dataset and model. You may need to set `remove_unused_columns=False` in `TrainingArguments`."
            )
        # Old `datasets` versions have no `remove_columns`; fall back to `set_format` column filtering.
        if version.parse(datasets.__version__) < version.parse("1.4.0"):
            dataset.set_format(
                type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"]
            )
            return dataset
        else:
            return dataset.remove_columns(ignored_columns)
def _get_collator_with_removed_columns(self, data_collator: Callable, description: str | None = None) -> Callable:
"""Wrap the data collator in a callable removing unused columns."""
if not self.args.remove_unused_columns:
return data_collator
self._set_signature_columns_if_needed()
signature_columns = self._signature_columns
remove_columns_collator = RemoveColumnsCollator(
data_collator=data_collator,
signature_columns=signature_columns,
logger=logger,
description=description,
model_name=self.model.__class__.__name__,
)
return remove_columns_collator
    def _get_train_sampler(self, train_dataset: Dataset | None = None) -> torch.utils.data.Sampler | None:
        """Build the sampler for the training dataloader.

        Returns `None` for sized-less datasets, a `LengthGroupedSampler` when `args.group_by_length` is set,
        and a plain `RandomSampler` otherwise.
        """
        if train_dataset is None:
            train_dataset = self.train_dataset
        # Iterable / length-less datasets cannot be sampled.
        if train_dataset is None or not has_length(train_dataset):
            return None
        # Build the sampler.
        if self.args.group_by_length:
            # Precomputed lengths (if the column exists) avoid tokenizing just to measure examples.
            if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
                lengths = (
                    train_dataset[self.args.length_column_name]
                    if self.args.length_column_name in train_dataset.column_names
                    else None
                )
            else:
                lengths = None
            model_input_name = (
                self.processing_class.model_input_names[0] if self.processing_class is not None else None
            )
            return LengthGroupedSampler(
                self.args.train_batch_size * self.args.gradient_accumulation_steps,
                dataset=train_dataset,
                lengths=lengths,
                model_input_name=model_input_name,
            )
        else:
            return RandomSampler(train_dataset)
    def _get_dataloader(
        self,
        dataset: Dataset,
        description: str,
        batch_size: int,
        sampler_fn: Callable[[Dataset], torch.utils.data.Sampler] | None = None,
        is_training: bool = False,
        dataloader_key: str | None = None,
    ) -> DataLoader:
        """Create a [`~torch.utils.data.DataLoader`] from the given dataset.

        Args:
            dataset: The dataset to wrap; unused columns are removed (or stripped by the collator).
            description: Split name used in log messages (e.g. "Training", "Evaluation").
            batch_size: Per-device batch size for the loader.
            sampler_fn: Callable producing a sampler for map-style datasets; ignored for iterable datasets.
            is_training: When `True`, installs the seeded `worker_init_fn` for reproducible shuffling.
            dataloader_key: When set and `dataloader_persistent_workers` is on, the prepared loader is
                cached under this key for reuse across evaluations.
        """
        data_collator = self.data_collator
        if is_datasets_available() and isinstance(dataset, datasets.Dataset):
            dataset = self._remove_unused_columns(dataset, description=description)
        else:
            data_collator = self._get_collator_with_removed_columns(self.data_collator, description=description)
        # MPS requires forking if multiple workers are specified
        should_fork = torch.backends.mps.is_available() and self.args.dataloader_num_workers > 1
        dataloader_params = {
            "batch_size": batch_size,
            "collate_fn": data_collator,
            "num_workers": self.args.dataloader_num_workers,
            "pin_memory": self.args.dataloader_pin_memory,
            "persistent_workers": self.args.dataloader_persistent_workers,
            "multiprocessing_context": "fork" if should_fork else None,
        }
        # Samplers / drop_last / prefetch only apply to map-style datasets.
        if not isinstance(dataset, torch.utils.data.IterableDataset):
            if sampler_fn is not None:
                dataloader_params["sampler"] = sampler_fn(dataset)
            dataloader_params["drop_last"] = self.args.dataloader_drop_last
            dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor
            if is_training:
                dataloader_params["worker_init_fn"] = partial(
                    seed_worker, num_workers=self.args.dataloader_num_workers, rank=self.args.process_index
                )
        dataloader = self.accelerator.prepare(DataLoader(dataset, **dataloader_params))
        # Store the prepared dataloader for subsequent evaluations if using persistent workers.
        if dataloader_key is not None and self.args.dataloader_persistent_workers:
            if hasattr(self, "_eval_dataloaders"):
                self._eval_dataloaders[dataloader_key] = dataloader
            else:
                self._eval_dataloaders = {dataloader_key: dataloader}
        return dataloader
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training [`~torch.utils.data.DataLoader`].
Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed
training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
return self._get_dataloader(
dataset=self.train_dataset,
description="Training",
batch_size=self._train_batch_size,
sampler_fn=self._get_train_sampler,
is_training=True,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> torch.utils.data.Sampler | None:
if eval_dataset is None or not has_length(eval_dataset):
return None
if self.args.group_by_length:
if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
lengths = (
eval_dataset[self.args.length_column_name]
if self.args.length_column_name in eval_dataset.column_names
else None
)
else:
lengths = None
model_input_name = (
self.processing_class.model_input_names[0] if self.processing_class is not None else None
)
return LengthGroupedSampler(
self.args.eval_batch_size,
dataset=eval_dataset,
lengths=lengths,
model_input_name=model_input_name,
)
if self.args.world_size <= 1:
return SequentialSampler(eval_dataset)
else:
return None
def get_eval_dataloader(self, eval_dataset: str | Dataset | None = None) -> DataLoader:
"""
Returns the evaluation [`~torch.utils.data.DataLoader`].
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (`str` or `torch.utils.data.Dataset`, *optional*):
If a `str`, will use `self.eval_dataset[eval_dataset]` as the evaluation dataset. If a `Dataset`, will override `self.eval_dataset` and must implement `__len__`. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
# If we have persistent workers, don't do a fork bomb especially as eval datasets
# don't change during training
dataloader_key = eval_dataset if isinstance(eval_dataset, str) else "eval"
if (
hasattr(self, "_eval_dataloaders")
and dataloader_key in self._eval_dataloaders
and self.args.dataloader_persistent_workers
):
return self._eval_dataloaders[dataloader_key]
eval_dataset = (
self.eval_dataset[eval_dataset]
if isinstance(eval_dataset, str)
else eval_dataset
if eval_dataset is not None
else self.eval_dataset
)
return self._get_dataloader(
dataset=eval_dataset,
description="Evaluation",
batch_size=self.args.eval_batch_size,
sampler_fn=self._get_eval_sampler,
dataloader_key=dataloader_key,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test [`~torch.utils.data.DataLoader`].
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (`torch.utils.data.Dataset`, *optional*):
The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed. It must implement `__len__`.
"""
return self._get_dataloader(
dataset=test_dataset,
description="test",
batch_size=self.args.eval_batch_size,
sampler_fn=self._get_eval_sampler,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or
`create_scheduler`) in a subclass.
"""
self.create_optimizer()
if is_sagemaker_mp_enabled() and smp.state.cfg.fp16:
# If fp16 is enabled, we unwrap the optimizer
optimizer = self.optimizer.optimizer
else:
optimizer = self.optimizer
self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
def get_decay_parameter_names(self, model) -> list[str]:
"""
Get all parameter names that weight decay will be applied to.
This function filters out parameters in two ways:
1. By layer type (instances of layers specified in ALL_LAYERNORM_LAYERS)
2. By parameter name patterns (containing 'bias', or variation of 'norm')
"""
forbidden_name_patterns = [r"bias", r"layernorm", r"rmsnorm", r"(?:^|\.)norm(?:$|\.)", r"_norm(?:$|\.)"]
decay_parameters = get_parameter_names(model, [nn.LayerNorm], forbidden_name_patterns)
return decay_parameters
    def create_optimizer(self):
        """
        Setup the optimizer.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through `optimizers`, or subclass and override this method in a subclass.
        """
        # Under SageMaker model parallelism the wrapped model owns the parameters to optimize.
        opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model

        if self.optimizer is None:
            # Split trainable parameters into a weight-decay group and a no-decay group
            # (biases and normalization layers conventionally receive no weight decay).
            decay_parameters = self.get_decay_parameter_names(opt_model)
            optimizer_grouped_parameters = [
                {
                    "params": [
                        p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)
                    ],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [
                        p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)
                    ],
                    "weight_decay": 0.0,
                },
            ]

            # An explicit (class, kwargs) pair supplied by the user takes precedence over the
            # class/kwargs derived from `args.optim`.
            if self.optimizer_cls_and_kwargs is not None:
                optimizer_cls, optimizer_kwargs = self.optimizer_cls_and_kwargs
            else:
                optimizer_cls, optimizer_kwargs = self.get_optimizer_cls_and_kwargs(self.args, opt_model)

            # Overwrite `params` in case it's created by `get_optimizer_cls_and_kwargs`
            # e.g. for GaLore optimizer.
            if "params" in optimizer_kwargs:
                optimizer_grouped_parameters = optimizer_kwargs.pop("params")

            # Overwrite `model` in case it's created by `get_optimizer_cls_and_kwargs`
            # e.g. for LOMO optimizer.
            if "model" in optimizer_kwargs:
                optimizer_grouped_parameters = optimizer_kwargs.pop("model")

            # For layer-wise dummy optimizers we overwrite optimizer_grouped_parameters with `optimizer_dict`
            # to avoid arguments conflicts.
            if "optimizer_dict" in optimizer_kwargs:
                optimizer_grouped_parameters = optimizer_kwargs.pop("optimizer_dict")

            self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

            if "bitsandbytes" in str(optimizer_cls) and optimizer_kwargs.get("optim_bits", None) == 8:
                import bitsandbytes

                manager = bitsandbytes.optim.GlobalOptimManager.get_instance()

                skipped = 0
                # Keep embedding weights in 32-bit optimizer state: 8-bit states on embeddings
                # are registered as an override below.
                for module in opt_model.modules():
                    if isinstance(module, nn.Embedding):
                        skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
                        logger.info(f"skipped {module}: {skipped / 2**20}M params")
                        manager.register_module_override(module, "weight", {"optim_bits": 32})
                        logger.debug(f"bitsandbytes: will optimize {module} in fp32")
                logger.info(f"skipped: {skipped / 2**20}M params")

        if is_sagemaker_mp_enabled():
            self.optimizer = smp.DistributedOptimizer(self.optimizer)

        return self.optimizer
def get_num_trainable_parameters(self):
"""
Get the number of trainable parameters.
"""
return sum(p.numel() for p in self.model.parameters() if p.requires_grad)
def get_learning_rates(self):
"""
Returns the learning rate of each parameter from self.optimizer.
"""
if self.optimizer is None:
raise ValueError("Trainer optimizer is None, please make sure you have setup the optimizer before.")
return [group["lr"] for group in self.optimizer.param_groups]
def get_optimizer_group(self, param: str | torch.nn.parameter.Parameter | None = None):
"""
Returns optimizer group for a parameter if given, else returns all optimizer groups for params.
Args:
param (`str` or `torch.nn.parameter.Parameter`, *optional*):
The parameter for which optimizer group needs to be returned.
"""
if self.optimizer is None:
raise ValueError("Trainer optimizer is None, please make sure you have setup the optimizer before.")
if param is not None:
for group in self.optimizer.param_groups:
if param in group["params"]:
return group
return [group["params"] for group in self.optimizer.param_groups]
    @staticmethod
    def get_optimizer_cls_and_kwargs(args: TrainingArguments, model: PreTrainedModel | None = None) -> tuple[Any, Any]:
        """
        Returns the optimizer class and optimizer parameters based on the training arguments.

        Args:
            args (`transformers.training_args.TrainingArguments`):
                The training arguments for the training session.
            model (`PreTrainedModel`, *optional*):
                The model being trained; required by optimizers that inspect the module tree
                (GaLore, APOLLO) or bind to the model (LOMO).

        Returns:
            `tuple[Any, Any]`: the optimizer class and the keyword arguments to instantiate it with.
        """
        # parse args.optim_args — a comma-separated "key=value" string of extra optimizer options
        optim_args = {}
        if args.optim_args:
            for mapping in args.optim_args.replace(" ", "").split(","):
                key, value = mapping.split("=")
                optim_args[key] = value

        optimizer_kwargs = {"lr": args.learning_rate}

        # Shared Adam-family hyperparameters, reused by most branches below.
        adam_kwargs = {
            "betas": (args.adam_beta1, args.adam_beta2),
            "eps": args.adam_epsilon,
        }

        def setup_low_rank_optimizer(
            optimizer_name: str,
            optimizer_mapping: dict[str, Any],
            optim_kwargs: dict[str, Any],
            is_layerwise_supported: bool = True,
        ) -> tuple[Any, Any]:
            """
            Helper function to set up low-rank optimizers like GaLore and Apollo.

            Args:
                optimizer_name (str): Name of the optimizer.
                optimizer_mapping (dict): Mapping of optimizer names to their classes.
                optim_kwargs (dict): Keyword arguments for the optimizer.
                is_layerwise_supported (bool): Whether layerwise optimization is supported.

            Returns:
                tuple[Any, Any]: Optimizer class and updated optimizer kwargs.
            """
            is_layerwise = optimizer_name.lower().endswith("layerwise")
            if is_layerwise and args.parallel_mode == ParallelMode.DISTRIBUTED and is_layerwise_supported:
                raise NotImplementedError(f"Layer-wise {optimizer_name} does not support DDP at this time")

            optimizer_cls = optimizer_mapping[optimizer_name]

            if args.optim_target_modules is None:
                raise ValueError(f"You need to define `optim_target_modules` to use {optimizer_name} optimizers")
            if not isinstance(args.optim_target_modules, (list, str)):
                raise TypeError(
                    f"`optim_target_modules` must be a list of strings, a regex string, or 'all-linear'. Got: {args.optim_target_modules}"
                )
            if model is None:
                raise ValueError(f"You need to pass a model to initialize {optimizer_name} optimizer.")

            all_linear = (
                isinstance(args.optim_target_modules, str)
                and args.optim_target_modules.replace("_", "-") == "all-linear"
            )

            # Only nn.Linear weights are eligible; collect their fully-qualified parameter names.
            target_params_names = []
            for module_name, module in model.named_modules():
                target_module_exists, is_regex = check_target_module_exists(
                    args.optim_target_modules, module_name, return_is_regex=True
                )
                if not isinstance(module, nn.Linear):
                    if target_module_exists and not is_regex:
                        logger.warning(
                            f"{module_name} matched but ignored. {optimizer_name} only supports linear layers."
                        )
                    continue
                if not target_module_exists and not all_linear:
                    continue
                target_params_names.append(module_name + ".weight")
            if len(target_params_names) == 0:
                raise ValueError(f"No target modules found for {optimizer_name} ({args.optim_target_modules}).")
            target_params = [p for n, p in model.named_parameters() if n in target_params_names]
            non_target_params = [p for n, p in model.named_parameters() if n not in target_params_names]
            optim_kwargs.update(optim_args)
            # Low-rank settings apply only to the targeted (linear) parameters.
            param_groups = [
                {"params": non_target_params},
                {"params": target_params, **optim_kwargs},
            ]
            if is_layerwise:
                if args.gradient_accumulation_steps != 1:
                    raise ValueError(f"Layerwise {optimizer_name} does not support gradient accumulation!")
                # One optimizer per parameter, stepped from a post-accumulate-grad hook; the
                # Trainer then drives a LayerWiseDummyOptimizer placeholder.
                optimizer_dict = {}
                for param in non_target_params:
                    optimizer_dict[param] = optimizer_cls([{"params": [param]}], **optimizer_kwargs)
                for param in target_params:
                    optimizer_dict[param] = optimizer_cls([{"params": [param], **optim_kwargs}], **optimizer_kwargs)

                def optimizer_hook(param):
                    if param.grad is not None:
                        optimizer_dict[param].step()
                        optimizer_dict[param].zero_grad()

                for param in model.parameters():
                    if param.requires_grad:
                        param.register_post_accumulate_grad_hook(optimizer_hook)

                optimizer_cls = LayerWiseDummyOptimizer
                optimizer_kwargs.update({"optimizer_dict": optimizer_dict})

            optimizer_kwargs.update({"params": param_groups})
            return optimizer_cls, optimizer_kwargs

        if args.optim == OptimizerNames.ADAFACTOR:
            optimizer_cls = Adafactor
            optimizer_kwargs.update({"scale_parameter": False, "relative_step": False})
        elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]:
            from torch.optim import AdamW

            optimizer_cls = AdamW
            optimizer_kwargs.update(adam_kwargs)
            if args.optim == OptimizerNames.ADAMW_TORCH_FUSED:
                optimizer_kwargs.update({"fused": True})
        elif args.optim == OptimizerNames.ADAMW_TORCH_XLA:
            try:
                from torch_xla.amp.syncfree import AdamW

                optimizer_cls = AdamW
                optimizer_kwargs.update(adam_kwargs)
            except ImportError:
                raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.")
        elif args.optim == OptimizerNames.ADAMW_TORCH_NPU_FUSED:
            try:
                from torch_npu.optim import NpuFusedAdamW

                optimizer_cls = NpuFusedAdamW
                optimizer_kwargs.update(adam_kwargs)
            except ImportError:
                raise ValueError("Trainer failed to import FusedAdamW from torch_npu.")
        elif args.optim == OptimizerNames.ADAMW_APEX_FUSED:
            try:
                from apex.optimizers import FusedAdam

                optimizer_cls = FusedAdam
                optimizer_kwargs.update(adam_kwargs)
            except ImportError:
                raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!")
        elif args.optim in [
            OptimizerNames.ADAMW_BNB,
            OptimizerNames.ADAMW_8BIT,
            OptimizerNames.PAGED_ADAMW,
            OptimizerNames.PAGED_ADAMW_8BIT,
            OptimizerNames.ADEMAMIX,
            OptimizerNames.ADEMAMIX_8BIT,
            OptimizerNames.PAGED_ADEMAMIX,
            OptimizerNames.PAGED_ADEMAMIX_8BIT,
            OptimizerNames.LION,
            OptimizerNames.LION_8BIT,
            OptimizerNames.PAGED_LION,
            OptimizerNames.PAGED_LION_8BIT,
            OptimizerNames.RMSPROP_BNB,
            OptimizerNames.RMSPROP_8BIT,
            OptimizerNames.RMSPROP_32BIT,
        ]:
            if not is_bitsandbytes_available():
                raise ImportError(
                    "You need to install `bitsandbytes` in order to use bitsandbytes optimizers: `pip install -U bitsandbytes`"
                )
            from bitsandbytes.optim import AdamW, Lion, RMSprop

            # The bnb optimizer family is selected by substring of the optim name:
            # paged/8bit modifiers plus an adam/lion/rmsprop/ademamix base.
            is_paged = False
            optim_bits = 32
            optimizer_cls = None
            additional_optim_kwargs = adam_kwargs
            if "paged" in args.optim:
                is_paged = True
            if "8bit" in args.optim:
                optim_bits = 8
            if "adam" in args.optim:
                optimizer_cls = AdamW
            elif "lion" in args.optim:
                optimizer_cls = Lion
                additional_optim_kwargs = {"betas": (args.adam_beta1, args.adam_beta2)}
            elif "rmsprop" in args.optim:
                optimizer_cls = RMSprop
                # Above we pass all `adam_kwargs` to the optimizer, here
                # we only pass `optim_args` which can be passed by the user.
                additional_optim_kwargs = optim_args
            elif "ademamix" in args.optim:
                from bitsandbytes.optim import AdEMAMix

                optimizer_cls = AdEMAMix
                additional_optim_kwargs = {
                    "betas": (
                        float(optim_args.get("beta1", args.adam_beta1)),
                        float(optim_args.get("beta2", args.adam_beta2)),
                        float(optim_args.get("beta3", 0.9999)),
                    ),
                    "alpha": float(optim_args.get("alpha", 5.0)),
                    "eps": float(optim_args.get("eps", args.adam_epsilon)),
                }
                if "t_alpha" in optim_args:
                    additional_optim_kwargs["t_alpha"] = int(optim_args["t_alpha"])
                if "t_beta3" in optim_args:
                    additional_optim_kwargs["t_beta3"] = int(optim_args["t_beta3"])
            bnb_kwargs = {"optim_bits": optim_bits}
            if "rmsprop" not in args.optim:
                bnb_kwargs["is_paged"] = is_paged
            optimizer_kwargs.update(additional_optim_kwargs)
            optimizer_kwargs.update(bnb_kwargs)
        elif args.optim == OptimizerNames.ADAMW_ANYPRECISION:
            try:
                from torchdistx.optimizers import AnyPrecisionAdamW

                optimizer_cls = AnyPrecisionAdamW
                optimizer_kwargs.update(adam_kwargs)
                # TODO Change dtypes back to M=FP32, Var = BF16, Kahan = False once they can be cast together in torchdistx.
                optimizer_kwargs.update(
                    {
                        "use_kahan_summation": strtobool(optim_args.get("use_kahan_summation", "False")),
                        "momentum_dtype": getattr(torch, optim_args.get("momentum_dtype", "float32")),
                        "variance_dtype": getattr(torch, optim_args.get("variance_dtype", "float32")),
                        "compensation_buffer_dtype": getattr(
                            torch, optim_args.get("compensation_buffer_dtype", "bfloat16")
                        ),
                    }
                )
            except ImportError:
                raise ValueError("Please install https://github.com/pytorch/torchdistx")
        elif args.optim == OptimizerNames.SGD:
            optimizer_cls = torch.optim.SGD
        elif args.optim == OptimizerNames.ADAGRAD:
            optimizer_cls = torch.optim.Adagrad
        elif args.optim == OptimizerNames.RMSPROP:
            optimizer_cls = torch.optim.RMSprop
        elif args.optim in [
            OptimizerNames.GALORE_ADAMW,
            OptimizerNames.GALORE_ADAMW_8BIT,
            OptimizerNames.GALORE_ADAFACTOR,
            OptimizerNames.GALORE_ADAMW_LAYERWISE,
            OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE,
            OptimizerNames.GALORE_ADAFACTOR_LAYERWISE,
        ]:
            if not is_galore_torch_available():
                raise ImportError(
                    "You need to install `galore_torch` in order to use GaLore optimizers"
                    " install it with `pip install git+https://github.com/jiaweizzhao/GaLore`"
                )
            from galore_torch import GaLoreAdafactor, GaLoreAdamW, GaLoreAdamW8bit

            optimizer_mapping = {
                OptimizerNames.GALORE_ADAMW: GaLoreAdamW,
                OptimizerNames.GALORE_ADAMW_8BIT: GaLoreAdamW8bit,
                OptimizerNames.GALORE_ADAFACTOR: GaLoreAdafactor,
                OptimizerNames.GALORE_ADAMW_LAYERWISE: GaLoreAdamW,
                OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE: GaLoreAdamW8bit,
                OptimizerNames.GALORE_ADAFACTOR_LAYERWISE: GaLoreAdafactor,
            }
            galore_optim_kwargs = {
                "rank": int(optim_args.pop("rank", 128)),
                "update_proj_gap": int(optim_args.pop("update_proj_gap", 200)),
                "scale": float(optim_args.pop("scale", 0.25)),
                "proj_type": optim_args.pop("proj_type", "std"),
            }
            optimizer_cls, optimizer_kwargs = setup_low_rank_optimizer(
                args.optim, optimizer_mapping, galore_optim_kwargs
            )
            if args.optim == OptimizerNames.GALORE_ADAFACTOR:
                optimizer_kwargs.update({"scale_parameter": False, "relative_step": False})
        elif args.optim in [
            OptimizerNames.APOLLO_ADAMW,
            OptimizerNames.APOLLO_ADAMW_LAYERWISE,
        ]:
            if not is_apollo_torch_available():
                raise ImportError(
                    "You need to install `apollo_torch` in order to use APOLLO optimizers"
                    " install it with `pip install git+https://github.com/zhuhanqing/APOLLO`"
                )
            from apollo_torch import APOLLOAdamW

            optimizer_mapping = {
                OptimizerNames.APOLLO_ADAMW: APOLLOAdamW,
                OptimizerNames.APOLLO_ADAMW_LAYERWISE: APOLLOAdamW,
            }
            apollo_optim_kwargs = {
                "rank": int(optim_args.pop("rank", 128)),
                "proj": optim_args.pop("proj", "random"),
                "scale_type": optim_args.pop("scale_type", "channel"),
                "update_proj_gap": int(optim_args.pop("update_proj_gap", 200)),
                "scale": float(optim_args.pop("scale", 1.0)),
                "proj_type": optim_args.pop("proj_type", "std"),
            }
            apollo_optim_kwargs.update(adam_kwargs)
            optimizer_cls, optimizer_kwargs = setup_low_rank_optimizer(
                args.optim, optimizer_mapping, apollo_optim_kwargs
            )
        elif args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:
            if not is_lomo_available():
                raise ImportError(
                    "You need to install `lomo_optim` in order to use LOMO optimizers"
                    " install it with `pip install lomo-optim`"
                )
            if model is None:
                raise ValueError("You need to pass a `model` in order to correctly initialize a LOMO optimizer.")
            from lomo_optim import AdaLomo, Lomo

            if "ada" in args.optim:
                optimizer_cls = AdaLomo
            else:
                optimizer_cls = Lomo
            # LOMO binds to the model directly; `create_optimizer` pops this key back out.
            optimizer_kwargs.update({"model": model})
        elif args.optim == OptimizerNames.GROKADAMW:
            if not is_grokadamw_available():
                raise ValueError("Please install grokadamw with `pip install grokadamw`")
            from grokadamw import GrokAdamW

            optimizer_cls = GrokAdamW
            optimizer_kwargs.update(
                {
                    "alpha_init": float(optim_args.get("alpha_init", 0.98)),
                    "lamb": float(optim_args.get("lamb", 2.0)),
                    "gamma": float(optim_args.get("gamma", 0.1)),
                    "grokking_signal_decay_rate": float(optim_args.get("grokking_signal_decay_rate", 0.1)),
                    "gradient_clipping": float(optim_args.get("gradient_clipping", 1.0)),
                }
            )
        elif args.optim in [
            OptimizerNames.ADAMW_TORCH_4BIT,
            OptimizerNames.ADAMW_TORCH_8BIT,
        ]:
            if not is_torchao_available() or version.parse(importlib.metadata.version("torchao")) < version.parse(
                "0.4.0"
            ):
                raise ImportError(
                    "You need to have `torchao>=0.4.0` in order to use torch 4-bit optimizers."
                    "Install it with `pip install torchao` or follow the instructions here: https://github.com/pytorch/ao"
                )
            if version.parse(importlib.metadata.version("torch")) <= version.parse("2.4"):
                raise ImportError(
                    "You need to have `torch>2.4` in order to use torch 4-bit optimizers. "
                    "Install it with `pip install --upgrade torch` it is available on pipy. Otherwise, you need to install torch nightly."
                )
            if version.parse(importlib.metadata.version("torchao")) >= version.parse("0.11.0"):
                # https://github.com/pytorch/ao/pull/2159
                from torchao.optim import AdamW4bit, AdamW8bit
            else:
                from torchao.prototype.low_bit_optim import AdamW4bit, AdamW8bit
            if args.optim == OptimizerNames.ADAMW_TORCH_4BIT:
                optimizer_cls = AdamW4bit
            elif args.optim == OptimizerNames.ADAMW_TORCH_8BIT:
                optimizer_cls = AdamW8bit
            else:
                raise ValueError("Invalid optimizer")
            optimizer_kwargs.update(adam_kwargs)
        elif args.optim in [
            OptimizerNames.SCHEDULE_FREE_RADAM,
            OptimizerNames.SCHEDULE_FREE_ADAMW,
            OptimizerNames.SCHEDULE_FREE_SGD,
        ]:
            if not is_schedulefree_available():
                raise ImportError(
                    "You need to install `schedulefree` in order to use schedulefree optimizers. "
                    "Install it with `pip install schedulefree.`"
                )
            from schedulefree import AdamWScheduleFree, SGDScheduleFree

            additional_optim_kwargs = {}
            # RAdamScheduleFree handles warmup internally; the others take warmup_steps.
            require_warmup = True
            if args.optim == OptimizerNames.SCHEDULE_FREE_RADAM:
                if not is_schedulefree_available("1.4.0"):
                    raise ImportError(
                        "You need to install `schedulefree>=1.4.0` in order to use RAdamScheduleFree optimizer. "
                        "Install it with `pip install schedulefree.`"
                    )
                from schedulefree import RAdamScheduleFree

                optimizer_cls = RAdamScheduleFree
                additional_optim_kwargs = adam_kwargs
                require_warmup = False
            elif args.optim == OptimizerNames.SCHEDULE_FREE_ADAMW:
                optimizer_cls = AdamWScheduleFree
                additional_optim_kwargs = adam_kwargs
            elif args.optim == OptimizerNames.SCHEDULE_FREE_SGD:
                optimizer_cls = SGDScheduleFree
            else:
                raise ValueError("Invalid schedulefree optimizer")
            additional_optim_kwargs["weight_decay"] = args.weight_decay
            if require_warmup:
                additional_optim_kwargs["warmup_steps"] = args.warmup_steps
            additional_optim_kwargs.update(
                {
                    "weight_lr_power": float(optim_args.get("weight_lr_power", 2.0)),
                    "r": float(optim_args.get("r", 0.0)),
                }
            )
            optimizer_kwargs.update(additional_optim_kwargs)
        elif args.optim == OptimizerNames.STABLE_ADAMW:
            if not is_torch_optimi_available():
                raise ImportError(
                    "You need to install `torch-optimi` in order to use stable_adamw optimizers. "
                    "Install it with `pip install torch-optimi`."
                )
            from optimi import StableAdamW

            max_lr = optim_args.pop("max_lr", None)
            if max_lr is not None:
                max_lr = float(max_lr)

            kahan_sum = optim_args.pop("kahan_sum", None)
            if kahan_sum is not None:
                kahan_sum = bool(kahan_sum)

            adam_kwargs["weight_decay"] = args.weight_decay

            stable_adamw_kwargs = {
                "decouple_lr": bool(optim_args.pop("decouple_lr", False)),
                "max_lr": max_lr,
                "kahan_sum": kahan_sum,
            }

            optimizer_cls = StableAdamW
            optimizer_kwargs.update(adam_kwargs)
            optimizer_kwargs.update(stable_adamw_kwargs)
        else:
            raise ValueError(f"Trainer cannot instantiate unsupported optimizer: {args.optim}")
        return optimizer_cls, optimizer_kwargs
def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
"""
Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
passed as an argument.
Args:
num_training_steps (int): The number of training steps to do.
"""
if self.lr_scheduler is None:
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
optimizer=self.optimizer if optimizer is None else optimizer,
num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
num_training_steps=num_training_steps,
scheduler_specific_kwargs=self.args.lr_scheduler_kwargs,
)
self._created_lr_scheduler = True
return self.lr_scheduler
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When
dataloader.dataset does not exist or has no length, estimates as best it can
"""
try:
dataset = dataloader.dataset
# Special case for IterableDatasetShard, we need to dig deeper
if isinstance(dataset, IterableDatasetShard):
return len(dataloader.dataset.dataset)
return len(dataloader.dataset)
except (NameError, AttributeError, TypeError): # no dataset or length, estimate by length of dataloader
return len(dataloader) * self.args.per_device_train_batch_size
@staticmethod
def num_tokens(train_dl: DataLoader, max_steps: int | None = None) -> int:
"""
Helper to get number of tokens in a [`~torch.utils.data.DataLoader`] by enumerating dataloader.
"""
train_tokens = 0
try:
for batch in train_dl:
tokens = batch["input_ids"].numel()
if max_steps is not None:
return tokens * max_steps
train_tokens += tokens
except KeyError:
logger.warning("Cannot get num_tokens from dataloader")
return train_tokens
    def _hp_search_setup(self, trial: Union["optuna.Trial", dict[str, Any]]):
        """HP search setup code"""
        self._trial = trial

        if self.hp_search_backend is None or trial is None:
            return

        # Extract the hyperparameters for this trial in a backend-specific way.
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            params = self.hp_space(trial)
        elif self.hp_search_backend == HPSearchBackend.RAY:
            params = trial
            params.pop("wandb", None)
        elif self.hp_search_backend == HPSearchBackend.WANDB:
            params = trial

        # Copy each sampled hyperparameter onto `self.args`, keeping the existing field's type.
        for key, value in params.items():
            if not hasattr(self.args, key):
                logger.warning(
                    f"Trying to set {key} in the hyperparameter search but there is no corresponding field in"
                    " `TrainingArguments`."
                )
                continue
            old_attr = getattr(self.args, key, None)
            # Casting value to the proper type
            if old_attr is not None:
                value = type(old_attr)(value)

            setattr(self.args, key, value)
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            logger.info(f"Trial: {trial.params}")
        if self.hp_search_backend == HPSearchBackend.WANDB:
            logger.info(f"W&B Sweep parameters: {trial}")
        if self.is_deepspeed_enabled:
            if self.args.deepspeed is None:
                raise ValueError("For sweeps with deepspeed, `args.deepspeed` must be set")

            self.accelerator.free_memory()
            # Rebuild the deepspeed config to reflect the updated training parameters
            from accelerate.utils import DeepSpeedPlugin

            from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig

            self.args.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.args.deepspeed)
            self.args.hf_deepspeed_config.trainer_config_process(self.args)
            self.args.deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.args.hf_deepspeed_config)

            # From 1.0 on, we need to fully wipe the DS plugin when doing sweeps.
            # Simply calling `_reset_state` is enough and doesn't need a version pin.
            AcceleratorState()._reset_state()

        self.create_accelerator_and_postprocess()
def _report_to_hp_search(self, trial: Union["optuna.Trial", dict[str, Any]], step: int, metrics: dict[str, float]):
if self.hp_search_backend is None or trial is None:
return
metrics = metrics.copy()
self.objective = self.compute_objective(metrics)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
if hasattr(trial, "study") and not trial.study._is_multi_objective():
trial.report(self.objective, step)
if trial.should_prune():
self.callback_handler.on_train_end(self.args, self.state, self.control)
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
import ray.tune
with tempfile.TemporaryDirectory() as temp_checkpoint_dir:
checkpoint = None
if self.control.should_save:
self._tune_save_checkpoint(checkpoint_dir=temp_checkpoint_dir)
checkpoint = ray.tune.Checkpoint.from_directory(temp_checkpoint_dir)
metrics["objective"] = self.objective
ray.tune.report(metrics, checkpoint=checkpoint)
def _tune_save_checkpoint(self, checkpoint_dir: str):
output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir, _internal_call=True)
if self.args.should_save:
# Update the `TrainerControl` state to where we are currently
self.state.stateful_callbacks["TrainerControl"] = self.control.state()
self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
def call_model_init(self, trial=None):
model_init_argcount = number_of_arguments(self.model_init)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def compare_trainer_and_checkpoint_args(self, training_args, trainer_state):
attributes_map = {
"logging_steps": "logging_steps",
"eval_steps": "eval_steps",
"save_steps": "save_steps",
}
has_warning = False
warning_str = "Warning: The following arguments do not match the ones in the `trainer_state.json` within the checkpoint directory: "
for arg_attr, state_attr in attributes_map.items():
arg_value = getattr(training_args, arg_attr, None)
state_value = getattr(trainer_state, state_attr, None)
if arg_value is not None and state_value is not None and arg_value != state_value:
warning_str += f"\n\t{arg_attr}: {arg_value} (from args) != {state_value} (from trainer_state.json)"
has_warning = True
# train bs is special as we need to account for multi-GPU
train_bs_args = training_args.per_device_train_batch_size
train_bs_state = trainer_state.train_batch_size // max(1, training_args.n_gpu)
if train_bs_args != train_bs_state:
warning_str += f"\n\tper_device_train_batch_size: {train_bs_args} (from args) != {train_bs_state} (from trainer_state.json)"
has_warning = True
if has_warning:
logger.warning_once(warning_str)
def _wrap_model(self, model, training=True, dataloader=None):
if is_sagemaker_mp_enabled():
# Wrapping the base model twice in a DistributedModel will raise an error.
if isinstance(self.model_wrapped, smp.model.DistributedModel):
return self.model_wrapped
return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)
# train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
if self.accelerator.unwrap_model(model, keep_torch_compile=False) is not model:
return model
# Multi-gpu training, 8bit models does not support DP
if self.args.n_gpu > 1 and not getattr(model, "is_loaded_in_8bit", False):
model = nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
if not training:
return model
# Distributed training using PyTorch FSDP
if self.is_fsdp_xla_enabled:
try:
from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP
from torch_xla.distributed.fsdp import checkpoint_module
from torch_xla.distributed.fsdp.wrap import (
size_based_auto_wrap_policy,
transformer_auto_wrap_policy,
)
if self.is_fsdp_xla_v2_enabled:
from torch_xla.experimental.spmd_fully_sharded_data_parallel import (
SpmdFullyShardedDataParallel as FSDPv2,
)
except ImportError:
raise ImportError("Missing XLA FSDP related module; please make sure to use torch-xla >= 2.0.")
auto_wrap_policy = None
auto_wrapper_callable = None
default_transformer_cls_names_to_wrap = getattr(model, "_no_split_modules", None)
fsdp_transformer_layer_cls_to_wrap = self.args.fsdp_config.get(
"transformer_layer_cls_to_wrap", default_transformer_cls_names_to_wrap
)
if self.args.fsdp_config["min_num_params"] > 0:
auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["min_num_params"]
)
elif fsdp_transformer_layer_cls_to_wrap is not None:
transformer_cls_to_wrap = set()
for layer_class in fsdp_transformer_layer_cls_to_wrap:
transformer_cls = get_module_class_from_name(model, layer_class)
if transformer_cls is None:
raise Exception("Could not find the transformer layer class to wrap in the model.")
else:
transformer_cls_to_wrap.add(transformer_cls)
auto_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
# Transformer layer class to wrap
transformer_layer_cls=transformer_cls_to_wrap,
)
fsdp_kwargs = self.args.xla_fsdp_config
if self.args.fsdp_config["xla_fsdp_grad_ckpt"]:
if model.config.use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
model.config.use_cache = False
# Apply gradient checkpointing to auto-wrapped sub-modules if specified
def auto_wrapper_callable(m, *args, **kwargs):
target_cls = FSDP if not self.is_fsdp_xla_v2_enabled else FSDPv2
return target_cls(checkpoint_module(m), *args, **kwargs)
# Wrap the base model with an outer FSDP wrapper
if self.is_fsdp_xla_v2_enabled:
def shard_output(output, mesh):
from .modeling_outputs import CausalLMOutputWithPast
real_output = None
if isinstance(output, torch.Tensor):
real_output = output
elif isinstance(output, tuple):
real_output = output[0]
elif isinstance(output, CausalLMOutputWithPast):
real_output = output.logits
if real_output is None:
raise ValueError("Something went wrong, the output of the model shouldn't be `None`")
xs.mark_sharding(real_output, mesh, ("fsdp", None, None))
self.model = model = FSDPv2(
model,
shard_output=shard_output,
auto_wrap_policy=auto_wrap_policy,
auto_wrapper_callable=auto_wrapper_callable,
)
else:
self.model = model = FSDP(
model,
auto_wrap_policy=auto_wrap_policy,
auto_wrapper_callable=auto_wrapper_callable,
**fsdp_kwargs,
)
# Patch `xm.optimizer_step` should not reduce gradients in this case,
# as FSDP does not need gradient reduction over sharded parameters.
def patched_optimizer_step(optimizer, barrier=False, optimizer_args={}):
loss = optimizer.step(**optimizer_args)
if barrier:
xm.mark_step()
return loss
xm.optimizer_step = patched_optimizer_step
elif is_sagemaker_dp_enabled():
model = nn.parallel.DistributedDataParallel(
model, device_ids=[int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))]
)
elif self.args.parallel_mode == ParallelMode.DISTRIBUTED:
if is_torch_neuroncore_available():
return model
kwargs = {}
if self.args.ddp_find_unused_parameters is not None:
kwargs["find_unused_parameters"] = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
kwargs["find_unused_parameters"] = not model.is_gradient_checkpointing
else:
kwargs["find_unused_parameters"] = True
if self.args.ddp_bucket_cap_mb is not None:
kwargs["bucket_cap_mb"] = self.args.ddp_bucket_cap_mb
if self.args.ddp_broadcast_buffers is not None:
kwargs["broadcast_buffers"] = self.args.ddp_broadcast_buffers
self.accelerator.ddp_handler = DistributedDataParallelKwargs(**kwargs)
return model
    def train(
        self,
        resume_from_checkpoint: str | bool | None = None,
        trial: Union["optuna.Trial", dict[str, Any], None] = None,
        ignore_keys_for_eval: list[str] | None = None,
    ):
        """
        Main training entry point.
        Args:
            resume_from_checkpoint (`str` or `bool`, *optional*):
                If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
                `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance
                of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here.
            trial (`optuna.Trial` or `dict[str, Any]`, *optional*):
                The trial run or the hyperparameter dictionary for hyperparameter search.
            ignore_keys_for_eval (`list[str]`, *optional*)
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions for evaluation during the training.
        Returns:
            The `TrainOutput` produced by `_inner_training_loop` (final global step, average
            training loss and the speed/flos metrics).
        """
        # `False` is treated the same as "no checkpoint": normalize it to None up front.
        if resume_from_checkpoint is False:
            resume_from_checkpoint = None
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        args = self.args
        self.is_in_train = True
        # If the model uses a tokenizer, it may have a new tokens for fine-tuning purposes.
        if isinstance(self.processing_class, (PreTrainedTokenizerBase, ProcessorMixin)) and hasattr(
            self.model, "config"
        ):
            self._align_special_tokens()
        # Attach NEFTune hooks if necessary
        if self.neftune_noise_alpha is not None:
            self.model = self._activate_neftune(self.model)
        # do_train is not a reliable argument, as it might not be set and .train() still called, so
        # the following is a workaround:
        if (
            (args.fp16_full_eval or args.bf16_full_eval)
            and not args.do_train
            and not self.is_model_parallel
            and self.model_init is None
        ):
            self._move_model_to_device(self.model, args.device)
        # This might change the seed so needs to run first.
        self._hp_search_setup(trial)
        self._train_batch_size = self.args.train_batch_size
        # Model re-init
        model_reloaded = False
        if self.model_init is not None:
            # Seed must be set before instantiating the model when using model_init.
            enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
            self.model = self.call_model_init(trial)
            model_reloaded = True
            # Reinitializes optimizer and scheduler
            self.optimizer, self.lr_scheduler = None, None
        # Load potential model checkpoint
        if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
            resume_from_checkpoint = get_last_checkpoint(args.output_dir)
            if resume_from_checkpoint is None:
                raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
        if resume_from_checkpoint is not None:
            # DeepSpeed/FSDP/SageMaker-MP load their weights later, inside the inner loop.
            if not is_sagemaker_mp_enabled() and not self.is_deepspeed_enabled and not self.is_fsdp_enabled:
                self._load_from_checkpoint(resume_from_checkpoint)
            # In case of repeating the find_executable_batch_size, set `self._train_batch_size` properly
            state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))
            if state.train_batch_size is not None:
                self._train_batch_size = state.train_batch_size
        # If model was re-initialized, put it on the right device and update self.model_wrapped
        if model_reloaded:
            if self.place_model_on_device:
                self._move_model_to_device(self.model, args.device)
            self.model_wrapped = self.model
        # Wrap the inner loop so it can be retried with smaller batch sizes on OOM
        # when `args.auto_find_batch_size` is enabled.
        inner_training_loop = find_executable_batch_size(
            self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size
        )
        if args.push_to_hub:
            try:
                # Disable progress bars when uploading models during checkpoints to avoid polluting stdout
                hf_hub_utils.disable_progress_bars()
                return inner_training_loop(
                    args=args,
                    resume_from_checkpoint=resume_from_checkpoint,
                    trial=trial,
                    ignore_keys_for_eval=ignore_keys_for_eval,
                )
            finally:
                hf_hub_utils.enable_progress_bars()
        else:
            return inner_training_loop(
                args=args,
                resume_from_checkpoint=resume_from_checkpoint,
                trial=trial,
                ignore_keys_for_eval=ignore_keys_for_eval,
            )
def get_sp_size(self) -> int:
"""Get the sequence parallel size"""
if getattr(self.accelerator, "parallelism_config", None) is None:
return 1
else:
pc = self.accelerator.parallelism_config
return pc.sp_size
def get_cp_size(self) -> int:
"""Get the context parallel size"""
if getattr(self.accelerator, "parallelism_config", None) is None:
return 1
else:
pc = self.accelerator.parallelism_config
return pc.cp_size
def get_tp_size(self) -> int:
"""Get the tensor parallel size from either the model or DeepSpeed config."""
# 1. Check model.tp_size first
if (model_tp := getattr(self.model, "_tp_size", None)) is not None:
return model_tp
# 2. Fall back to DeepSpeed config if enabled
if self.is_deepspeed_enabled and (deepspeed_config := getattr(self.args, "hf_deepspeed_config", None)):
return deepspeed_config.config.get("tensor_parallel", {}).get("autotp_size", 1)
# 3. Default fallback
return 1
def get_total_train_batch_size(self, args) -> int:
"""Calculates total batch size (micro_batch * grad_accum * dp_world_size).
Accounts for all parallelism dimensions: TP, CP, and SP.
Formula: dp_world_size = world_size // (tp_size * cp_size * sp_size)
Where:
- TP (Tensor Parallelism): Model layers split across GPUs
- CP (Context Parallelism): Sequences split using Ring Attention (FSDP2)
- SP (Sequence Parallelism): Sequences split using ALST/Ulysses (DeepSpeed)
All dimensions are separate and multiplicative: world_size = dp_size * tp_size * cp_size * sp_size
"""
dp_world_size = args.world_size // self.get_tp_size() // self.get_cp_size() // self.get_sp_size()
return self._train_batch_size * args.gradient_accumulation_steps * dp_world_size
    def _inner_training_loop(
        self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None
    ):
        """Run the actual training loop: dataloader setup, model/optimizer preparation,
        the epoch -> gradient-accumulation-chunk -> step loops, and final metrics.

        Called via `find_executable_batch_size`, so `batch_size` may shrink between
        invocations when `args.auto_find_batch_size` is enabled. Returns a `TrainOutput`
        with the final global step, average training loss and speed/flos metrics.
        """
        self.accelerator.free_memory()
        self._train_batch_size = batch_size
        if self.args.auto_find_batch_size:
            # Batch size changed since the last attempt: drop the old wrapped model
            # and re-propagate the new size into the DeepSpeed config if needed.
            if self.state.train_batch_size != self._train_batch_size:
                release_memory(self.model_wrapped)
                self.model_wrapped = self.model
                # Check for DeepSpeed *after* the initial pass and modify the config
                if self.is_deepspeed_enabled:
                    # Temporarily unset `self.args.train_batch_size`
                    original_bs = self.args.per_device_train_batch_size
                    self.args.per_device_train_batch_size = self._train_batch_size // max(1, self.args.n_gpu)
                    self.propagate_args_to_deepspeed(True)
                    self.args.per_device_train_batch_size = original_bs
            self.state.train_batch_size = self._train_batch_size
        logger.debug(f"Currently training with a batch size of: {self._train_batch_size}")
        # Data loader and number of training steps
        train_dataloader = self.get_train_dataloader()
        if self.is_fsdp_xla_v2_enabled:
            train_dataloader = tpu_spmd_dataloader(train_dataloader)
        # Setting up training control variables:
        # number of training epochs: num_train_epochs
        # number of training steps per epoch: num_update_steps_per_epoch
        # total number of training steps to execute: max_steps
        total_train_batch_size = self.get_total_train_batch_size(args)
        (
            num_train_epochs,
            num_update_steps_per_epoch,
            num_examples,
            num_train_samples,
            epoch_based,
            len_dataloader,
            max_steps,
        ) = self.set_initial_training_values(args, train_dataloader, total_train_batch_size)
        if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
            if self.args.n_gpu > 1:
                # nn.DataParallel(model) replicates the model, creating new variables and module
                # references registered here no longer work on other gpus, breaking the module
                raise ValueError(
                    "Currently --debug underflow_overflow is not supported under DP. Please use DDP"
                    " (torchrun or torch.distributed.launch (deprecated))."
                )
            else:
                DebugUnderflowOverflow(self.model)
        delay_optimizer_creation = is_sagemaker_mp_enabled() or self.is_fsdp_xla_enabled or self.is_fsdp_enabled
        # Can't delay optimizer creation when using FSDP2: https://github.com/huggingface/accelerate/blob/3f636d626063ffcf9a337c7d3624d61b7d187d59/src/accelerate/accelerator.py#L1404
        is_fsdp2 = self.is_fsdp_enabled and (getattr(self.accelerator.state.fsdp_plugin, "fsdp_version", 1) == 2)
        if is_fsdp2:
            delay_optimizer_creation = False
        # We need to reset the scheduler, as its parameters may be different on subsequent calls
        if self._created_lr_scheduler:
            self.lr_scheduler = None
            self._created_lr_scheduler = False
        if self.is_deepspeed_enabled:
            self.optimizer, self.lr_scheduler = deepspeed_init(self, num_training_steps=max_steps)
        if not delay_optimizer_creation:
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)
        self.state = TrainerState(
            stateful_callbacks=[
                cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState)
            ]
        )
        self.state.is_hyper_param_search = trial is not None
        self.state.train_batch_size = self._train_batch_size
        # Compute absolute values for logging, eval, and save if given as ratio
        self.state.compute_steps(args, max_steps)
        # Activate gradient checkpointing if needed
        if args.gradient_checkpointing:
            self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=args.gradient_checkpointing_kwargs)
        model = self._wrap_model(self.model_wrapped)
        # as the model is wrapped, don't use `accelerator.prepare`
        # this is for unhandled cases such as
        # FSDP-XLA, SageMaker MP/DP, DataParallel, IPEX
        use_accelerator_prepare = model is self.model
        if use_accelerator_prepare and self.is_fsdp_enabled:
            # In case of auto_find_batch_size=True
            # Remove FSDP wrapping from sub-models.
            self.model = unwrap_model(self.model, recursive=True)
        if delay_optimizer_creation:
            if use_accelerator_prepare:
                # configure fsdp plugin for qlora if any
                self._fsdp_qlora_plugin_updates()
                if self.accelerator.mixed_precision != "fp8":
                    self.model = self.accelerator.prepare(self.model)
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)
        # prepare using `accelerator` prepare
        if use_accelerator_prepare:
            self.model.train()
            if hasattr(self.lr_scheduler, "step"):
                # We should avoid accelerate preparing the model in TP case since we dont need it as it is handled by transformers from_pretrained and also it goes into DDP based preparation.
                if self.is_tp_enabled:
                    self.optimizer = self.accelerator.prepare(self.optimizer)
                else:
                    model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer)
            else:
                # to handle cases wherein we pass "DummyScheduler" such as when it is specified in DeepSpeed config.
                model, self.optimizer, self.lr_scheduler = self.accelerator.prepare(
                    self.model, self.optimizer, self.lr_scheduler
                )
        else:
            self.optimizer = self.accelerator.prepare(self.optimizer)
        # since DataLoader was Accelerate prepared w/o a model arg in the same call, we now have to complete the DL wrapping for ALST/UlyssesSP, after model has been prepared
        pc = getattr(self.accelerator, "parallelism_config", None)
        if pc is not None and pc.sp_backend == "deepspeed" and pc.sp_enabled:
            train_dataloader = self.accelerator.deepspeed_ulysses_dl_adapter(train_dataloader, model)
        if self.is_fsdp_enabled:
            self.model = self.model_wrapped = model
        # for the rest of this function `model` is the outside model, whether it was wrapped or not
        if model is not self.model:
            self.model_wrapped = model
        # backward compatibility
        if self.is_deepspeed_enabled:
            self.deepspeed = self.model_wrapped
        # ckpt loading
        if resume_from_checkpoint is not None:
            if self.is_deepspeed_enabled:
                deepspeed_load_checkpoint(
                    self.model_wrapped, resume_from_checkpoint, load_module_strict=not _is_peft_model(self.model)
                )
            elif is_sagemaker_mp_enabled() or self.is_fsdp_enabled:
                self._load_from_checkpoint(resume_from_checkpoint, self.model_wrapped)
        # Check if saved optimizer or scheduler states exist
        self._load_optimizer_and_scheduler(resume_from_checkpoint)
        self._load_scaler(resume_from_checkpoint)
        # important: at this point:
        # self.model         is the Transformers Model
        # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model),
        # FSDP(Transformers Model), Dynamo Optimized Module(Transformers Model) etc.
        # Train!
        logger.info("***** Running training *****")
        logger.info(f"  Num examples = {num_examples:,}")
        logger.info(f"  Num Epochs = {num_train_epochs:,}")
        logger.info(f"  Instantaneous batch size per device = {self.args.per_device_train_batch_size:,}")
        if self.args.per_device_train_batch_size != self._train_batch_size:
            logger.info(f"  Training with DataParallel so batch size has been adjusted to: {self._train_batch_size:,}")
        logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size:,}")
        logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
        logger.info(f"  Total optimization steps = {max_steps:,}")
        logger.info(f"  Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}")
        self.state.epoch = 0
        start_time = time.time()
        self.initial_num_input_tokens_seen_for_session = self.state.num_input_tokens_seen
        epochs_trained = 0
        steps_trained_in_current_epoch = 0
        # Check if continuing training from a checkpoint
        if resume_from_checkpoint is not None and os.path.isfile(
            os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)
        ):
            self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))
            self.compare_trainer_and_checkpoint_args(self.args, self.state)
            self._load_callback_state()
            epochs_trained = int(self.state.global_step // num_update_steps_per_epoch)
            if not args.ignore_data_skip:
                steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
                steps_trained_in_current_epoch *= args.gradient_accumulation_steps
            else:
                steps_trained_in_current_epoch = 0
            logger.info("  Continuing training from checkpoint, will skip to saved global_step")
            logger.info(f"  Continuing training from epoch {epochs_trained}")
            logger.info(f"  Continuing training from global step {self.state.global_step}")
            if not args.ignore_data_skip:
                logger.info(
                    f"  Will skip the first {epochs_trained} epochs then the first"
                    f" {steps_trained_in_current_epoch} batches in the first epoch."
                )
        # Update the references
        for attr in ("model", "optimizer", "lr_scheduler"):
            setattr(self.callback_handler, attr, getattr(self, attr))
        self.callback_handler.train_dataloader = train_dataloader
        self.state.init_training_references(self, max_steps, num_train_epochs, trial)
        # tr_loss is a tensor to avoid synchronization of TPUs through .item()
        tr_loss = torch.tensor(0.0, device=args.device)
        # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
        self._total_loss_scalar = 0.0
        self._globalstep_last_logged = self.state.global_step
        model.zero_grad()
        grad_norm: float | None = None
        learning_rate = None
        self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
        if args.eval_on_start:
            self._evaluate(trial, ignore_keys_for_eval, skip_scheduler=True)
        # Main epoch loop.
        for epoch in range(epochs_trained, num_train_epochs):
            epoch_dataloader = train_dataloader
            if hasattr(epoch_dataloader, "set_epoch"):
                epoch_dataloader.set_epoch(epoch)
            steps_in_epoch = (
                len(epoch_dataloader)
                if len_dataloader is not None
                else args.max_steps * args.gradient_accumulation_steps
            )
            self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)
            step = -1
            rng_to_sync = False
            # Handle resumption from checkpoint
            if epoch == epochs_trained and resume_from_checkpoint is not None:
                if steps_trained_in_current_epoch > 0 and not args.ignore_data_skip:
                    epoch_dataloader = skip_first_batches(epoch_dataloader, steps_trained_in_current_epoch)
                    step = steps_trained_in_current_epoch - 1
                    rng_to_sync = True
                elif steps_trained_in_current_epoch == 0:
                    self._load_rng_state(resume_from_checkpoint)
            epoch_iterator = iter(epoch_dataloader)
            # We chunkify the epoch iterator into gradient accumulation steps `n` batches
            remainder = steps_in_epoch % args.gradient_accumulation_steps
            if remainder == 0:
                remainder = args.gradient_accumulation_steps
            update_step = -1
            total_updates = steps_in_epoch // args.gradient_accumulation_steps + int(
                remainder < args.gradient_accumulation_steps
            )
            for _ in range(total_updates):
                update_step += 1
                num_batches = args.gradient_accumulation_steps if update_step != (total_updates - 1) else remainder
                batch_samples, num_items_in_batch = self.get_batch_samples(epoch_iterator, num_batches, args.device)
                # Store the number of batches for current gradient accumulation
                # This is used to correctly scale the loss when the last accumulation step has fewer batches
                self.current_gradient_accumulation_steps = len(batch_samples)
                for i, inputs in enumerate(batch_samples):
                    step += 1
                    do_sync_step = (step + 1) % args.gradient_accumulation_steps == 0 or (step + 1) == steps_in_epoch
                    # Since we perform prefetching, we need to manually set sync_gradients
                    self.accelerator.gradient_state._set_sync_gradients(do_sync_step)
                    if self.args.include_num_input_tokens_seen != "no":
                        main_input_name = getattr(self.model, "main_input_name", "input_ids")
                        if main_input_name not in inputs:
                            logger.warning(
                                "Tried to track the number of tokens seen, however the current model is "
                                "not configured properly to know what item is the input. To fix this, add "
                                "a `main_input_name` attribute to the model class you are using."
                            )
                        else:
                            if self.args.include_num_input_tokens_seen == "non_padding":
                                if "attention_mask" in inputs:
                                    input_tokens = inputs["attention_mask"].sum()
                                elif (
                                    self.processing_class is not None
                                    and hasattr(self.processing_class, "pad_token_id")
                                    and self.processing_class.pad_token_id is not None
                                ):
                                    input_tokens = (
                                        inputs[main_input_name] != self.processing_class.pad_token_id
                                    ).sum()
                                else:
                                    logger.warning(
                                        "Could not determine method to count non-padding tokens, falling back to counting all tokens."
                                    )
                                    input_tokens = inputs[main_input_name].numel()
                            else:
                                input_tokens = inputs[main_input_name].numel()
                            input_tokens = torch.tensor(input_tokens, device=self.args.device, dtype=torch.int64)
                            self.state.num_input_tokens_seen += self.accelerator.gather(input_tokens).sum().item()
                    if rng_to_sync:
                        self._load_rng_state(resume_from_checkpoint)
                        rng_to_sync = False
                    if step % args.gradient_accumulation_steps == 0:
                        self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
                    # We explicitly want to avoid relying on `accelerator.accumulate` for generation training
                    context = (
                        functools.partial(self.accelerator.no_sync, model=model)
                        if i != len(batch_samples) - 1
                        and self.accelerator.distributed_type != DistributedType.DEEPSPEED
                        else contextlib.nullcontext
                    )
                    with context():
                        tr_loss_step = self.training_step(model, inputs, num_items_in_batch)
                    if (
                        args.logging_nan_inf_filter
                        and not is_torch_xla_available()
                        and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))
                    ):
                        # if loss is nan or inf simply add the average of previous logged losses
                        tr_loss = tr_loss + tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)
                    else:
                        if tr_loss.device != tr_loss_step.device:
                            raise ValueError(
                                f"Calculated loss must be on the original device: {tr_loss.device} but device in use is {tr_loss_step.device}"
                            )
                        tr_loss = tr_loss + tr_loss_step
                    self.current_flos += float(self.floating_point_ops(inputs))
                    if do_sync_step:
                        # Since we perform prefetching, we need to manually set sync_gradients to True
                        self.accelerator.gradient_state._set_sync_gradients(True)
                        # Gradient clipping
                        if args.max_grad_norm is not None and args.max_grad_norm > 0:
                            if is_sagemaker_mp_enabled() and args.fp16:
                                _grad_norm = self.optimizer.clip_master_grads(args.max_grad_norm)
                            else:
                                grad_norm_context = contextlib.nullcontext
                                if self.is_tp_enabled:
                                    from torch.distributed._tensor.experimental import implicit_replication
                                    grad_norm_context = implicit_replication
                                with grad_norm_context():
                                    _grad_norm = self.accelerator.clip_grad_norm_(
                                        model.parameters(),
                                        args.max_grad_norm,
                                    )
                            if self.accelerator.distributed_type == DistributedType.DEEPSPEED:
                                grad_norm = model.get_global_grad_norm()
                                # In some cases the grad norm may not return a float
                                if hasattr(grad_norm, "item"):
                                    grad_norm = grad_norm.item()
                            else:
                                grad_norm = _grad_norm
                        self.control = self.callback_handler.on_pre_optimizer_step(args, self.state, self.control)
                        context = contextlib.nullcontext
                        if self.is_tp_enabled:
                            from torch.distributed._tensor.experimental import implicit_replication
                            context = implicit_replication
                        with context():
                            self.optimizer.step()
                        self.control = self.callback_handler.on_optimizer_step(args, self.state, self.control)
                        # get leaning rate before update
                        learning_rate = self._get_learning_rate()
                        if not self.accelerator.optimizer_step_was_skipped:
                            # Delay optimizer scheduling until metrics are generated
                            if not isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
                                self.lr_scheduler.step()
                        model.zero_grad()
                        self.state.global_step += 1
                        self.state.epoch = epoch + (step + 1) / steps_in_epoch
                        self.control = self.callback_handler.on_step_end(args, self.state, self.control)
                        self._maybe_log_save_evaluate(
                            tr_loss,
                            grad_norm,
                            model,
                            trial,
                            epoch,
                            ignore_keys_for_eval,
                            start_time,
                            learning_rate=learning_rate,
                        )
                    else:
                        self.control = self.callback_handler.on_substep_end(args, self.state, self.control)
                    # PyTorch/XLA relies on the data loader to insert the mark_step for
                    # each step. Since we are breaking the loop early, we need to manually
                    # insert the mark_step here.
                    if self.control.should_epoch_stop or self.control.should_training_stop:
                        if is_torch_xla_available():
                            xm.mark_step()
                        break
                # We also need to break out of the nested loop
                if self.control.should_epoch_stop or self.control.should_training_stop:
                    if is_torch_xla_available():
                        xm.mark_step()
                    break
            if step < 0:
                logger.warning(
                    "There seems not to be a single sample in your epoch_iterator, stopping training at step"
                    f" {self.state.global_step}! This is expected if you're using an IterableDataset and set"
                    f" num_steps ({max_steps}) higher than the number of available samples."
                )
                self.control.should_training_stop = True
            self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
            self._maybe_log_save_evaluate(
                tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time, learning_rate=learning_rate
            )
            if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
                if is_torch_xla_available():
                    # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                    xm.master_print(met.metrics_report())
                else:
                    logger.warning(
                        "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
                        "configured. Check your training configuration if this is unexpected."
                    )
            if self.control.should_training_stop:
                break
        logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
        if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            self._load_best_model()
        # add remaining tr_loss
        self._total_loss_scalar += tr_loss.item()
        effective_global_step = max(self.state.global_step, 0.001)  # Avoid ZeroDivisionError
        train_loss = self._total_loss_scalar / effective_global_step
        metrics = speed_metrics(
            "train",
            start_time,
            num_samples=num_train_samples,
            num_steps=self.state.max_steps,
        )
        self.store_flos()
        metrics["total_flos"] = self.state.total_flos
        metrics["train_loss"] = train_loss
        self.is_in_train = False
        self._memory_tracker.stop_and_update_metrics(metrics)
        self.log(metrics)
        run_dir = self._get_output_dir(trial)
        checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir)
        # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint and process allowed to save.
        if self.args.should_save and self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1:
            for checkpoint in checkpoints_sorted:
                if not os.path.samefile(checkpoint, self.state.best_model_checkpoint):
                    logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
                    shutil.rmtree(checkpoint, ignore_errors=True)
        self.control = self.callback_handler.on_train_end(args, self.state, self.control)
        # Wait for the checkpoint to be uploaded.
        self._finish_current_push()
        # After training we make sure to retrieve back the original forward pass method
        # for the embedding layer by removing the forward post hook.
        if self.neftune_noise_alpha is not None:
            self._deactivate_neftune(self.model)
        return TrainOutput(self.state.global_step, train_loss, metrics)
def _get_output_dir(self, trial):
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
elif self.hp_search_backend == HPSearchBackend.RAY:
import ray.tune
run_id = ray.tune.get_context().get_trial_id()
elif self.hp_search_backend == HPSearchBackend.WANDB:
import wandb
run_id = wandb.run.id
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
run_dir = os.path.join(self.args.output_dir, run_name)
else:
run_dir = self.args.output_dir
return run_dir
    def _load_from_checkpoint(self, resume_from_checkpoint, model=None):
        """Load model weights (or PEFT adapters) from `resume_from_checkpoint` into `model`.

        Handles plain/safetensors weight files, sharded checkpoints, FSDP and
        SageMaker-MP checkpoints, and PEFT adapter (sub)directories. Defaults to
        `self.model` when `model` is None. Raises ValueError when no usable
        checkpoint is found, or when an FSDP checkpoint is found but FSDP is not enabled.
        """
        if model is None:
            model = self.model
        # Candidate file locations for every supported checkpoint format.
        config_file = os.path.join(resume_from_checkpoint, CONFIG_NAME)
        adapter_weights_file = os.path.join(resume_from_checkpoint, ADAPTER_WEIGHTS_NAME)
        adapter_safe_weights_file = os.path.join(resume_from_checkpoint, ADAPTER_SAFE_WEIGHTS_NAME)
        weights_file = os.path.join(resume_from_checkpoint, WEIGHTS_NAME)
        weights_index_file = os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME)
        safe_weights_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_NAME)
        safe_weights_index_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_INDEX_NAME)
        is_fsdp_ckpt = os.path.isdir(resume_from_checkpoint) and (
            # this checks the FSDP state dict when `SHARDED_STATE_DICT` is used
            any(
                FSDP_MODEL_NAME in folder_name
                for folder_name in os.listdir(resume_from_checkpoint)
                if os.path.isdir(os.path.join(resume_from_checkpoint, folder_name))
            )
            # this checks the FSDP state dict when `FULL_STATE_DICT` is used
            or os.path.isfile(os.path.join(resume_from_checkpoint, f"{FSDP_MODEL_NAME}.bin"))
        )
        # if multiple adapters exist, they get saved in sub directories
        adapter_subdirs = (
            [
                folder_name
                for folder_name in os.listdir(resume_from_checkpoint)
                if os.path.isdir(os.path.join(resume_from_checkpoint, folder_name))
                and (
                    os.path.isfile(os.path.join(resume_from_checkpoint, folder_name, ADAPTER_WEIGHTS_NAME))
                    or os.path.isfile(os.path.join(resume_from_checkpoint, folder_name, ADAPTER_SAFE_WEIGHTS_NAME))
                )
            ]
            if os.path.isdir(resume_from_checkpoint)
            else []
        )
        if is_fsdp_ckpt and not self.is_fsdp_enabled:
            raise ValueError(f"Checkpoint found at {resume_from_checkpoint} is only supported when using PyTorch FSDP")
        # Bail out early when none of the known checkpoint formats is present.
        if not (
            any(
                os.path.isfile(f)
                for f in [
                    weights_file,
                    safe_weights_file,
                    weights_index_file,
                    safe_weights_index_file,
                    adapter_weights_file,
                    adapter_safe_weights_file,
                ]
            )
            or is_fsdp_ckpt
            or adapter_subdirs
        ):
            raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
        logger.info(f"Loading model from {resume_from_checkpoint}.")
        # Warn when the checkpoint was produced by a different transformers version.
        if os.path.isfile(config_file):
            config = PreTrainedConfig.from_json_file(config_file)
            checkpoint_version = config.transformers_version
            if checkpoint_version is not None and checkpoint_version != __version__:
                logger.warning(
                    f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
                    f"Transformers but your current version is {__version__}. This is not recommended and could "
                    "yield to errors or unwanted behaviors."
                )
        if os.path.isfile(weights_file) or os.path.isfile(safe_weights_file) or is_fsdp_ckpt:
            # If the model is on the GPU, it still works!
            if is_sagemaker_mp_enabled():
                smp.resume_from_checkpoint(
                    path=resume_from_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False
                )
            elif self.is_fsdp_enabled:
                load_fsdp_model(
                    self.accelerator.state.fsdp_plugin,
                    self.accelerator,
                    model,
                    resume_from_checkpoint,
                    **_get_fsdp_ckpt_kwargs(),
                )
            else:
                # We load the model state dict on the CPU to avoid an OOM error.
                if self.args.save_safetensors and os.path.isfile(safe_weights_file):
                    state_dict = safetensors.torch.load_file(safe_weights_file, device="cpu")
                else:
                    check_torch_load_is_safe()
                    state_dict = torch.load(weights_file, map_location="cpu", weights_only=True)
                # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
                # which takes *args instead of **kwargs
                load_result = model.load_state_dict(state_dict, False)
                # release memory
                del state_dict
                self._issue_warnings_after_load(load_result)
        # Load adapters following PR # 24096
        elif _is_peft_model(model):
            # If training a model using PEFT, assume that adapter have been saved properly.
            if hasattr(model, "active_adapters") and hasattr(model, "load_adapter"):
                if os.path.exists(resume_from_checkpoint):
                    active_adapters = model.active_adapters
                    if len(active_adapters) > 1:
                        logger.warning("Multiple active adapters detected will only consider the first adapter")
                    active_adapter = active_adapters[0]
                    if adapter_subdirs:
                        for subdir_name in adapter_subdirs:
                            peft_id = os.path.join(resume_from_checkpoint, subdir_name)
                            model.load_adapter(peft_id, subdir_name, is_trainable=(subdir_name == active_adapter))
                        model.set_adapter(active_adapter)
                    else:
                        model.load_adapter(resume_from_checkpoint, active_adapter, is_trainable=True)
                else:
                    logger.warning(
                        "The intermediate checkpoints of PEFT may not be saved correctly, "
                        f"consider using a custom callback to save {ADAPTER_WEIGHTS_NAME} in corresponding saving folders. "
                        "Check some examples here: https://github.com/huggingface/peft/issues/96"
                    )
            else:
                logger.warning(f"Could not load adapter model, make sure to have PEFT >= {MIN_PEFT_VERSION} installed")
        else:
            # We load the sharded checkpoint
            load_result = load_sharded_checkpoint(
                model, resume_from_checkpoint, strict=is_sagemaker_mp_enabled(), prefer_safe=self.args.save_safetensors
            )
            if not is_sagemaker_mp_enabled():
                self._issue_warnings_after_load(load_result)
    def _load_best_model(self):
        """Reload the best checkpoint (tracked in ``self.state.best_model_checkpoint``) into the model.

        Dispatches on the runtime configuration: DeepSpeed and FSDP use their dedicated checkpoint
        loaders, SageMaker MP resumes via ``smp``, PEFT models reload their adapter weights, and plain
        models load a full (safe)tensors state dict or a sharded checkpoint. Typically called at the end
        of training when ``load_best_model_at_end=True``.
        """
        logger.info(f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).")
        best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
        best_safe_model_path = os.path.join(self.state.best_model_checkpoint, SAFE_WEIGHTS_NAME)
        best_adapter_model_path = os.path.join(self.state.best_model_checkpoint, ADAPTER_WEIGHTS_NAME)
        best_safe_adapter_model_path = os.path.join(self.state.best_model_checkpoint, ADAPTER_SAFE_WEIGHTS_NAME)
        # Under SageMaker MP the wrapped model owns the parameters; otherwise load into the bare model.
        model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
        if self.is_deepspeed_enabled:
            deepspeed_load_checkpoint(
                self.model_wrapped,
                self.state.best_model_checkpoint,
                load_module_strict=not _is_peft_model(self.model),
            )
        elif self.is_fsdp_enabled:
            load_result = load_fsdp_model(
                self.accelerator.state.fsdp_plugin,
                self.accelerator,
                model,
                self.state.best_model_checkpoint,
                **_get_fsdp_ckpt_kwargs(),
            )
        elif (
            os.path.exists(best_model_path)
            or os.path.exists(best_safe_model_path)
            or os.path.exists(best_adapter_model_path)
            or os.path.exists(best_safe_adapter_model_path)
        ):
            has_been_loaded = True
            if is_sagemaker_mp_enabled():
                smp.resume_from_checkpoint(
                    path=self.state.best_model_checkpoint,
                    tag=WEIGHTS_NAME,
                    partial=False,
                    load_optimizer=False,
                )
            else:
                if _is_peft_model(model):
                    # If training a model using PEFT, assume that adapter have been saved properly.
                    if hasattr(model, "active_adapters") and hasattr(model, "load_adapter"):
                        # Only the first active adapter is restored when several are present.
                        active_adapter = model.active_adapters[0]
                        if len(model.active_adapters) > 1:
                            logger.warning("Detected multiple active adapters, will only consider the first one")
                        if os.path.exists(best_adapter_model_path) or os.path.exists(best_safe_adapter_model_path):
                            try:
                                model.load_adapter(self.state.best_model_checkpoint, active_adapter)
                            except RuntimeError as exc:
                                if model.peft_config[active_adapter].is_prompt_learning:
                                    # for context: https://github.com/huggingface/peft/issues/2256
                                    msg = (
                                        "When using prompt learning PEFT methods such as "
                                        f"{model.peft_config[active_adapter].peft_type.value}, setting "
                                        "load_best_model_at_end=True can lead to errors, it is recommended "
                                        "to set this to False and to load the model manually from the checkpoint "
                                        "directory using PeftModel.from_pretrained(base_model, <path>) after training "
                                        "has finished."
                                    )
                                    raise RuntimeError(msg) from exc
                                else:
                                    raise
                            # Load_adapter has no return value present, modify it when appropriate.
                            from torch.nn.modules.module import _IncompatibleKeys

                            load_result = _IncompatibleKeys([], [])
                        else:
                            logger.warning(
                                "The intermediate checkpoints of PEFT may not be saved correctly, "
                                f"consider using a custom callback to save {ADAPTER_WEIGHTS_NAME} in corresponding saving folders. "
                                "Check some examples here: https://github.com/huggingface/peft/issues/96"
                            )
                            has_been_loaded = False
                    else:
                        logger.warning(
                            f"Could not load adapter model, make sure to have PEFT >= {MIN_PEFT_VERSION} installed"
                        )
                        has_been_loaded = False
                else:
                    # We load the model state dict on the CPU to avoid an OOM error.
                    if self.args.save_safetensors and os.path.isfile(best_safe_model_path):
                        state_dict = safetensors.torch.load_file(best_safe_model_path, device="cpu")
                    else:
                        check_torch_load_is_safe()
                        state_dict = torch.load(best_model_path, map_location="cpu", weights_only=True)

                    # If the model is on the GPU, it still works!
                    # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
                    # which takes *args instead of **kwargs
                    load_result = model.load_state_dict(state_dict, False)
            if not is_sagemaker_mp_enabled() and has_been_loaded:
                self._issue_warnings_after_load(load_result)
        elif os.path.exists(os.path.join(self.state.best_model_checkpoint, SAFE_WEIGHTS_INDEX_NAME)) or os.path.exists(
            os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)
        ):
            # Sharded checkpoint: no single weights file exists, load shard-by-shard.
            load_result = load_sharded_checkpoint(
                model, self.state.best_model_checkpoint, strict=is_sagemaker_mp_enabled()
            )
            if not is_sagemaker_mp_enabled():
                self._issue_warnings_after_load(load_result)
        else:
            logger.warning(
                f"Could not locate the best model at {best_model_path}, if you are running a distributed training "
                "on multiple nodes, you should activate `--save_on_each_node`."
            )
def _issue_warnings_after_load(self, load_result):
if len(load_result.missing_keys) != 0:
if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set(
self.model._keys_to_ignore_on_save
):
self.model.tie_weights()
else:
logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
if len(load_result.unexpected_keys) != 0:
logger.warning(
f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}."
)
    def _evaluate(self, trial, ignore_keys_for_eval, skip_scheduler=False):
        """Run `self.evaluate`, report metrics to any active hyperparameter-search backend, and step a
        `ReduceLROnPlateau` scheduler (which requires metrics and therefore cannot run in the regular
        per-step scheduler update). Returns the metrics dict produced by `evaluate`."""
        metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
        self._report_to_hp_search(trial, self.state.global_step, metrics)

        # Run delayed LR scheduler now that metrics are populated
        if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) and not skip_scheduler:
            metric_to_check = self.args.metric_for_best_model
            # Evaluation metrics are reported with an "eval_" prefix; normalize the user-supplied name.
            if not metric_to_check.startswith("eval_"):
                metric_to_check = f"eval_{metric_to_check}"
            try:
                self.lr_scheduler.step(metrics[metric_to_check])
            except KeyError as exc:
                raise KeyError(
                    f"The `metric_for_best_model` training argument is set to '{metric_to_check}', "
                    f"which is not found in the evaluation metrics. "
                    f"The available evaluation metrics are: {list(metrics.keys())}. "
                    f"Please ensure that the `compute_metrics` function returns a dictionary that includes '{metric_to_check}' or "
                    f"consider changing the `metric_for_best_model` via the TrainingArguments."
                ) from exc
        return metrics
    def _maybe_log_save_evaluate(
        self, tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time, learning_rate=None
    ):
        """Depending on the flags in `self.control`, log accumulated training metrics, run evaluation,
        and/or save a checkpoint. Called from the training loop at step/epoch boundaries."""
        if self.control.should_log and self.state.global_step > self._globalstep_last_logged:
            if is_torch_xla_available():
                xm.mark_step()

            logs: dict[str, float] = {}

            # all_gather + mean() to get average loss over all processes
            tr_loss_scalar = self._nested_gather(tr_loss).mean().item()

            # reset tr_loss to zero
            tr_loss -= tr_loss

            # Average the summed loss over the steps elapsed since the last log.
            logs["loss"] = tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged)
            if grad_norm is not None:
                logs["grad_norm"] = grad_norm.item() if isinstance(grad_norm, torch.Tensor) else grad_norm
            if learning_rate is not None:
                logs["learning_rate"] = learning_rate
            else:
                logs["learning_rate"] = self._get_learning_rate()

            self._total_loss_scalar += tr_loss_scalar
            self._globalstep_last_logged = self.state.global_step
            self.store_flos()

            self.log(logs, start_time)

        metrics = None
        if self.control.should_evaluate:
            metrics = self._evaluate(trial, ignore_keys_for_eval)
            is_new_best_metric = self._determine_best_metric(metrics=metrics, trial=trial)

            # Under the BEST save strategy, only save when this evaluation produced a new best metric.
            if self.args.save_strategy == SaveStrategy.BEST:
                self.control.should_save = is_new_best_metric

        if self.control.should_save:
            self._save_checkpoint(model, trial)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)
    def _load_rng_state(self, checkpoint):
        # Load RNG states from `checkpoint`
        """Restore Python/NumPy/torch (and any available accelerator) RNG states saved in `checkpoint`,
        so a resumed run continues the same random sequence. No-op if `checkpoint` is None or the RNG
        file for this process is missing."""
        if checkpoint is None:
            return

        if self.args.world_size > 1:
            # In distributed training each process saved its own RNG file.
            process_index = self.args.process_index
            rng_file = os.path.join(checkpoint, f"rng_state_{process_index}.pth")
            if not os.path.isfile(rng_file):
                logger.info(
                    f"Didn't find an RNG file for process {process_index}, if you are resuming a training that "
                    "wasn't launched in a distributed fashion, reproducibility is not guaranteed."
                )
                return
        else:
            rng_file = os.path.join(checkpoint, "rng_state.pth")
            if not os.path.isfile(rng_file):
                logger.info(
                    "Didn't find an RNG file, if you are resuming a training that was launched in a distributed "
                    "fashion, reproducibility is not guaranteed."
                )
                return

        # safe_globals allowlists the classes needed to unpickle the RNG-state payload.
        with safe_globals():
            checkpoint_rng_state = torch.load(rng_file)
        random.setstate(checkpoint_rng_state["python"])
        np.random.set_state(checkpoint_rng_state["numpy"])
        torch.random.set_rng_state(checkpoint_rng_state["cpu"])

        if is_torch_xla_available():
            xm.set_rng_state(checkpoint_rng_state["xla"])

        is_distributed = self.args.parallel_mode == ParallelMode.DISTRIBUTED
        if torch.cuda.is_available():
            set_rng_state_for_device("CUDA", torch.cuda, checkpoint_rng_state, is_distributed)
        if is_torch_npu_available():
            set_rng_state_for_device("NPU", torch.npu, checkpoint_rng_state, is_distributed)
        if is_torch_hpu_available():
            set_rng_state_for_device("HPU", torch.hpu, checkpoint_rng_state, is_distributed)
        if is_torch_mlu_available():
            set_rng_state_for_device("MLU", torch.mlu, checkpoint_rng_state, is_distributed)
        if is_torch_musa_available():
            set_rng_state_for_device("MUSA", torch.musa, checkpoint_rng_state, is_distributed)
def _determine_best_metric(self, metrics, trial):
"""
Determine if the model should be saved based on the evaluation metrics.
Returns:
bool: True if a new best metric was found, else False
"""
is_new_best_metric = False
if self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
try:
metric_value = metrics[metric_to_check]
except KeyError as exc:
raise KeyError(
f"The `metric_for_best_model` training argument is set to '{metric_to_check}', which is not found in the evaluation metrics. "
f"The available evaluation metrics are: {list(metrics.keys())}. Consider changing the `metric_for_best_model` via the TrainingArguments."
) from exc
operator = np.greater if self.args.greater_is_better else np.less
if self.state.best_metric is None:
self.state.best_metric = float("-inf") if self.args.greater_is_better else float("inf")
if operator(metric_value, self.state.best_metric):
self.state.best_metric = metric_value
if self.args.save_strategy in [SaveStrategy.STEPS, SaveStrategy.EPOCH]:
self.state.best_global_step = self.state.global_step
is_new_best_metric = True
return is_new_best_metric
    def _save_checkpoint(self, model, trial):
        # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
        # want to save except FullyShardedDDP.
        # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
        """Save a full training checkpoint (model, optionally optimizer/scheduler/scaler/RNG state and
        trainer state) under `checkpoint-<global_step>` inside the run directory, then rotate old
        checkpoints if a save limit is configured."""

        # Save model checkpoint
        checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"

        if self.hp_search_backend is None and trial is None:
            self.store_flos()

        run_dir = self._get_output_dir(trial=trial)
        output_dir = os.path.join(run_dir, checkpoint_folder)
        self.save_model(output_dir, _internal_call=True)

        if self.args.save_strategy in [SaveStrategy.STEPS, SaveStrategy.EPOCH] and self.state.best_global_step:
            # Wait for everyone to get here so we are sure the model has been saved by process 0
            # before we check if the best_checkpoint_dir exists
            if is_torch_xla_available():
                xm.rendezvous("load_best_model_at_end")
            elif self.args.parallel_mode == ParallelMode.DISTRIBUTED:
                dist.barrier()
            elif is_sagemaker_mp_enabled():
                smp.barrier()

            best_checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.best_global_step}"
            best_checkpoint_dir = os.path.join(run_dir, best_checkpoint_folder)

            if os.path.exists(best_checkpoint_dir):
                self.state.best_model_checkpoint = best_checkpoint_dir

        if not self.args.save_only_model:
            # Save optimizer and scheduler
            self._save_optimizer_and_scheduler(output_dir)
            self._save_scaler(output_dir)
            # Save RNG state
            self._save_rng_state(output_dir)

        # Save the Trainer state
        if self.args.should_save:
            # Update `ExportableState` callbacks and `TrainerControl` state to where we are currently
            for cb in [
                cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState)
            ]:
                cb_name = cb.__class__.__name__
                cb_state = cb.state()
                if isinstance(self.state.stateful_callbacks[cb_name], list):
                    self.state.stateful_callbacks[cb_name].append(cb_state)
                else:
                    self.state.stateful_callbacks[cb_name] = cb_state
            self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))

        if self.args.push_to_hub:
            self._push_from_checkpoint(output_dir)

        # Maybe delete some older checkpoints.
        if self.args.should_save:
            # we use mtime as default, filesystems without mtime support will be detected in `_sorted_checkpoints`
            self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
    def _save_rng_state(self, output_dir):
        # Save RNG state in non-distributed training
        """Persist Python/NumPy/torch (and any available accelerator) RNG states to `output_dir`, one
        file per process in distributed runs, so training can be resumed reproducibly."""
        rng_states = {
            "python": random.getstate(),
            "numpy": np.random.get_state(),
            "cpu": torch.random.get_rng_state(),
        }
        if torch.cuda.is_available():
            if self.args.parallel_mode == ParallelMode.DISTRIBUTED:
                # In non distributed, we save the global CUDA RNG state (will take care of DataParallel)
                rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
            else:
                rng_states["cuda"] = torch.cuda.random.get_rng_state()

        if is_torch_xla_available():
            rng_states["xla"] = xm.get_rng_state()

        if is_torch_npu_available():
            if self.args.parallel_mode == ParallelMode.DISTRIBUTED:
                rng_states["npu"] = torch.npu.random.get_rng_state_all()
            else:
                rng_states["npu"] = torch.npu.random.get_rng_state()

        if is_torch_hpu_available():
            if self.args.parallel_mode == ParallelMode.DISTRIBUTED:
                rng_states["hpu"] = torch.hpu.random.get_rng_state_all()
            else:
                rng_states["hpu"] = torch.hpu.random.get_rng_state()

        if is_torch_mlu_available():
            if self.args.parallel_mode == ParallelMode.DISTRIBUTED:
                rng_states["mlu"] = torch.mlu.random.get_rng_state_all()
            else:
                rng_states["mlu"] = torch.mlu.random.get_rng_state()

        if is_torch_musa_available():
            if self.args.parallel_mode == ParallelMode.DISTRIBUTED:
                rng_states["musa"] = torch.musa.get_rng_state_all()
            else:
                rng_states["musa"] = torch.musa.get_rng_state()

        # A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
        # not yet exist.
        os.makedirs(output_dir, exist_ok=True)

        if self.args.world_size <= 1:
            torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
        else:
            torch.save(rng_states, os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth"))
    def _save_optimizer_and_scheduler(self, output_dir):
        """Save optimizer and LR-scheduler state to `output_dir`, using the backend-specific mechanism
        (XLA/FSDP-XLA-v1, SageMaker MP, DeepSpeed, FSDP) or plain `torch.save` otherwise."""
        if is_torch_xla_available():
            xm.rendezvous("saving_optimizer_states")
            if self.is_fsdp_xla_v1_enabled:
                # FSDP v1 on XLA: every process saves its own optimizer shard plus shard metadata.
                optm = {
                    "optimizer": self.optimizer.state_dict(),
                    "shard_metadata": self.model.get_shard_metadata(),
                }
                xm.save(
                    optm,
                    os.path.join(
                        output_dir, f"rank{self.args.process_index}-of-{self.args.world_size}-{OPTIMIZER_NAME}"
                    ),
                    master_only=False,
                )
            else:
                xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
            with warnings.catch_warnings(record=True) as caught_warnings:
                xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
                reissue_pt_warnings(caught_warnings)
        elif is_sagemaker_mp_enabled():
            opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False)
            smp.barrier()
            if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state:
                smp.save(
                    opt_state_dict,
                    os.path.join(output_dir, OPTIMIZER_NAME),
                    partial=True,
                    v3=smp.state.cfg.shard_optimizer_state,
                )
        elif self.is_deepspeed_enabled:
            # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed
            # config `stage3_gather_16bit_weights_on_model_save` is True
            accept_exclude_frozen_parameters = "exclude_frozen_parameters" in set(
                inspect.signature(self.model_wrapped.save_checkpoint).parameters.keys()
            )
            if accept_exclude_frozen_parameters and _is_peft_model(self.model):
                self.model_wrapped.save_checkpoint(output_dir, exclude_frozen_parameters=True)
            else:
                self.model_wrapped.save_checkpoint(output_dir)
        elif self.is_fsdp_enabled:
            # save fsdp specific ckpt for resuming from ckpt
            save_fsdp_model(
                self.accelerator.state.fsdp_plugin, self.accelerator, self.model, output_dir, **_get_fsdp_ckpt_kwargs()
            )
            save_fsdp_optimizer(
                self.accelerator.state.fsdp_plugin, self.accelerator, self.optimizer, self.model, output_dir
            )
        elif self.args.should_save:
            # deepspeed.save_checkpoint above saves model/optim/sched
            torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))

        # Save SCHEDULER & SCALER
        # DeepSpeed saves its own scheduler above unless a non-DeepSpeed scheduler is in use.
        is_deepspeed_custom_scheduler = self.is_deepspeed_enabled and not isinstance(
            self.lr_scheduler, DeepSpeedSchedulerWrapper
        )
        if (
            self.args.should_save
            and (not self.is_deepspeed_enabled or is_deepspeed_custom_scheduler)
            and not is_torch_xla_available()
        ):
            with warnings.catch_warnings(record=True) as caught_warnings:
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
            reissue_pt_warnings(caught_warnings)
    def _load_optimizer_and_scheduler(self, checkpoint):
        """If optimizer and scheduler states exist, load them.

        Mirrors `_save_optimizer_and_scheduler`: each backend (DeepSpeed, XLA/FSDP-XLA-v1,
        SageMaker MP, FSDP, plain torch) restores state from its own checkpoint layout.
        No-op when `checkpoint` is None or the expected files are absent.
        """
        if checkpoint is None:
            return

        if self.is_deepspeed_enabled:
            # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init
            if not isinstance(self.lr_scheduler, DeepSpeedSchedulerWrapper):
                with warnings.catch_warnings(record=True) as caught_warnings:
                    check_torch_load_is_safe()
                    self.lr_scheduler.load_state_dict(
                        torch.load(os.path.join(checkpoint, SCHEDULER_NAME), weights_only=True)
                    )
                reissue_pt_warnings(caught_warnings)
            return

        # The optimizer checkpoint may be a single file, a .bin file, or a directory of shards.
        checkpoint_file_exists = (
            glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*")
            if is_sagemaker_mp_enabled()
            else (
                os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME))
                or os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME_BIN))
                or (
                    os.path.isdir(checkpoint)
                    and any(
                        OPTIMIZER_NAME_BIN.split(".")[0] in folder_name
                        for folder_name in os.listdir(checkpoint)
                        if os.path.isdir(os.path.join(checkpoint, folder_name))
                    )
                )
            )
        )
        checkpoint_file_exists = (
            glob.glob(os.path.join(checkpoint, f"rank*-of-{self.args.world_size}-{OPTIMIZER_NAME}"))
            if self.is_fsdp_xla_v1_enabled
            else checkpoint_file_exists
        )

        if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)):
            # Load in optimizer and scheduler states
            if is_torch_xla_available():
                # On TPU we have to take some extra precautions to properly load the states on the right device.
                if self.is_fsdp_xla_v1_enabled:
                    check_torch_load_is_safe()
                    optimizer_state = torch.load(
                        os.path.join(
                            checkpoint, f"rank{self.args.process_index}-of-{self.args.world_size}-{OPTIMIZER_NAME}"
                        ),
                        map_location="cpu",
                        weights_only=True,
                    )
                    # We only need `optimizer` when resuming from checkpoint
                    optimizer_state = optimizer_state["optimizer"]
                else:
                    check_torch_load_is_safe()
                    optimizer_state = torch.load(
                        os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu", weights_only=True
                    )
                with warnings.catch_warnings(record=True) as caught_warnings:
                    check_torch_load_is_safe()
                    lr_scheduler_state = torch.load(
                        os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu", weights_only=True
                    )
                reissue_pt_warnings(caught_warnings)

                # States were loaded on CPU; move them onto the XLA device before restoring.
                xm.send_cpu_data_to_device(optimizer_state, self.args.device)
                xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)

                self.optimizer.load_state_dict(optimizer_state)
                self.lr_scheduler.load_state_dict(lr_scheduler_state)
            else:
                if is_sagemaker_mp_enabled():

                    def opt_load_hook(mod, opt):
                        # SageMaker MP restores optimizer state after the first optimizer step.
                        opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))

                    self.model_wrapped.register_post_step_hook(opt_load_hook)
                else:
                    # We use the CPU when training on one GPU to avoid OOM for GPU RAM when training big models.
                    # In distributed training however, we load directly on each GPU and risk the GPU OOM as it's more
                    # likely to get OOM on CPU (since we load num_gpu times the optimizer state
                    map_location = self.args.device if self.args.world_size > 1 else "cpu"
                    if self.is_fsdp_enabled:
                        load_fsdp_optimizer(
                            self.accelerator.state.fsdp_plugin,
                            self.accelerator,
                            self.optimizer,
                            self.model,
                            checkpoint,
                            **_get_fsdp_ckpt_kwargs(),
                        )
                    else:
                        check_torch_load_is_safe()
                        self.optimizer.load_state_dict(
                            torch.load(
                                os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location, weights_only=True
                            )
                        )
                with warnings.catch_warnings(record=True) as caught_warnings:
                    check_torch_load_is_safe()
                    self.lr_scheduler.load_state_dict(
                        torch.load(os.path.join(checkpoint, SCHEDULER_NAME), weights_only=True)
                    )
                reissue_pt_warnings(caught_warnings)
    def _save_scaler(self, output_dir):
        """Save the gradient scaler state (mixed-precision loss scaling) to `output_dir`, if the
        accelerator has one. No-op when no scaler is attached."""
        # See if there is a scaler attribute
        try:
            scaler = self.accelerator.scaler
        except AttributeError:
            return

        if scaler is None:
            return

        if is_torch_xla_available():
            xm.rendezvous("saving_scaler_state")
            with warnings.catch_warnings(record=True) as caught_warnings:
                xm.save(self.accelerator.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
            reissue_pt_warnings(caught_warnings)

        # Save SCALER
        if self.args.should_save and not is_torch_xla_available():
            with warnings.catch_warnings(record=True) as caught_warnings:
                torch.save(self.accelerator.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
            reissue_pt_warnings(caught_warnings)
    def _load_scaler(self, checkpoint):
        """If scaler state exists, load it."""
        if checkpoint is None:
            return

        checkpoint_file_exists = os.path.isfile(os.path.join(checkpoint, SCALER_NAME))

        if checkpoint_file_exists:
            # On TPU we have to take some extra precautions to properly load the states on the right device.
            # Load in scaler states
            if is_torch_xla_available():
                with warnings.catch_warnings(record=True) as caught_warnings:
                    check_torch_load_is_safe()
                    scaler_state = torch.load(
                        os.path.join(checkpoint, SCALER_NAME), map_location="cpu", weights_only=True
                    )
                reissue_pt_warnings(caught_warnings)
                # State was loaded on CPU; move it onto the XLA device before restoring.
                xm.send_cpu_data_to_device(scaler_state, self.args.device)
                self.accelerator.scaler.load_state_dict(scaler_state)
            else:
                with warnings.catch_warnings(record=True) as caught_warnings:
                    check_torch_load_is_safe()
                    self.accelerator.scaler.load_state_dict(
                        torch.load(os.path.join(checkpoint, SCALER_NAME), weights_only=True)
                    )
                reissue_pt_warnings(caught_warnings)
    def _load_callback_state(self):
        """If callback states exist and were passed in, restore their states if enabled"""
        if not self.args.restore_callback_states_from_checkpoint:
            return
        # Callback states are stored in stateful_callbacks
        not_found = []
        new_callbacks = []
        original_callbacks = self.callback_handler.callbacks + [self.control]
        for stored_callback, data in self.state.stateful_callbacks.items():
            if not isinstance(data, list):
                data = [data]
            if any(callback.__class__.__name__ == stored_callback for callback in original_callbacks):
                # We can load/restore from multiple callbacks of the same type.
                duplicates = [
                    callback for callback in original_callbacks if callback.__class__.__name__ == stored_callback
                ]
                for callback, callback_data in zip(duplicates, data):
                    # Rebuild each callback from its saved constructor args, then restore attributes.
                    args = callback_data.get("args", {})
                    attributes = callback_data.get("attributes", {})
                    new_callback = type(callback)(**args)
                    for attribute, value in attributes.items():
                        setattr(new_callback, attribute, value)
                    if isinstance(callback, TrainerControl):
                        # Specifically for restoring the `control` state
                        self.control = new_callback
                    else:
                        new_callbacks.append(new_callback)
                    # We remove the existing callback and add it to the list of new callbacks
                    self.callback_handler.remove_callback(type(new_callback))
                logger.info("Continuing training from checkpoint, restoring any callbacks that were passed in")
            else:
                not_found.append(stored_callback)
        if len(not_found) > 0:
            logger.warning(
                f"Checkpoint included callbacks not included in current configuration. Ignoring. ({', '.join(not_found)})"
            )
        for callback in new_callbacks:
            self.callback_handler.add_callback(callback)
    def hyperparameter_search(
        self,
        hp_space: Callable[["optuna.Trial"], dict[str, float]] | None = None,
        compute_objective: Callable[[dict[str, float]], float] | None = None,
        n_trials: int = 20,
        direction: str | list[str] = "minimize",
        backend: Union["str", HPSearchBackend] | None = None,
        hp_name: Callable[["optuna.Trial"], str] | None = None,
        **kwargs,
    ) -> BestRun | list[BestRun]:
        """
        Launch an hyperparameter search using `optuna` or `Ray Tune`. The optimized quantity is determined
        by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,
        the sum of all metrics otherwise.

        <Tip warning={true}>

        To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to
        reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to
        subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom
        optimizer/scheduler.

        </Tip>

        Args:
            hp_space (`Callable[["optuna.Trial"], dict[str, float]]`, *optional*):
                A function that defines the hyperparameter search space. Will default to
                [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`]
                depending on your backend.
            compute_objective (`Callable[[dict[str, float]], float]`, *optional*):
                A function computing the objective to minimize or maximize from the metrics returned by the `evaluate`
                method. Will default to [`~trainer_utils.default_compute_objective`].
            n_trials (`int`, *optional*, defaults to 20):
                The number of trial runs to test.
            direction (`str` or `list[str]`, *optional*, defaults to `"minimize"`):
                If it's single objective optimization, direction is `str`, can be `"minimize"` or `"maximize"`, you
                should pick `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or
                several metrics. If it's multi objectives optimization, direction is `list[str]`, can be List of
                `"minimize"` and `"maximize"`, you should pick `"minimize"` when optimizing the validation loss,
                `"maximize"` when optimizing one or several metrics.
            backend (`str` or [`~training_utils.HPSearchBackend`], *optional*):
                The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending
                on which one is installed. If all are installed, will default to optuna.
            hp_name (`Callable[["optuna.Trial"], str]]`, *optional*):
                A function that defines the trial/run name. Will default to None.
            kwargs (`dict[str, Any]`, *optional*):
                Additional keyword arguments for each backend:

                - `optuna`: parameters from
                  [optuna.study.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html)
                  and also the parameters `timeout`, `n_jobs` and `gc_after_trial` from
                  [optuna.study.Study.optimize](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize)
                - `ray`: parameters from [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run).
                  If `resources_per_trial` is not set in the `kwargs`, it defaults to 1 CPU core and 1 GPU (if available).
                  If `progress_reporter` is not set in the `kwargs`,
                  [ray.tune.CLIReporter](https://docs.ray.io/en/latest/tune/api/doc/ray.tune.CLIReporter.html) is used.

        Returns:
            [`trainer_utils.BestRun` or `list[trainer_utils.BestRun]`]: All the information about the best run or best
            runs for multi-objective optimization. Experiment summary can be found in `run_summary` attribute for Ray
            backend.
        """
        if backend is None:
            backend = default_hp_search_backend()
        backend = HPSearchBackend(backend)
        backend_obj = ALL_HYPERPARAMETER_SEARCH_BACKENDS[backend]()
        backend_obj.ensure_available()
        self.hp_search_backend = backend
        if self.model_init is None:
            raise RuntimeError(
                "To use hyperparameter search, you need to pass your model through a model_init function."
            )

        self.hp_space = backend_obj.default_hp_space if hp_space is None else hp_space
        self.hp_name = hp_name
        self.compute_objective = default_compute_objective if compute_objective is None else compute_objective

        best_run = backend_obj.run(self, n_trials, direction, **kwargs)

        # Clear the backend so subsequent training runs are not treated as hyperparameter-search trials.
        self.hp_search_backend = None
        return best_run
def log(self, logs: dict[str, float], start_time: float | None = None) -> None:
"""
Log `logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (`dict[str, float]`):
The values to log.
start_time (`Optional[float]`):
The start of training.
"""
if self.state.epoch is not None:
logs["epoch"] = self.state.epoch
if self.args.include_num_input_tokens_seen != "no":
logs["num_input_tokens_seen"] = self.state.num_input_tokens_seen
if start_time is not None:
current_session_num_tokens = (
self.state.num_input_tokens_seen - self.initial_num_input_tokens_seen_for_session
)
logs.update(speed_metrics("train", start_time, num_tokens=current_session_num_tokens))
output = {**logs, "step": self.state.global_step}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_input(self, data: torch.Tensor | Any) -> torch.Tensor | Any:
"""
Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors.
"""
if isinstance(data, Mapping):
return type(data)({k: self._prepare_input(v) for k, v in data.items()})
elif isinstance(data, (tuple, list)):
return type(data)(self._prepare_input(v) for v in data)
elif isinstance(data, torch.Tensor):
kwargs = {"device": self.args.device}
if self.is_deepspeed_enabled and (torch.is_floating_point(data) or torch.is_complex(data)):
# NLP models inputs are int/uint and those get adjusted to the right dtype of the
# embedding. Other models such as wav2vec2's inputs are already float and thus
# may need special handling to match the dtypes of the model
kwargs.update({"dtype": self.accelerator.state.deepspeed_plugin.hf_ds_config.dtype()})
return data.to(**kwargs)
return data
def _prepare_inputs(self, inputs: dict[str, torch.Tensor | Any]) -> dict[str, torch.Tensor | Any]:
"""
Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
inputs = self._prepare_input(inputs)
if len(inputs) == 0:
raise ValueError(
"The batch received was empty, your model won't be able to train on it. Double-check that your "
f"training dataset contains keys expected by the model: {','.join(self._signature_columns)}."
)
return inputs
def _is_attention_mask_causal(self, attention_mask):
"""
Check if an attention mask is causal (compatible with causal attention).
Context parallelism only supports causal attention patterns. This function
checks if the provided attention mask is compatible.
Args:
attention_mask (torch.Tensor): The attention mask to check
Returns:
bool: True if the mask is causal or compatible with causal attention
"""
if attention_mask is None:
return True # No mask is considered causal (model uses default causal masking)
# Handle different mask dimensions
if attention_mask.dim() == 2:
# (batch_size, seq_len) - standard padding mask, compatible with causal attention
return True
elif attention_mask.dim() in [3, 4]:
# (batch_size, seq_len, seq_len) or (batch_size, num_heads, seq_len, seq_len)
# Check if it's lower triangular (causal)
seq_len = attention_mask.shape[-1]
if seq_len <= 1:
return True # Single token or empty is always causal
# Take first batch and head (if 4D) for checking pattern
if attention_mask.dim() == 4:
mask = attention_mask[0, 0] # First batch, first head
else:
mask = attention_mask[0] # First batch
# Check if upper triangular part is masked (should be 0 or very negative for causal)
upper_triangular = torch.triu(mask, diagonal=1)
# For causal masks, upper triangular should be 0 or very negative (like -inf)
# Use a reasonable threshold to handle float precision issues
is_causal = torch.all(upper_triangular <= 1e-6) or torch.all(upper_triangular < -1e4)
return is_causal.item() if isinstance(is_causal, torch.Tensor) else is_causal
# For unknown dimensions, be conservative and reject
return False
    def _prepare_context_parallel_inputs(self, model, inputs: dict[str, torch.Tensor | Any]):
        """
        Prepare inputs for context parallelism by setting up buffers and validation.
        Args:
            model: The model being trained
            inputs: Input tensors to prepare
        Returns:
            tuple: (context_manager, prepared_inputs) where context_manager is either
                the context parallelism wrapper or a no-op context
        """
        # Only act when a parallelism config exists and context parallelism (CP) is enabled;
        # otherwise fall through to the no-op context at the bottom.
        if (
            getattr(self.accelerator, "parallelism_config", None) is not None
            and self.accelerator.parallelism_config.cp_enabled
        ):
            if self.accelerator.parallelism_config.cp_backend == "torch":
                # Torch-backed CP relies on Accelerate's SDPA hooks, so any other attention
                # implementation cannot be sharded correctly.
                if hasattr(model, "config"):
                    if model.config._attn_implementation != "sdpa":
                        raise ValueError(
                            f"Context parallelism is supported only with SDPA attention, you are using {model.config._attn_implementation}."
                        )
                # CP shards the sequence dimension, so labels must be pre-shifted before
                # sharding (a shard cannot see its right neighbor's first token).
                if "shift_labels" not in inputs:
                    logger.warning_once("Shift labels not found in the inputs, shifting manually")
                    if "labels" in inputs:
                        _ignore_index = -100
                        # Pad one ignore-token on the right, then drop the first position.
                        labels = nn.functional.pad(inputs["labels"], (0, 1), value=_ignore_index)
                        inputs["shift_labels"] = labels[:, 1:].contiguous()
            # note: we don't do anything for accelerator.parallelism_config.sp_backend == "deepspeed" since:
            # - accelerator.parallelism_config performs the `model.config._attn_implementation` checks already and it supports more than `dspa`
            # - UlyssesSPDataLoaderAdapter called from Accelerate performs the `shift_label` creation - must not interfere
            # - position_ids generation should be done by HF Trainer if it wasn't done by the user
            if "position_ids" not in inputs:
                logger.warning_once("Position IDs not found in the inputs, generating manually")
                inputs["position_ids"] = torch.arange(
                    inputs["input_ids"].size(1), device=inputs["input_ids"].device
                ).expand(inputs["input_ids"].size(0), -1)
            # Collect every tensor that must be sharded along the sequence dimension,
            # paired with the dimension index to shard on (always dim 1 here).
            buffers = []
            buffer_seq_dims = []
            if "input_ids" in inputs:
                buffers.append(inputs["input_ids"])
                buffer_seq_dims.append(1)  # Sequence dimension
            if "labels" in inputs:
                buffers.append(inputs["labels"])
                buffer_seq_dims.append(1)
            if "shift_labels" in inputs:
                buffers.append(inputs["shift_labels"])
                buffer_seq_dims.append(1)
            # Add attention_mask to buffers for context parallel splitting (only if causal)
            if "attention_mask" in inputs:
                # Only validate causal mask once for performance
                if not getattr(self, "_attn_mask_causal_checked", False):
                    # Context parallel currently doesn't support other masks than causal
                    # Accelerate applies hooks to replace mask with is_causal arg in SDPA
                    # Check if the mask is really causal and if not throw an error
                    attention_mask = inputs["attention_mask"]
                    if not self._is_attention_mask_causal(attention_mask):
                        raise ValueError(
                            "Context parallelism only supports causal attention masks. "
                            "The provided attention_mask is not causal. "
                            "Please ensure your data uses causal masking (lower triangular) "
                            "or remove the attention_mask to use the model's default causal masking."
                        )
                    self._attn_mask_causal_checked = True
                if self._attn_mask_causal_checked:
                    # Add to buffers only after validation (or if validation already passed)
                    attention_mask = inputs["attention_mask"]
                    if attention_mask.dim() == 2:
                        buffers.append(attention_mask)
                        buffer_seq_dims.append(1)
                    else:
                        # Other dimensionality; keep as-is without sharding to avoid incorrect splits
                        pass
            # Include position_ids in context parallelism splitting
            if "position_ids" in inputs and inputs["position_ids"] is not None:
                buffers.append(inputs["position_ids"])
                buffer_seq_dims.append(1)
            # Bind the buffers now; the caller enters the returned context manager around the
            # forward/backward pass so tensors are sharded in place and not restored afterwards.
            return partial(
                self.accelerator.maybe_context_parallel,
                buffers=buffers,
                buffer_seq_dims=buffer_seq_dims,
                no_restore_buffers=set(buffers),
            ), inputs
        return contextlib.nullcontext, inputs
def compute_loss_context_manager(self):
"""
A helper wrapper to group together context managers.
"""
ctx_stack = contextlib.ExitStack()
autocast_ctx = self.autocast_smart_context_manager()
if not isinstance(autocast_ctx, contextlib.nullcontext):
ctx_stack.enter_context(autocast_ctx)
return ctx_stack
def autocast_smart_context_manager(self, cache_enabled: bool | None = True):
"""
A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired
arguments, depending on the situation. We rely on accelerate for autocast, hence we do nothing here.
"""
return contextlib.nullcontext()
    def training_step(
        self,
        model: nn.Module,
        inputs: dict[str, torch.Tensor | Any],
        num_items_in_batch: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        Subclass and override to inject custom behavior.
        Args:
            model (`nn.Module`):
                The model to train.
            inputs (`dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.
                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument `labels`. Check your model's documentation for all accepted arguments.
            num_items_in_batch (`torch.Tensor`, *optional*):
                Number of items in the batch, used to normalize the loss under gradient accumulation.
        Return:
            `torch.Tensor`: The tensor with training loss on this batch.
        """
        # Prepare buffers for context parallelism
        cp_context, inputs = self._prepare_context_parallel_inputs(model, inputs)
        # Context manager is no-op if CP isn't enabled
        with cp_context():
            model.train()
            # Some optimizers (e.g. schedule-free variants) expose a train/eval switch.
            if hasattr(self.optimizer, "train") and callable(self.optimizer.train):
                self.optimizer.train()
            inputs = self._prepare_inputs(inputs)
            if is_sagemaker_mp_enabled():
                # SageMaker MP runs forward + backward inside its own helper and returns a
                # per-microbatch loss, so we exit early here.
                loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
                return loss_mb.reduce_mean().detach().to(self.args.device)
            with self.compute_loss_context_manager():
                loss = self.compute_loss(model, inputs, num_items_in_batch=num_items_in_batch)
            # Drop references to the input tensors before backward to lower peak memory.
            del inputs
            if (
                self.args.torch_empty_cache_steps is not None
                and self.state.global_step % self.args.torch_empty_cache_steps == 0
            ):
                clear_device_cache()
            kwargs = {}
            # For LOMO optimizers you need to explicitly use the learning rate
            if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:
                kwargs["learning_rate"] = self._get_learning_rate()
            if self.args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            # Finally we need to normalize the loss for reporting if GA loss bug is not fixed during compute loss
            if (not self.model_accepts_loss_kwargs or num_items_in_batch is None) and self.compute_loss_func is None:
                # If the model does not accept loss kwargs, we need to normalize the loss by the number of gradient accumulation steps
                loss = loss / self.current_gradient_accumulation_steps
            # Turning off loss scaling w.r.t. gradient accumulation when DeepSpeed is enabled
            # https://github.com/huggingface/transformers/pull/35808
            if self.accelerator.distributed_type == DistributedType.DEEPSPEED:
                kwargs["scale_wrt_gas"] = False
            self.accelerator.backward(loss, **kwargs)
            return loss.detach()
    def compute_loss(
        self,
        model: nn.Module,
        inputs: dict[str, torch.Tensor | Any],
        return_outputs: bool = False,
        num_items_in_batch: torch.Tensor | None = None,
    ):
        """
        How the loss is computed by Trainer. By default, all models return the loss in the first element.
        Args:
            model (`nn.Module`):
                The model to compute the loss for.
            inputs (`dict[str, Union[torch.Tensor, Any]]`):
                The input data for the model.
            return_outputs (`bool`, *optional*, defaults to `False`):
                Whether to return the model outputs along with the loss.
            num_items_in_batch (Optional[torch.Tensor], *optional*):
                The number of items in the batch. If num_items_in_batch is not passed,
        Returns:
            The loss of the model along with its output if return_outputs was set to True
        Subclass and override for custom behavior. If you are not using `num_items_in_batch` when computing your loss,
        make sure to overwrite `self.model_accepts_loss_kwargs` to `False`. Otherwise, the loss calculating might be slightly inaccurate when performing gradient accumulation.
        """
        # DeepSpeed sequence parallelism has its own loss aggregation path.
        pc = getattr(self.accelerator, "parallelism_config", None)
        if pc is not None and pc.sp_backend == "deepspeed" and pc.sp_enabled:
            return self._deepspeed_sp_compute_loss(model, inputs, return_outputs, pc)
        # Pop labels only when we will compute the loss ourselves (label smoothing or a
        # user-supplied loss function); otherwise let the model compute it internally.
        if (self.label_smoother is not None or self.compute_loss_func is not None) and "labels" in inputs:
            labels = inputs.pop("labels")
        else:
            labels = None
        if self.model_accepts_loss_kwargs:
            kwargs = {}
            if num_items_in_batch is not None:
                kwargs["num_items_in_batch"] = num_items_in_batch
            inputs = {**inputs, **kwargs}
        outputs = model(**inputs)
        # User-defined compute_loss function
        if self.compute_loss_func is not None:
            if labels is None:
                logger.warning(
                    "Trainer: `compute_loss_func` is defined but `labels=None`. "
                    "Your custom loss function will still be called with labels=None. "
                )
            loss = self.compute_loss_func(
                outputs,
                labels,
                num_items_in_batch=num_items_in_batch,
            )
        # Default HF loss handling (label smoothing) if no custom loss function
        elif labels is not None:
            unwrapped_model = self.accelerator.unwrap_model(model)
            # PEFT wraps the base model, so the real class name lives one level deeper.
            model_name = (
                unwrapped_model.base_model.model._get_name()
                if _is_peft_model(unwrapped_model)
                else unwrapped_model._get_name()
            )
            # Causal LMs need the label shift before smoothing; other heads do not.
            if model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values():
                loss = self.label_smoother(outputs, labels, shift_labels=True)
            else:
                loss = self.label_smoother(outputs, labels)
        else:
            if isinstance(outputs, dict) and "loss" not in outputs:
                raise ValueError(
                    "The model did not return a loss from the inputs, only the following keys: "
                    f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}."
                )
            # We don't use .loss here since the model may return tuples instead of ModelOutput.
            loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
        # Undo the per-process token averaging so the later all-reduce mean yields a
        # global token-weighted average across devices.
        if (
            self.args.average_tokens_across_devices
            and (self.model_accepts_loss_kwargs or self.compute_loss_func)
            and num_items_in_batch is not None
        ):
            loss *= self.accelerator.num_processes if self.args.n_gpu <= 1 else self.args.n_gpu
        return (loss, outputs) if return_outputs else loss
    def _deepspeed_sp_compute_loss(self, model, inputs, return_outputs, pc):
        """
        How the loss is computed by the Trainer under sequence parallelism with sp_backend=="deepspeed" and sp_size>1.
        Performs weighted loss aggregation across SP ranks, accounting for varying numbers of valid tokens per rank
        (e.g., when some ranks receive only padding or prompt tokens that are masked with -100).
        Args:
            model (`nn.Module`):
                The model to compute the loss for.
            inputs (`dict[str, Union[torch.Tensor, Any]]`):
                The input data for the model. Must include "shift_labels" key.
            return_outputs (`bool`, *optional*, defaults to `False`):
                Whether to return the model outputs along with the loss.
            pc (`accelerate.parallelism_config.ParallelismConfig`):
                self.accelerator.parallelism_config object (not None)
        Returns:
            The loss of the model along with its output if return_outputs was set to True
        """
        # DeepSpeed SP automatically injects shift_labels into inputs (pre-shifted labels for SP).
        # The model's forward pass receives shift_labels via **kwargs and passes it to the loss function.
        # Both standard transformer models and Liger-patched models handle shift_labels correctly,
        # so we can directly use the computed loss from the model output.
        # See: https://huggingface.co/docs/accelerate/en/concept_guides/sequence_parallelism
        outputs = model(**inputs)
        loss = outputs.loss
        sp_group = self.accelerator.torch_device_mesh["sp"].get_group()
        sp_world_size = pc.sp_size
        # differentiable weighted per-shard-loss aggregation across ranks
        # (torch.distributed.nn.functional.all_gather keeps gradients flowing, unlike
        # the plain torch.distributed.all_gather)
        losses_per_rank = torch.distributed.nn.functional.all_gather(loss, group=sp_group)
        # special dealing with SFT that has prompt tokens that aren't used in loss computation
        good_tokens = (inputs["shift_labels"] != -100).view(-1).sum()
        good_tokens_per_rank = torch.distributed.nn.functional.all_gather(good_tokens, group=sp_group)
        # Skip ranks with zero valid tokens
        total_loss = sum(
            losses_per_rank[rank] * good_tokens_per_rank[rank]
            for rank in range(sp_world_size)
            if good_tokens_per_rank[rank] > 0
        )
        total_good_tokens = sum(good_tokens_per_rank)
        # max(..., 1) guards against a division by zero when every rank was fully masked.
        loss = total_loss / max(total_good_tokens, 1)
        return (loss, outputs) if return_outputs else loss
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
return self.args.local_process_index == 0
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be `True` for one process).
"""
# Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global
# process index.
if is_sagemaker_mp_enabled():
return smp.rank() == 0
else:
return self.args.process_index == 0
    def save_model(self, output_dir: str | None = None, _internal_call: bool = False):
        """
        Will save the model, so you can reload it using `from_pretrained()`.
        Will only save from the main process.
        Args:
            output_dir (`str`, *optional*):
                Directory to save to; defaults to `self.args.output_dir`.
            _internal_call (`bool`, *optional*, defaults to `False`):
                Set by the Trainer itself; when `False` (a user call) the save may also push to the Hub.
        """
        if output_dir is None:
            output_dir = self.args.output_dir
        # Dispatch on the active distributed backend, since each one requires a different
        # way of materializing the (possibly sharded) state dict.
        if is_torch_xla_available():
            self._save_tpu(output_dir)
        elif is_sagemaker_mp_enabled():
            # Calling the state_dict needs to be done on the wrapped model and on all processes.
            os.makedirs(output_dir, exist_ok=True)
            state_dict = self.model_wrapped.state_dict()
            if self.args.should_save:
                self._save(output_dir, state_dict=state_dict)
            # Marker file consumed by SageMaker tooling.
            Path(os.path.join(output_dir, "user_content.pt")).touch()
        # We are in N-D parallelism if we have parallelism_config set, so we check accelerate if we're on a to_save rank
        elif getattr(self.accelerator, "parallelism_config", None) is not None:
            # DeepSpeed SP already handles checkpoint saving below, so skip manual save in that case
            pc = getattr(self.accelerator, "parallelism_config")
            if self.accelerator.should_save_model and not (pc.sp_enabled and pc.sp_backend == "deepspeed"):
                self._save(output_dir)
        # If we drop to here, we're in 1D parallelism, so all ranks need to go to `save_pretrained`
        elif (tp_size := getattr(self.model, "_tp_size", 0)) is not None and tp_size > 1:
            self._save(output_dir)
        elif self.is_fsdp_enabled:
            # Only FULL_STATE_DICT produces a complete, consolidated state dict to save.
            if "FULL_STATE_DICT" in str(self.accelerator.state.fsdp_plugin.state_dict_type):
                state_dict = self.accelerator.get_state_dict(self.model)
                if self.args.should_save:
                    self._save(output_dir, state_dict=state_dict)
        elif self.is_deepspeed_enabled:
            try:
                state_dict = self.accelerator.get_state_dict(self.deepspeed)
                if self.args.should_save:
                    self._save(output_dir, state_dict=state_dict)
            except ValueError:
                logger.warning(
                    " stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use"
                    " zero_to_fp32.py to recover weights"
                )
                if self.args.should_save:
                    self._save(output_dir, state_dict={})
                # remove the dummy state_dict
                remove_dummy_checkpoint(self.args.should_save, output_dir, [WEIGHTS_NAME, SAFE_WEIGHTS_NAME])
                self.model_wrapped.save_checkpoint(output_dir)
        elif self.args.should_save:
            self._save(output_dir)
        # Push to the Hub when `save_model` is called by the user.
        if self.args.push_to_hub and not _internal_call:
            self.push_to_hub(commit_message="Model save", revision=self.args.hub_revision)
    def _save_tpu(self, output_dir: str | None = None):
        """
        Save the model when training on TPU/XLA.
        All ranks must enter this method: `xm.rendezvous` calls synchronize the processes,
        and under FSDP-XLA-v1 every rank saves its own shard before the master consolidates.
        Args:
            output_dir (`str`, *optional*): Directory to save to; defaults to `self.args.output_dir`.
        """
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        logger.info(f"Saving model checkpoint to {output_dir}")
        model = self.model
        # Flush pending XLA computations before touching model state.
        xm.mark_step()
        if xm.is_master_ordinal(local=False):
            os.makedirs(output_dir, exist_ok=True)
            torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        supported_classes = (PushToHubMixin,)
        xm.rendezvous("saving_checkpoint")
        if self.is_fsdp_xla_v1_enabled:
            ckpt = {
                "model": model.state_dict(),
                "shard_metadata": model.get_shard_metadata(),
            }
            ckpt_path = os.path.join(
                output_dir, f"rank{self.args.process_index}-of-{self.args.world_size}-{WEIGHTS_NAME}"
            )
            # All ranks save sharded checkpoint
            xm.save(ckpt, ckpt_path, master_only=False)
            # Make sure all ranks have saved checkpoints
            xm.rendezvous("save_full_checkpoints")
            # Master save full checkpoint
            if self.args.should_save:
                from torch_xla.distributed.fsdp import consolidate_sharded_model_checkpoints
                full_state_dict, _ = consolidate_sharded_model_checkpoints(
                    ckpt_prefix=os.path.join(output_dir, ""),
                    ckpt_suffix=f"rank*-of-*-{WEIGHTS_NAME}",
                    save_model=False,
                )
                # Unwrap the two FSDP wrapper layers to reach the underlying module.
                model = model.module.module
                unwrapped_model = self.accelerator.unwrap_model(model)
                if isinstance(unwrapped_model, supported_classes):
                    unwrapped_model.save_pretrained(
                        output_dir,
                        state_dict=full_state_dict,
                        save_function=xm.save,
                        safe_serialization=self.args.save_safetensors,
                    )
                else:
                    logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                    xm.save(full_state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        elif not isinstance(model, supported_classes):
            if isinstance(self.accelerator.unwrap_model(model), supported_classes):
                # Move tensors to CPU before serialization so xm.save works off-device.
                self.accelerator.unwrap_model(model).save_pretrained(
                    output_dir,
                    is_main_process=self.args.should_save,
                    state_dict=xm._maybe_convert_to_cpu(model.state_dict()),
                    save_function=xm.save,
                    safe_serialization=self.args.save_safetensors,
                )
            else:
                logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                state_dict = xm._maybe_convert_to_cpu(model.state_dict())
                xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            model.save_pretrained(
                output_dir,
                is_main_process=self.args.should_save,
                save_function=xm.save,
                safe_serialization=self.args.save_safetensors,
                state_dict=xm._maybe_convert_to_cpu(model.state_dict()),
            )
        if self.processing_class is not None and self.args.should_save:
            self.processing_class.save_pretrained(output_dir)
    def _save(self, output_dir: str | None = None, state_dict=None):
        """
        Core (non-TPU) save routine: writes model weights, processing class/tokenizer,
        and training arguments to `output_dir`.
        Args:
            output_dir (`str`, *optional*): Directory to save to; defaults to `self.args.output_dir`.
            state_dict (*optional*): Pre-gathered state dict to save instead of `self.model.state_dict()`
                (used by FSDP/DeepSpeed paths where gathering must happen on all ranks first).
        """
        # If we are executing this function, we are the process zero, so we don't check for that.
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        logger.info(f"Saving model checkpoint to {output_dir}")
        supported_classes = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel)
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        if not isinstance(self.model, supported_classes):
            if state_dict is None:
                state_dict = self.model.state_dict()
            # The wrapper may hide a supported model (e.g. compiled/accelerate-wrapped).
            if isinstance(self.accelerator.unwrap_model(self.model, keep_torch_compile=False), supported_classes):
                self.accelerator.unwrap_model(self.model, keep_torch_compile=False).save_pretrained(
                    output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors
                )
            else:
                logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                if self.args.save_safetensors:
                    safetensors.torch.save_file(
                        state_dict, os.path.join(output_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"}
                    )
                else:
                    torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(
                output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors
            )
        if self.processing_class is not None:
            self.processing_class.save_pretrained(output_dir)
        elif (
            self.data_collator is not None
            and hasattr(self.data_collator, "tokenizer")
            and self.data_collator.tokenizer is not None
        ):
            logger.info("Saving Trainer.data_collator.tokenizer by default as Trainer.processing_class is `None`")
            self.data_collator.tokenizer.save_pretrained(output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self.args.parallel_mode == ParallelMode.DISTRIBUTED:
self.state.total_flos += (
distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item()
)
self.current_flos = 0
else:
self.state.total_flos += self.current_flos
self.current_flos = 0
    def _sorted_checkpoints(
        self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
    ) -> list[str]:
        """
        Return checkpoint directories under `output_dir` sorted from oldest to newest.
        Sorting is by step number parsed from the directory name, or by mtime when
        `use_mtime=True` (with a fallback to numerical ordering if mtimes look fake).
        The best-model checkpoint, if present, is shifted toward the end of the list so
        that checkpoint rotation does not delete it.
        """
        ordering_and_checkpoint_path = []
        glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)]
        for path in glob_checkpoints:
            if use_mtime:
                ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
            else:
                # Sort key is the integer step suffix of "<prefix>-<step>".
                regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
                if regex_match is not None and regex_match.groups() is not None:
                    ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
        checkpoints_sorted = sorted(ordering_and_checkpoint_path)
        # mtime is not reliable on all filesystems, especially on some fuse fs in cloud environments
        # so we check if the mtime is fake and fallback to numerical ordering if needed
        if use_mtime and len(ordering_and_checkpoint_path) > 1:
            mtime_diff = checkpoints_sorted[-1][0] - checkpoints_sorted[0][0]
            if mtime_diff < 1.0:  # less than 1 second, which is almost impossible when mtime works fine
                warnings.warn("mtime may not be reliable on this filesystem, falling back to numerical ordering")
                return self._sorted_checkpoints(
                    use_mtime=False, output_dir=output_dir, checkpoint_prefix=checkpoint_prefix
                )
        checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
        # Make sure we don't delete the best model.
        if (
            self.state.best_model_checkpoint is not None
            and str(Path(self.state.best_model_checkpoint)) in checkpoints_sorted
        ):
            best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
            # Bubble the best checkpoint rightward, out of the "oldest" region that
            # _rotate_checkpoints deletes from.
            for i in range(best_model_index, len(checkpoints_sorted) - 2):
                checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]
        return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
# If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which
# we don't do to allow resuming.
save_total_limit = self.args.save_total_limit
if (
self.state.best_model_checkpoint is not None
and self.args.save_total_limit == 1
and checkpoints_sorted[-1] != self.state.best_model_checkpoint
):
save_total_limit = 2
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint, ignore_errors=True)
    def evaluate(
        self,
        eval_dataset: Dataset | dict[str, Dataset] | None = None,
        ignore_keys: list[str] | None = None,
        metric_key_prefix: str = "eval",
    ) -> dict[str, float]:
        """
        Run evaluation and returns metrics.
        The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
        (pass it to the init `compute_metrics` argument).
        You can also subclass and override this method to inject custom behavior.
        Args:
            eval_dataset (Union[`Dataset`, dict[str, `Dataset`]], *optional*):
                Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns
                not accepted by the `model.forward()` method are automatically removed. If it is a dictionary, it will
                evaluate on each dataset, prepending the dictionary key to the metric name. Datasets must implement the
                `__len__` method.
                <Tip>
                If you pass a dictionary with names of datasets as keys and datasets as values, evaluate will run
                separate evaluations on each dataset. This can be useful to monitor how training affects other
                datasets or simply to get a more fine-grained evaluation.
                When used with `load_best_model_at_end`, make sure `metric_for_best_model` references exactly one
                of the datasets. If you, for example, pass in `{"data1": data1, "data2": data2}` for two datasets
                `data1` and `data2`, you could specify `metric_for_best_model="eval_data1_loss"` for using the
                loss on `data1` and `metric_for_best_model="eval_data2_loss"` for the loss on `data2`.
                </Tip>
            ignore_keys (`list[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default)
        Returns:
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
            dictionary also contains the epoch number which comes from the training state.
        """
        # handle multiple eval datasets
        override = eval_dataset is not None
        eval_dataset = eval_dataset if override else self.eval_dataset
        if isinstance(eval_dataset, dict):
            # Recurse once per dataset, namespacing each metric with the dataset name.
            metrics = {}
            for eval_dataset_name, _eval_dataset in eval_dataset.items():
                dataset_metrics = self.evaluate(
                    eval_dataset=_eval_dataset if override else eval_dataset_name,
                    ignore_keys=ignore_keys,
                    metric_key_prefix=f"{metric_key_prefix}_{eval_dataset_name}",
                )
                metrics.update(dataset_metrics)
            return metrics
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        if self.is_fsdp_xla_v2_enabled:
            eval_dataloader = tpu_spmd_dataloader(eval_dataloader)
        start_time = time.time()
        output = self.evaluation_loop(
            eval_dataloader,
            description="Evaluation",
            # No point gathering the predictions if there are no metrics, otherwise we defer to
            # self.args.prediction_loss_only
            prediction_loss_only=True if self.compute_metrics is None else None,
            ignore_keys=ignore_keys,
            metric_key_prefix=metric_key_prefix,
        )
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        # Exclude one-time model preparation from the reported throughput numbers.
        if f"{metric_key_prefix}_model_preparation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_model_preparation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        self.log(output.metrics)
        if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
        self._memory_tracker.stop_and_update_metrics(output.metrics)
        return output.metrics
    def predict(
        self, test_dataset: Dataset, ignore_keys: list[str] | None = None, metric_key_prefix: str = "test"
    ) -> PredictionOutput:
        """
        Run prediction and returns predictions and potential metrics.
        Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
        will also return metrics, like in `evaluate()`.
        Args:
            test_dataset (`Dataset`):
                Dataset to run the predictions on. If it is an `datasets.Dataset`, columns not accepted by the
                `model.forward()` method are automatically removed. Has to implement the method `__len__`
            ignore_keys (`list[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (`str`, *optional*, defaults to `"test"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "test_bleu" if the prefix is "test" (default)
        <Tip>
        If your predictions or labels have different sequence length (for instance because you're doing dynamic padding
        in a token classification task) the predictions will be padded (on the right) to allow for concatenation into
        one array. The padding index is -100.
        </Tip>
        Returns: *NamedTuple* A namedtuple with the following keys:
            - predictions (`np.ndarray`): The predictions on `test_dataset`.
            - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
            - metrics (`dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
              labels).
        """
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        test_dataloader = self.get_test_dataloader(test_dataset)
        start_time = time.time()
        # Reuses the shared evaluation loop; only the description/prefix differ from evaluate().
        output = self.evaluation_loop(
            test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
        )
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        # Exclude one-time model preparation from the reported throughput numbers.
        if f"{metric_key_prefix}_model_preparation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_model_preparation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics)
        self._memory_tracker.stop_and_update_metrics(output.metrics)
        return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
def evaluation_loop(
    self,
    dataloader: DataLoader,
    description: str,
    prediction_loss_only: bool | None = None,
    ignore_keys: list[str] | None = None,
    metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
    """
    Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

    Works both with or without labels.

    Args:
        dataloader (`DataLoader`):
            The dataloader to iterate over.
        description (`str`):
            Label used in logs. It also gates accumulation: when it equals `"Prediction"`,
            per-batch outputs are accumulated even if `args.batch_eval_metrics` is set.
        prediction_loss_only (`bool`, *optional*):
            Overrides `args.prediction_loss_only` when not `None`; if `True`, only losses
            are collected (no logits/labels).
        ignore_keys (`list[str]`, *optional*):
            Keys to drop from dict-shaped model outputs when gathering predictions.
        metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
            Prefix added to every metric key (e.g. `"eval_loss"`).

    Returns:
        `EvalLoopOutput`: predictions, label_ids, metrics and num_samples.
    """
    args = self.args

    prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only

    # if eval is called w/o train, handle model prep here
    if self.is_deepspeed_enabled and self.deepspeed is None:
        _, _ = deepspeed_init(self, num_training_steps=0, inference=True)

    model = self._wrap_model(self.model, training=False, dataloader=dataloader)

    # Prepare the model with accelerate only once (first eval without a prior train run);
    # preparation time is surfaced below as `{prefix}_model_preparation_time`.
    if len(self.accelerator._models) == 0 and model is self.model:
        start_time = time.time()
        model = (
            self.accelerator.prepare(model)
            if self.is_deepspeed_enabled
            or (self.is_fsdp_enabled and self.accelerator.mixed_precision != "fp8" and not self.args.torch_compile)
            else self.accelerator.prepare_model(model, evaluation_mode=True)
        )
        self.model_preparation_time = round(time.time() - start_time, 4)

        if self.is_fsdp_enabled:
            self.model = model

        # for the rest of this function `model` is the outside model, whether it was wrapped or not
        if model is not self.model:
            self.model_wrapped = model

        # backward compatibility
        if self.is_deepspeed_enabled:
            self.deepspeed = self.model_wrapped

    # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
    # while ``train`` is running, cast it to the right dtype first and then put on device
    if not self.is_in_train:
        if args.fp16_full_eval:
            model = model.to(dtype=torch.float16, device=args.device)
        elif args.bf16_full_eval:
            model = model.to(dtype=torch.bfloat16, device=args.device)

    batch_size = self.args.eval_batch_size

    logger.info(f"\n***** Running {description} *****")
    if has_length(dataloader):
        logger.info(f" Num examples = {self.num_examples(dataloader)}")
    else:
        logger.info(" Num examples: Unknown")
    logger.info(f" Batch size = {batch_size}")

    if hasattr(model, "eval") and callable(model.eval):
        model.eval()
    # Some optimizers expose an eval-mode switch as well — call it when available.
    if hasattr(self.optimizer, "eval") and callable(self.optimizer.eval):
        self.optimizer.eval()

    self.callback_handler.eval_dataloader = dataloader
    # Do this before wrapping.
    eval_dataset = getattr(dataloader, "dataset", None)

    # Initialize containers (losses/preds/labels/inputs are accumulated across batches,
    # padded with -100 so ragged sequence lengths can be concatenated)
    all_losses = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100)
    all_preds = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100)
    all_labels = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100)
    all_inputs = EvalLoopContainer(self.args.eval_do_concat_batches, padding_index=-100)

    metrics = None
    eval_set_kwargs = {}

    # Will be useful when we have an iterable dataset so don't know its length.
    observed_num_examples = 0

    # Main evaluation loop
    for step, inputs in enumerate(dataloader):
        # Update the observed num examples
        observed_batch_size = find_batch_size(inputs)
        if observed_batch_size is not None:
            observed_num_examples += observed_batch_size
            # For batch samplers, batch_size is not known by the dataloader in advance.
            if batch_size is None:
                batch_size = observed_batch_size

        # Prediction step
        losses, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
        main_input_name = getattr(self.model, "main_input_name", "input_ids")
        inputs_decode = (
            self._prepare_input(inputs[main_input_name]) if "inputs" in args.include_for_metrics else None
        )

        if is_torch_xla_available():
            xm.mark_step()

        # Update containers
        if losses is not None:
            # Repeat the scalar per-step loss per example so the gathered tensor is batch-shaped.
            losses = self.gather_function(losses.repeat(batch_size))
            all_losses.add(losses)
        if inputs_decode is not None:
            inputs_decode = self.accelerator.pad_across_processes(inputs_decode, dim=1, pad_index=-100)
            inputs_decode = self.gather_function(inputs_decode)
            if not self.args.batch_eval_metrics or description == "Prediction":
                all_inputs.add(inputs_decode)
        if labels is not None:
            # Pad labels here, preparing for preprocess_logits_for_metrics in next logits block.
            labels = self.accelerator.pad_across_processes(labels, dim=1, pad_index=-100)
        if logits is not None:
            logits = self.accelerator.pad_across_processes(logits, dim=1, pad_index=-100)
            if self.preprocess_logits_for_metrics is not None:
                logits = self.preprocess_logits_for_metrics(logits, labels)
            logits = self.gather_function(logits)
            if not self.args.batch_eval_metrics or description == "Prediction":
                all_preds.add(logits)
        if labels is not None:
            labels = self.gather_function(labels)
            if not self.args.batch_eval_metrics or description == "Prediction":
                all_labels.add(labels)

        self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)

        if self.args.batch_eval_metrics:
            # Incremental metrics: compute per batch; `compute_result=True` on the last
            # step asks the metric function to produce the final aggregated result.
            if self.compute_metrics is not None and logits is not None and labels is not None:
                is_last_step = self.accelerator.gradient_state.end_of_dataloader
                batch_kwargs = {}
                batch_kwargs["losses"] = losses if "loss" in args.include_for_metrics else None
                batch_kwargs["inputs"] = inputs if "inputs" in args.include_for_metrics else None
                metrics = self.compute_metrics(
                    EvalPrediction(predictions=logits, label_ids=labels, **batch_kwargs),
                    compute_result=is_last_step,
                )

            del losses, logits, labels, inputs
            torch.cuda.empty_cache()

        # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
        elif args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
            all_losses.to_cpu_and_numpy()
            all_preds.to_cpu_and_numpy()
            all_labels.to_cpu_and_numpy()
            all_inputs.to_cpu_and_numpy()

            del losses, logits, labels, inputs
            torch.cuda.empty_cache()

    # After all calls to `.gather_function`, reset to `gather_for_metrics`:
    self.gather_function = self.accelerator.gather_for_metrics

    # Gather all remaining tensors and put them back on the CPU
    all_losses = all_losses.get_arrays()
    all_preds = all_preds.get_arrays()
    all_labels = all_labels.get_arrays()
    all_inputs = all_inputs.get_arrays()

    # Number of samples: prefer the dataset length, then IterableDatasetShard's counter,
    # then the dataloader length, and finally what we actually observed.
    if has_length(eval_dataset):
        num_samples = len(eval_dataset)
    # The instance check is weird and does not actually check for the type, but whether the dataset has the right
    # methods. Therefore we need to make sure it also has the attribute.
    elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, "num_examples", 0) > 0:
        num_samples = eval_dataset.num_examples
    else:
        if has_length(dataloader):
            num_samples = self.num_examples(dataloader)
        else:  # both len(dataloader.dataset) and len(dataloader) fail
            num_samples = observed_num_examples
    if num_samples == 0 and observed_num_examples > 0:
        num_samples = observed_num_examples

    # Metrics!
    if (
        self.compute_metrics is not None
        and all_preds is not None
        and all_labels is not None
        and not self.args.batch_eval_metrics
    ):
        eval_set_kwargs["losses"] = all_losses if "loss" in args.include_for_metrics else None
        eval_set_kwargs["inputs"] = all_inputs if "inputs" in args.include_for_metrics else None
        metrics = self.compute_metrics(
            EvalPrediction(predictions=all_preds, label_ids=all_labels, **eval_set_kwargs)
        )
    elif metrics is None:
        metrics = {}

    # To be JSON-serializable, we need to remove numpy types or zero-d tensors
    metrics = denumpify_detensorize(metrics)

    if isinstance(all_losses, list) and all_losses:
        metrics[f"{metric_key_prefix}_loss"] = np.concatenate(all_losses).mean().item()
    elif isinstance(all_losses, np.ndarray):
        metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
    if hasattr(self, "model_preparation_time"):
        metrics[f"{metric_key_prefix}_model_preparation_time"] = self.model_preparation_time

    # Prefix all keys with metric_key_prefix + '_'
    for key in list(metrics.keys()):
        if not key.startswith(f"{metric_key_prefix}_"):
            metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

    return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
def _nested_gather(self, tensors, name=None):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_xla_available():
if name is None:
name = "nested_gather"
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.parallel_mode == ParallelMode.DISTRIBUTED:
tensors = distributed_concat(tensors)
return tensors
def prediction_step(
    self,
    model: nn.Module,
    inputs: dict[str, torch.Tensor | Any],
    prediction_loss_only: bool,
    ignore_keys: list[str] | None = None,
) -> tuple[torch.Tensor | None, torch.Tensor | None, torch.Tensor | None]:
    """
    Perform an evaluation step on `model` using `inputs`.

    Subclass and override to inject custom behavior.

    Args:
        model (`nn.Module`):
            The model to evaluate.
        inputs (`dict[str, Union[torch.Tensor, Any]]`):
            The inputs and targets of the model.

            The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
            argument `labels`. Check your model's documentation for all accepted arguments.
        prediction_loss_only (`bool`):
            Whether or not to return the loss only.
        ignore_keys (`list[str]`, *optional*):
            A list of keys in the output of your model (if it is a dictionary) that should be ignored when
            gathering predictions. Defaults to the model config's `keys_to_ignore_at_inference`
            (falling back to `["past_key_values"]`) when the model has a config.

    Return:
        tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
        logits and labels (each being optional).
    """
    # A loss is computed either because every configured label is present in `inputs`...
    has_labels = False if len(self.label_names) == 0 else all(inputs.get(k) is not None for k in self.label_names)
    # For CLIP-like models capable of returning loss values.
    # If `return_loss` is not specified or being `None` in `inputs`, we check if the default value of `return_loss`
    # is `True` in `model.forward`.
    return_loss = inputs.get("return_loss")
    if return_loss is None:
        return_loss = self.can_return_loss
    # ...or because the model computes its own loss without labels (e.g. contrastive models).
    loss_without_labels = len(self.label_names) == 0 and return_loss

    inputs = self._prepare_inputs(inputs)
    if ignore_keys is None:
        if hasattr(self.model, "config"):
            ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", ["past_key_values"])
        else:
            ignore_keys = []

    # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
    if has_labels or loss_without_labels:
        labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
        if len(labels) == 1:
            labels = labels[0]
    else:
        labels = None

    with torch.no_grad():
        if is_sagemaker_mp_enabled():
            # SageMaker model-parallel path: the forward runs sharded, so per-microbatch
            # outputs must be reduced/concatenated afterwards.
            raw_outputs = smp_forward_only(model, inputs)
            if has_labels or loss_without_labels:
                if isinstance(raw_outputs, dict):
                    loss_mb = raw_outputs["loss"]
                    logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
                else:
                    # Tuple outputs put the loss first, logits after.
                    loss_mb = raw_outputs[0]
                    logits_mb = raw_outputs[1:]

                loss = loss_mb.reduce_mean().detach().cpu()
                logits = smp_nested_concat(logits_mb)
            else:
                loss = None
                if isinstance(raw_outputs, dict):
                    logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
                else:
                    logits_mb = raw_outputs
                logits = smp_nested_concat(logits_mb)
        else:
            if has_labels or loss_without_labels:
                with self.compute_loss_context_manager():
                    # Count label tokens so `compute_loss` can normalize correctly.
                    num_items_in_batch = self._get_num_items_in_batch([inputs], self.args.device)
                    loss, outputs = self.compute_loss(
                        model, inputs, return_outputs=True, num_items_in_batch=num_items_in_batch
                    )
                loss = loss.detach().mean()

                if isinstance(outputs, dict):
                    logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
                else:
                    logits = outputs[1:]
            else:
                loss = None
                with self.compute_loss_context_manager():
                    outputs = model(**inputs)
                if isinstance(outputs, dict):
                    logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
                else:
                    logits = outputs

    if prediction_loss_only:
        return (loss, None, None)

    logits = nested_detach(logits)
    # Unwrap single-element tuples so callers get the bare tensor.
    if len(logits) == 1:
        logits = logits[0]

    return (loss, logits, labels)
def floating_point_ops(self, inputs: dict[str, torch.Tensor | Any]):
"""
For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point
operations for every backward + forward pass. If using another model, either implement such a method in the
model or subclass and override this method.
Args:
inputs (`dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
`int`: The number of floating-point operations.
"""
if (main_input := getattr(self.model, "main_input_name", "input_ids")) in inputs and hasattr(
self.model, "num_parameters"
):
return 6 * inputs[main_input].numel() * self.model.num_parameters(exclude_embeddings=True)
return 0
def init_hf_repo(self, token: str | None = None):
    """
    Initializes a git repo in `self.args.hub_model_id`.

    No-op on non-zero ranks. Creates the repo (idempotently) and stores its resolved
    id on `self.hub_model_id`; also resets `self.push_in_progress`.
    """
    # Only on process zero
    if not self.is_world_process_zero():
        return

    if self.args.hub_model_id is not None:
        repo_name = self.args.hub_model_id
    else:
        # Fall back to the output directory's basename as the repo name.
        repo_name = Path(self.args.output_dir).absolute().name

    resolved_token = self.args.hub_token if token is None else token
    repo_url = create_repo(repo_name, token=resolved_token, private=self.args.hub_private_repo, exist_ok=True)
    self.hub_model_id = repo_url.repo_id
    self.push_in_progress = None
def create_model_card(
    self,
    language: str | None = None,
    license: str | None = None,
    tags: str | list[str] | None = None,
    model_name: str | None = None,
    finetuned_from: str | None = None,
    tasks: str | list[str] | None = None,
    dataset_tags: str | list[str] | None = None,
    dataset: str | list[str] | None = None,
    dataset_args: str | list[str] | None = None,
):
    """
    Creates a draft of a model card using the information available to the `Trainer`.

    Overwrites `README.md` in `args.output_dir`, preserving tags from an existing card.

    Args:
        language (`str`, *optional*):
            The language of the model (if applicable)
        license (`str`, *optional*):
            The license of the model. Will default to the license of the pretrained model used, if the original
            model given to the `Trainer` comes from a repo on the Hub.
        tags (`str` or `list[str]`, *optional*):
            Some tags to be included in the metadata of the model card.
        model_name (`str`, *optional*):
            The name of the model.
        finetuned_from (`str`, *optional*):
            The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo
            of the original model given to the `Trainer` (if it comes from the Hub).
        tasks (`str` or `list[str]`, *optional*):
            One or several task identifiers, to be included in the metadata of the model card.
        dataset_tags (`str` or `list[str]`, *optional*):
            One or several dataset tags, to be included in the metadata of the model card.
        dataset (`str` or `list[str]`, *optional*):
            One or several dataset identifiers, to be included in the metadata of the model card.
        dataset_args (`str` or `list[str]`, *optional*):
            One or several dataset arguments, to be included in the metadata of the model card.
    """
    # Only the main process writes the card.
    if not self.is_world_process_zero():
        return

    model_card_filepath = os.path.join(self.args.output_dir, "README.md")
    is_peft_library = False
    if os.path.exists(model_card_filepath):
        library_name = ModelCard.load(model_card_filepath).data.get("library_name")
        is_peft_library = library_name == "peft"

        # Append existing tags in `tags`
        existing_tags = ModelCard.load(model_card_filepath).data.tags
        if tags is not None and existing_tags is not None:
            if isinstance(tags, str):
                tags = [tags]
            for tag in existing_tags:
                if tag not in tags:
                    tags.append(tag)

    training_summary = TrainingSummary.from_trainer(
        self,
        language=language,
        license=license,
        tags=tags,
        model_name=model_name,
        finetuned_from=finetuned_from,
        tasks=tasks,
        dataset_tags=dataset_tags,
        dataset=dataset,
        dataset_args=dataset_args,
    )
    model_card = training_summary.to_model_card()
    with open(model_card_filepath, "w") as f:
        f.write(model_card)

    # PEFT adapters carry extra card info the library knows how to maintain.
    if is_peft_library:
        self.accelerator.unwrap_model(self.model).create_or_update_model_card(self.args.output_dir)
def _push_from_checkpoint(self, checkpoint_folder):
    """
    Push the latest checkpoint to the Hub without re-serializing the model.

    Copies modeling files from `checkpoint_folder` into `args.output_dir`, then launches
    non-blocking `upload_folder` jobs that are tracked via `self.push_in_progress`.
    Depending on `args.hub_strategy`, the checkpoint folder itself may also be uploaded.
    """
    # Only push from one node.
    if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END:
        return
    # If we haven't finished the last push, we don't do this one unless args.hub_always_push=True.
    if not self.args.hub_always_push and self.push_in_progress is not None and not self.push_in_progress.is_done():
        return

    output_dir = self.args.output_dir
    # To avoid a new synchronization of all model weights, we just copy the file from the checkpoint folder
    modeling_files = [CONFIG_NAME, GENERATION_CONFIG_NAME, WEIGHTS_NAME, SAFE_WEIGHTS_NAME]
    # Add sharded checkpoints if we have an index
    for index_file in [WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_INDEX_NAME]:
        index_path = os.path.join(checkpoint_folder, index_file)
        if os.path.isfile(index_path):
            modeling_files.append(index_file)
            with open(index_path) as f:
                index = json.loads(f.read())
            # Every shard referenced by the index must travel with it.
            shard_files = list(set(index["weight_map"].values()))
            modeling_files.extend(shard_files)
    if is_peft_available():
        modeling_files.extend([ADAPTER_CONFIG_NAME, ADAPTER_WEIGHTS_NAME, ADAPTER_SAFE_WEIGHTS_NAME])
    for modeling_file in modeling_files:
        if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)):
            shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, modeling_file))
    # Saving the processing class is fast and we don't know how many files it may have spawned, so we resave it to be sure.
    if self.processing_class is not None:
        self.processing_class.save_pretrained(output_dir)
    # Same for the training arguments
    torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))

    if self.args.save_strategy == SaveStrategy.STEPS:
        commit_message = f"Training in progress, step {self.state.global_step}"
    else:
        commit_message = f"Training in progress, epoch {int(self.state.epoch)}"

    # `run_as_future=True` makes the upload asynchronous; checkpoints and internal files are excluded.
    model_push_job = upload_folder(
        repo_id=self.hub_model_id,
        folder_path=output_dir,
        commit_message=commit_message,
        token=self.args.hub_token,
        run_as_future=True,
        ignore_patterns=["_*", f"{PREFIX_CHECKPOINT_DIR}-*"],
        revision=self.args.hub_revision,
    )

    push_jobs = [model_push_job]

    if self.args.hub_strategy in [HubStrategy.CHECKPOINT, HubStrategy.ALL_CHECKPOINTS]:
        # CHECKPOINT keeps a single rolling "last-checkpoint" folder; ALL_CHECKPOINTS keeps each one.
        path_in_repo = (
            "last-checkpoint" if self.args.hub_strategy == HubStrategy.CHECKPOINT else Path(checkpoint_folder).name
        )
        checkpoint_push = upload_folder(
            repo_id=self.hub_model_id,
            folder_path=checkpoint_folder,
            path_in_repo=path_in_repo,
            commit_message=commit_message + ", checkpoint",
            token=self.args.hub_token,
            run_as_future=True,
            revision=self.args.hub_revision,
        )
        push_jobs.append(checkpoint_push)

    if self.push_in_progress is None or self.push_in_progress.is_done():
        self.push_in_progress = PushInProgress(push_jobs)
    else:
        self.push_in_progress.jobs.extend(push_jobs)
def _finish_current_push(self):
if not hasattr(self, "push_in_progress"):
return
if self.push_in_progress is not None and not self.push_in_progress.is_done():
logger.info("Waiting for the current checkpoint push to be finished, this might take a couple of minutes.")
self.push_in_progress.wait_until_done()
def push_to_hub(
    self,
    commit_message: str | None = "End of training",
    blocking: bool = True,
    token: str | None = None,
    revision: str | None = None,
    **kwargs,
) -> CommitInfo:
    """
    Upload `self.model` and `self.processing_class` to the 🤗 model hub on the repo `self.args.hub_model_id`.

    Parameters:
        commit_message (`str`, *optional*, defaults to `"End of training"`):
            Message to commit while pushing.
        blocking (`bool`, *optional*, defaults to `True`):
            Whether the function should return only when the `git push` has finished.
        token (`str`, *optional*, defaults to `None`):
            Token with write permission to overwrite Trainer's original args.
        revision (`str`, *optional*):
            The git revision to commit from. Defaults to the head of the "main" branch.
        kwargs (`dict[str, Any]`, *optional*):
            Additional keyword arguments passed along to [`~Trainer.create_model_card`].

    Returns:
        The commit info of the push if `blocking=True`, or a `Future` object tracking the
        progress of the commit if `blocking=False`. Returns `None` on non-zero ranks.
    """
    model_name = kwargs.pop("model_name", None)
    if model_name is None and self.args.should_save:
        if self.args.hub_model_id is None:
            model_name = Path(self.args.output_dir).name
        else:
            model_name = self.args.hub_model_id.split("/")[-1]

    token = token if token is not None else self.args.hub_token

    # In case the user calls this method with args.push_to_hub = False
    if self.hub_model_id is None:
        self.init_hf_repo(token=token)

    # Needs to be executed on all processes for TPU training, but will only save on the processed determined by
    # self.args.should_save.
    self.save_model(_internal_call=True)

    # Only push from one node.
    if not self.is_world_process_zero():
        return

    # Add additional tags in the case the model has already some tags and users pass
    # "tags" argument to `push_to_hub` so that trainer automatically handles internal tags
    # from all models since Trainer does not call `model.push_to_hub`.
    if getattr(self.model, "model_tags", None) is not None:
        if "tags" not in kwargs:
            kwargs["tags"] = []

        # If it is a string, convert it to a list
        if isinstance(kwargs["tags"], str):
            kwargs["tags"] = [kwargs["tags"]]

        for model_tag in self.model.model_tags:
            if model_tag not in kwargs["tags"]:
                kwargs["tags"].append(model_tag)

    self.create_model_card(model_name=model_name, **kwargs)

    if revision is None:
        revision = self.args.hub_revision

    # Wait for the current upload to be finished.
    self._finish_current_push()
    # `run_as_future=not blocking`: a blocking call uploads synchronously and returns the
    # commit info; a non-blocking call returns a Future. Checkpoint folders are excluded.
    return upload_folder(
        repo_id=self.hub_model_id,
        folder_path=self.args.output_dir,
        commit_message=commit_message,
        token=token,
        run_as_future=not blocking,
        ignore_patterns=["_*", f"{PREFIX_CHECKPOINT_DIR}-*"],
        revision=revision,
    )
def _add_sm_patterns_to_gitignore(self) -> None:
    """
    Add SageMaker Checkpointing patterns to .gitignore file.

    Appends the `*.sagemaker-*` patterns to the repo's `.gitignore` (creating it if
    needed), then stages, commits and pushes the change when the repo is dirty.
    """
    # Make sure we only do this on the main process
    if not self.is_world_process_zero():
        return

    patterns = ["*.sagemaker-uploading", "*.sagemaker-uploaded"]

    # Get current .gitignore content
    if os.path.exists(os.path.join(self.repo.local_dir, ".gitignore")):
        with open(os.path.join(self.repo.local_dir, ".gitignore")) as f:
            current_content = f.read()
    else:
        current_content = ""

    # Add the patterns to .gitignore, each on its own line.
    content = current_content
    for pattern in patterns:
        if pattern not in content:
            if content.endswith("\n"):
                content += pattern
            else:
                content += f"\n{pattern}"

    # Write the .gitignore file if it has changed
    if content != current_content:
        with open(os.path.join(self.repo.local_dir, ".gitignore"), "w") as f:
            logger.debug(f"Writing .gitignore file. Content: {content}")
            f.write(content)

    self.repo.git_add(".gitignore")

    # avoid race condition with git status
    time.sleep(0.5)

    if not self.repo.is_repo_clean():
        self.repo.git_commit("Add *.sagemaker patterns to .gitignore.")
        self.repo.git_push()
def create_accelerator_and_postprocess(self):
    """
    Build the `accelerate.Accelerator` from `self.args` and derive post-creation state.

    Resolves gradient-accumulation, dataloader, FSDP, DeepSpeed, parallelism and dynamo
    settings into `Accelerator` kwargs, then sets `self.accelerator`,
    `self.gather_function`, and the `is_deepspeed_enabled` / `is_fsdp_enabled` /
    `is_tp_enabled` flags, and validates incompatible argument combinations.
    """
    # We explicitly don't rely on the `Accelerator` to do gradient accumulation
    grad_acc_kwargs = {}
    if self.args.accelerator_config.gradient_accumulation_kwargs is not None:
        grad_acc_kwargs = self.args.accelerator_config.gradient_accumulation_kwargs

    # check if num_steps is attempted to be passed in gradient_accumulation_kwargs
    if "num_steps" in grad_acc_kwargs:
        if self.args.gradient_accumulation_steps > 1:
            # raise because we do not know which setting is intended.
            raise ValueError(
                "The `AcceleratorConfig`'s `num_steps` is set but `gradient_accumulation_steps` is greater than 1 in the passed `TrainingArguments`"
                "If using the passed `AcceleratorConfig` is desired, do not set the `TrainingArguments` `gradient_accumulation_steps`."
            )
        else:
            self.args.gradient_accumulation_steps = grad_acc_kwargs["num_steps"]

    accelerator_config = self.args.accelerator_config.to_dict()

    # Extract dataloader config params from accelerator config
    dataloader_params = ["split_batches", "dispatch_batches", "even_batches", "use_seedable_sampler"]
    dataloader_config = DataLoaderConfiguration(
        **{param: accelerator_config.pop(param) for param in dataloader_params}
    )
    dataloader_config.data_seed = self.args.data_seed

    non_blocking = accelerator_config.pop("non_blocking")
    if non_blocking and not self.args.dataloader_pin_memory:
        logger.warning(
            "`non_blocking` is enabled but `dataloader_pin_memory` is not. For the best performance, it's recommended to enable both."
        )
    dataloader_config.non_blocking = non_blocking
    # this would have been updated above, no need for it anymore
    accelerator_config.pop("gradient_accumulation_kwargs")

    fsdp_plugin = None
    if self.args.fsdp_plugin_args is not None:
        from accelerate.utils import FullyShardedDataParallelPlugin

        fsdp_plugin = FullyShardedDataParallelPlugin(**self.args.fsdp_plugin_args)

    args = {
        "mixed_precision": self.args.mixed_precision,
        "dataloader_config": dataloader_config,
        "fsdp_plugin": fsdp_plugin,
        "deepspeed_plugin": self.args.deepspeed_plugin,
    }
    # We defer compatibility checks to accelerator
    if self.args.parallelism_config is not None:
        min_accelerate_version = "1.12.0"
        if not is_accelerate_available(min_accelerate_version):
            raise ImportError(
                f"ParallelismConfig requires accelerate>={min_accelerate_version}). Please upgrade accelerate to use this feature."
            )
        args["parallelism_config"] = self.args.parallelism_config

    self.is_tp_enabled = False
    if getattr(self.model, "tp_size", None) is not None and self.model.tp_size > 1:
        self.is_tp_enabled = True
        if self.args.parallelism_config is not None:
            if is_accelerate_available("1.10.1"):
                # NOTE(review): this inner check duplicates the outer
                # `parallelism_config is not None` condition — confirm intended.
                if self.args.parallelism_config is not None:
                    from accelerate import ParallelismConfig

                    args["parallelism_config"] = ParallelismConfig(tp_size=self.model.tp_size)
            else:
                raise ValueError("Requires accelerate>1.10.1 to use Tensor Parallelism.")

    if is_accelerate_available("1.2.0"):
        # if we don't have the correct version, we will rely on env var instead that were set in TrainingArguments
        from accelerate.utils import TorchDynamoPlugin

        dynamo_plugin = TorchDynamoPlugin(
            backend=self.args.torch_compile_backend, mode=self.args.torch_compile_mode
        )
        args["dynamo_plugin"] = dynamo_plugin

    # create accelerator object
    self.accelerator = Accelerator(**args)
    # some Trainer classes need to use `gather` instead of `gather_for_metrics`, thus we store a flag
    self.gather_function = self.accelerator.gather_for_metrics

    if "use_gather_object" in inspect.signature(self.gather_function).parameters:
        self.gather_function = functools.partial(
            self.gather_function, use_gather_object=self.args.eval_use_gather_object
        )

    # deepspeed and accelerate flags covering both trainer args and accelerate launcher
    self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None
    self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None

    # post accelerator creation setup
    if self.is_fsdp_enabled:
        fsdp_plugin = self.accelerator.state.fsdp_plugin
        # Trainer-level fsdp_config values override the plugin's defaults.
        for param in ["limit_all_gathers", "activation_checkpointing"]:
            setattr(fsdp_plugin, param, self.args.fsdp_config.get(param, getattr(fsdp_plugin, param)))
        if fsdp_plugin.activation_checkpointing and self.args.gradient_checkpointing:
            raise ValueError(
                "The activation_checkpointing in FSDP config and the gradient_checkpointing in training arg "
                "can't be set to True simultaneously. Please use FSDP's activation_checkpointing logic "
                "when using FSDP."
            )

    if self.is_deepspeed_enabled and getattr(self.args, "hf_deepspeed_config", None) is None:
        self.propagate_args_to_deepspeed()

    # `save_only_model` can't be used with DeepSpeed/FSDP along with `load_best_model_at_end`
    if (
        self.args.save_only_model
        and (self.is_deepspeed_enabled or self.is_fsdp_enabled)
        and self.args.load_best_model_at_end
    ):
        wrapper = "DeepSpeed" if self.is_deepspeed_enabled else "FSDP"
        raise ValueError(f"{wrapper} can't be used with `save_only_model` along with `load_best_model_at_end`.")

    # `auto_find_batch_size` isn't supported yet with DeepSpeed Zero-3
    if (
        self.is_deepspeed_enabled
        and self.accelerator.state.deepspeed_plugin.zero_stage == 3
        and self.args.auto_find_batch_size
    ):
        raise ValueError(
            "`auto_find_batch_size` isn't supported yet with DeepSpeed Zero-3. Please consider using Zero-2, Zero-1, or FSDP"
        )
    if (
        self.args.save_only_model
        and self.is_fsdp_enabled
        and "SHARDED_STATE_DICT" in str(self.accelerator.state.fsdp_plugin.state_dict_type)
    ):
        raise ValueError("save_only_model option is not compatible with FSDP state dict type 'SHARDED_STATE_DICT'")
def propagate_args_to_deepspeed(self, auto_find_batch_size=False):
    """
    Sets values in the deepspeed plugin based on the Trainer args
    """
    from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig

    plugin = self.accelerator.state.deepspeed_plugin
    # Re-wrap the raw DeepSpeed config so Trainer args can be folded into it.
    plugin.hf_ds_config = HfTrainerDeepSpeedConfig(plugin.hf_ds_config.config)
    plugin.deepspeed_config = plugin.hf_ds_config.config
    plugin.hf_ds_config.trainer_config_process(self.args, auto_find_batch_size)
def _fsdp_qlora_plugin_updates(self):
    """Align the FSDP plugin with PEFT/QLoRA specifics (auto-wrap policy, mixed precision)."""
    # Nothing to do unless FSDP is active and the model is a PEFT model.
    if not (self.is_fsdp_enabled and _is_peft_model(self.model)):
        return

    from peft import PeftConfig
    from peft.utils.other import fsdp_auto_wrap_policy

    if isinstance(self.model.active_peft_config, PeftConfig):
        self.accelerator.state.fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(self.model)

    # For bitsandbytes 4-bit with a floating-point storage dtype, FSDP must run
    # mixed precision in that same dtype.
    uses_bnb = getattr(self.model, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES
    if uses_bnb and self.model.hf_quantizer.quantization_config.bnb_4bit_quant_storage.is_floating_point:
        self.accelerator.state.fsdp_plugin.set_mixed_precision(
            self.model.hf_quantizer.quantization_config.bnb_4bit_quant_storage, override=True
        )
def _get_num_items_in_batch(self, batch_samples: list, device: torch.device) -> torch.Tensor | int | None:
"""
Counts the number of items in the batches to properly scale the loss.
Args:
batch_samples (`list`): List of batches
device (`torch.device`): The device on which the number of items in the batch should be.
Returns:
None if the number of items in the batch doesn't need to be computed else the number of items in the batch
"""
num_items_in_batch = None
count_num_items_in_batch = (
len(batch_samples) > 0
and "labels" in batch_samples[0]
and (
# num_items_in_batch is passed to model forward
# https://github.com/huggingface/transformers/blob/v4.49.0/src/transformers/trainer.py#L3757
self.model_accepts_loss_kwargs
# num_items_in_batch is passed to compute_loss_func
# https://github.com/huggingface/transformers/blob/v4.49.0/src/transformers/trainer.py#L3773
or self.compute_loss_func is not None
# num_items_in_batch is also verified if (self.model_accepts_loss_kwargs or self.compute_loss_func)
# https://github.com/huggingface/transformers/blob/v4.49.0/src/transformers/trainer.py#L3790
)
)
if count_num_items_in_batch:
# For now we don't support object detection
try:
num_items_in_batch = sum((batch["labels"].ne(-100)).sum() for batch in batch_samples)
except (TypeError, AttributeError):
pass
if num_items_in_batch is not None:
if self.args.average_tokens_across_devices:
if self.args.world_size > 1:
num_items_in_batch = self.accelerator.gather(num_items_in_batch.to(device)).sum()
elif self.args.n_gpu > 1:
# In DP case, if we don't average, we need to divide by the number of gpu. This is the simplest approximation.
# Otherwise, we would have to scatter labels and calculate num_items_in_batch for each gpu.
num_items_in_batch = num_items_in_batch // self.args.n_gpu
if torch.is_tensor(num_items_in_batch):
num_items_in_batch = num_items_in_batch.to(device)
if self.args.n_gpu > 1 and num_items_in_batch.dim() == 0:
# In the DataParallel case, convert the scalar tensor into a 2-dim tensor with the same value repeated
num_items_in_batch = num_items_in_batch.unsqueeze(0).expand(self.args.n_gpu, -1)
# Divide by number of devices with the same batch
if pc := getattr(self.accelerator, "parallelism_config", None):
num_items_in_batch = num_items_in_batch // pc.non_data_parallel_size
return num_items_in_batch
def get_batch_samples(
self, epoch_iterator: Iterator, num_batches: int, device: torch.device
) -> tuple[list, torch.Tensor | int | None]:
"""
Collects a specified number of batches from the epoch iterator and optionally counts the number of items in the batches to properly scale the loss.
"""
batch_samples = []
for _ in range(num_batches):
try:
batch_samples.append(next(epoch_iterator))
except StopIteration:
break
num_items_in_batch = self._get_num_items_in_batch(batch_samples, device)
return batch_samples, num_items_in_batch
def set_initial_training_values(
self, args: TrainingArguments, dataloader: DataLoader, total_train_batch_size: int
):
"""
Calculates and returns the following values:
- `num_train_epochs`
- `num_update_steps_per_epoch`
- `num_examples`
- `num_train_samples`
- `epoch_based`
- `len_dataloader`
- `max_steps`
"""
# Case 1: we rely on `args.max_steps` first
max_steps = args.max_steps
# If max_steps is negative, we use the number of epochs to determine the number of total steps later
epoch_based = max_steps < 0
len_dataloader = len(dataloader) if has_length(dataloader) else None
# Account for Sequence Parallelism (SP) dataloader adapter's effect
sp_size = self.get_sp_size()
if sp_size > 1 and len_dataloader is not None:
len_dataloader = len_dataloader * sp_size
# Case 2: We have a dataloader length and can extrapolate
if len_dataloader is not None:
num_update_steps_per_epoch = max(
len_dataloader // args.gradient_accumulation_steps
+ int(len_dataloader % args.gradient_accumulation_steps > 0),
1,
)
# Case 3: We have a length but are using epochs, we can extrapolate the number of steps
if epoch_based:
max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
# Now we figure out `num_examples`, `num_train_epochs`, and `train_samples`
if len_dataloader:
num_examples = self.num_examples(dataloader)
if args.max_steps > 0:
num_train_epochs = max_steps // num_update_steps_per_epoch + int(
max_steps % num_update_steps_per_epoch > 0
)
# May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's
# the best we can do.
num_train_samples = max_steps * total_train_batch_size
else:
num_train_epochs = math.ceil(args.num_train_epochs)
num_train_samples = self.num_examples(dataloader) * args.num_train_epochs
elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size
# Setting a very large number of epochs so we go as many times as necessary over the iterator.
num_train_epochs = sys.maxsize
num_update_steps_per_epoch = max_steps
num_examples = total_train_batch_size * args.max_steps
num_train_samples = args.max_steps * total_train_batch_size
else:
raise ValueError(
"args.max_steps must be set to a positive value if dataloader does not have a length, was"
f" {args.max_steps}"
)
return (
num_train_epochs,
num_update_steps_per_epoch,
num_examples,
num_train_samples,
epoch_based,
len_dataloader,
max_steps,
)
| Trainer |
python | django__django | tests/model_inheritance/models.py | {
"start": 1113,
"end": 1406
} | class ____(models.Model):
post = models.ForeignKey(
Post,
models.CASCADE,
related_name="attached_%(class)s_set",
related_query_name="attached_%(app_label)s_%(class)ss",
)
content = models.TextField()
class Meta:
abstract = True
| Attachment |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 52077,
"end": 52854
} | class ____(ObjectBaseModel):
"""An ORM representation of a worker"""
name: str = Field(description="The name of the worker.")
work_pool_id: UUID = Field(
description="The work pool with which the queue is associated."
)
last_heartbeat_time: Optional[datetime.datetime] = Field(
default=None, description="The last time the worker process sent a heartbeat."
)
heartbeat_interval_seconds: Optional[int] = Field(
default=None,
description=(
"The number of seconds to expect between heartbeats sent by the worker."
),
)
status: WorkerStatus = Field(
WorkerStatus.OFFLINE,
description="Current status of the worker.",
)
Flow.model_rebuild()
# FlowRun.model_rebuild()
| Worker |
python | PyCQA__pylint | doc/data/messages/t/too-many-ancestors/bad.py | {
"start": 242,
"end": 359
} | class ____(Animal): ...
# max of 7 by default, can be configured
# each edge of a diamond inheritance counts
| Vertebrate |
python | astropy__astropy | astropy/table/tests/test_init_table.py | {
"start": 13186,
"end": 13677
} | class ____(BaseInitFromDictLike):
def _setup(self, table_type):
self.data = OrderedDict(
[
("a", Column(name="x", data=[1, 3])),
("b", [2, 4]),
("c", np.array([3, 5], dtype="i8")),
]
)
def test_col_order(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert t.colnames == ["a", "b", "c"]
@pytest.mark.usefixtures("table_type")
| TestInitFromOrderedDict |
python | walkccc__LeetCode | solutions/6. ZigZag Conversion/6.py | {
"start": 0,
"end": 281
} | class ____:
def convert(self, s: str, numRows: int) -> str:
rows = [''] * numRows
k = 0
direction = (numRows == 1) - 1
for c in s:
rows[k] += c
if k == 0 or k == numRows - 1:
direction *= -1
k += direction
return ''.join(rows)
| Solution |
python | django__django | tests/invalid_models_tests/test_ordinary_fields.py | {
"start": 40273,
"end": 44890
} | class ____(TestCase):
def test_db_default(self):
class Model(models.Model):
field = models.FloatField(db_default=Pi())
field = Model._meta.get_field("field")
errors = field.check(databases=self.databases)
if connection.features.supports_expression_defaults:
expected_errors = []
else:
msg = (
f"{connection.display_name} does not support default database values "
"with expressions (db_default)."
)
expected_errors = [Error(msg=msg, obj=field, id="fields.E011")]
self.assertEqual(errors, expected_errors)
def test_db_default_literal(self):
class Model(models.Model):
field = models.IntegerField(db_default=1)
field = Model._meta.get_field("field")
errors = field.check(databases=self.databases)
self.assertEqual(errors, [])
def test_db_default_required_db_features(self):
class Model(models.Model):
field = models.FloatField(db_default=Pi())
class Meta:
required_db_features = {"supports_expression_defaults"}
field = Model._meta.get_field("field")
errors = field.check(databases=self.databases)
self.assertEqual(errors, [])
def test_db_default_expression_invalid(self):
expression = models.F("field_name")
class Model(models.Model):
field = models.FloatField(db_default=expression)
field = Model._meta.get_field("field")
errors = field.check(databases=self.databases)
if connection.features.supports_expression_defaults:
msg = f"{expression} cannot be used in db_default."
expected_errors = [Error(msg=msg, obj=field, id="fields.E012")]
else:
msg = (
f"{connection.display_name} does not support default database values "
"with expressions (db_default)."
)
expected_errors = [Error(msg=msg, obj=field, id="fields.E011")]
self.assertEqual(errors, expected_errors)
def test_db_default_expression_required_db_features(self):
expression = models.F("field_name")
class Model(models.Model):
field = models.FloatField(db_default=expression)
class Meta:
required_db_features = {"supports_expression_defaults"}
field = Model._meta.get_field("field")
errors = field.check(databases=self.databases)
if connection.features.supports_expression_defaults:
msg = f"{expression} cannot be used in db_default."
expected_errors = [Error(msg=msg, obj=field, id="fields.E012")]
else:
expected_errors = []
self.assertEqual(errors, expected_errors)
@skipUnlessDBFeature("supports_expression_defaults")
def test_db_default_combined_invalid(self):
expression = models.Value(4.5) + models.F("field_name")
class Model(models.Model):
field = models.FloatField(db_default=expression)
field = Model._meta.get_field("field")
errors = field.check(databases=self.databases)
msg = f"{expression} cannot be used in db_default."
expected_error = Error(msg=msg, obj=field, id="fields.E012")
self.assertEqual(errors, [expected_error])
@skipUnlessDBFeature("supports_expression_defaults")
def test_db_default_function_arguments_invalid(self):
expression = Coalesce(models.Value(4.5), models.F("field_name"))
class Model(models.Model):
field = models.FloatField(db_default=expression)
field = Model._meta.get_field("field")
errors = field.check(databases=self.databases)
msg = f"{expression} cannot be used in db_default."
expected_error = Error(msg=msg, obj=field, id="fields.E012")
self.assertEqual(errors, [expected_error])
def test_literals_not_treated_as_expressions(self):
"""
DatabaseFeatures.supports_expression_defaults = False shouldn't
prevent non-expression literals (integer, float, boolean, etc.) from
being used as database defaults.
"""
class Model(models.Model):
field = models.FloatField(db_default=1.0)
field = Model._meta.get_field("field")
with unittest.mock.patch.object(
connection.features, "supports_expression_defaults", False
):
errors = field.check(databases=self.databases)
self.assertEqual(errors, [])
@isolate_apps("invalid_models_tests")
| InvalidDBDefaultTests |
python | pytest-dev__pytest | src/_pytest/outcomes.py | {
"start": 1990,
"end": 2284
} | class ____(Exception):
"""Raised for immediate program exits (no tracebacks/summaries)."""
def __init__(
self, msg: str = "unknown reason", returncode: int | None = None
) -> None:
self.msg = msg
self.returncode = returncode
super().__init__(msg)
| Exit |
python | pandas-dev__pandas | pandas/errors/__init__.py | {
"start": 5024,
"end": 5821
} | class ____(PandasDeprecationWarning):
"""
Warning raised for an upcoming change that will be enforced in pandas 4.0.
See Also
--------
errors.PandasChangeWarning: Class for deprecations that will raise any warning.
errors.PandasPendingDeprecationWarning : Class for deprecations that will raise a
PendingDeprecationWarning.
errors.PandasDeprecationWarning : Class for deprecations that will raise a
DeprecationWarning.
errors.PandasFutureWarning : Class for deprecations that will raise a FutureWarning.
Examples
--------
>>> pd.errors.Pandas4Warning
<class 'pandas.errors.Pandas4Warning'>
"""
@classmethod
def version(cls) -> str:
"""Version where change will be enforced."""
return "4.0"
| Pandas4Warning |
python | urllib3__urllib3 | src/urllib3/http2/probe.py | {
"start": 55,
"end": 3014
} | class ____:
__slots__ = (
"_lock",
"_cache_locks",
"_cache_values",
)
def __init__(self) -> None:
self._lock = threading.Lock()
self._cache_locks: dict[tuple[str, int], threading.RLock] = {}
self._cache_values: dict[tuple[str, int], bool | None] = {}
def acquire_and_get(self, host: str, port: int) -> bool | None:
# By the end of this block we know that
# _cache_[values,locks] is available.
value = None
with self._lock:
key = (host, port)
try:
value = self._cache_values[key]
# If it's a known value we return right away.
if value is not None:
return value
except KeyError:
self._cache_locks[key] = threading.RLock()
self._cache_values[key] = None
# If the value is unknown, we acquire the lock to signal
# to the requesting thread that the probe is in progress
# or that the current thread needs to return their findings.
key_lock = self._cache_locks[key]
key_lock.acquire()
try:
# If the by the time we get the lock the value has been
# updated we want to return the updated value.
value = self._cache_values[key]
# In case an exception like KeyboardInterrupt is raised here.
except BaseException as e: # Defensive:
assert not isinstance(e, KeyError) # KeyError shouldn't be possible.
key_lock.release()
raise
return value
def set_and_release(
self, host: str, port: int, supports_http2: bool | None
) -> None:
key = (host, port)
key_lock = self._cache_locks[key]
with key_lock: # Uses an RLock, so can be locked again from same thread.
if supports_http2 is None and self._cache_values[key] is not None:
raise ValueError(
"Cannot reset HTTP/2 support for origin after value has been set."
) # Defensive: not expected in normal usage
self._cache_values[key] = supports_http2
key_lock.release()
def _values(self) -> dict[tuple[str, int], bool | None]:
"""This function is for testing purposes only. Gets the current state of the probe cache"""
with self._lock:
return {k: v for k, v in self._cache_values.items()}
def _reset(self) -> None:
"""This function is for testing purposes only. Reset the cache values"""
with self._lock:
self._cache_locks = {}
self._cache_values = {}
_HTTP2_PROBE_CACHE = _HTTP2ProbeCache()
set_and_release = _HTTP2_PROBE_CACHE.set_and_release
acquire_and_get = _HTTP2_PROBE_CACHE.acquire_and_get
_values = _HTTP2_PROBE_CACHE._values
_reset = _HTTP2_PROBE_CACHE._reset
__all__ = [
"set_and_release",
"acquire_and_get",
]
| _HTTP2ProbeCache |
python | zarr-developers__zarr-python | src/zarr/core/dtype/npy/structured.py | {
"start": 1645,
"end": 2224
} | class ____(
NamedConfig[Literal["structured"], dict[str, Sequence[Sequence[str | DTypeJSON]]]]
):
"""
A JSON representation of a structured data type in Zarr V3.
References
----------
This representation is not currently defined in an external specification.
Examples
--------
```python
{
"name": "structured",
"configuration": {
"fields": [
["f0", "int32"],
["f1", "float64"],
]
}
}
```
"""
@dataclass(frozen=True, kw_only=True)
| StructuredJSON_V3 |
python | walkccc__LeetCode | solutions/2934. Minimum Operations to Maximize Last Elements in Arrays/2934.py | {
"start": 0,
"end": 663
} | class ____:
def minOperations(self, nums1: list[int], nums2: list[int]) -> int:
n = len(nums1)
mn = min(nums1[-1], nums2[-1])
mx = max(nums1[-1], nums2[-1])
# the number of the minimum operations, where nums1[n - 1] is not swapped
# with nums2[n - 1]
dp1 = 0
# the number of the minimum operations, where nums1[n - 1] is swapped with
# nums2[n - 1]
dp2 = 0
for a, b in zip(nums1, nums2):
if min(a, b) > mn:
return -1
if max(a, b) > mx:
return -1
if a > nums1[-1] or b > nums2[-1]:
dp1 += 1
if a > nums2[-1] or b > nums1[-1]:
dp2 += 1
return min(dp1, dp2)
| Solution |
python | ansible__ansible | lib/ansible/cli/scripts/ansible_connection_cli_stub.py | {
"start": 1694,
"end": 13121
} | class ____(object):
"""
The connection process wraps around a Connection object that manages
the connection to a remote device that persists over the playbook
"""
def __init__(self, fd, play_context, socket_path, original_path, task_uuid=None, ansible_playbook_pid=None):
self.play_context = play_context
self.socket_path = socket_path
self.original_path = original_path
self._task_uuid = task_uuid
self.fd = fd
self.exception = None
self.srv = JsonRpcServer()
self.sock = None
self.connection = None
self._ansible_playbook_pid = ansible_playbook_pid
def start(self, options):
messages = list()
result = {}
try:
messages.append(('vvvv', 'control socket path is %s' % self.socket_path))
# If this is a relative path (~ gets expanded later) then plug the
# key's path on to the directory we originally came from, so we can
# find it now that our cwd is /
if self.play_context.private_key_file and self.play_context.private_key_file[0] not in '~/':
self.play_context.private_key_file = os.path.join(self.original_path, self.play_context.private_key_file)
self.connection = connection_loader.get(self.play_context.connection, self.play_context, '/dev/null',
task_uuid=self._task_uuid, ansible_playbook_pid=self._ansible_playbook_pid)
try:
self.connection.set_options(direct=options)
except ConnectionError as exc:
messages.append(('debug', to_text(exc)))
raise ConnectionError('Unable to decode JSON from response set_options. See the debug log for more information.')
self.connection._socket_path = self.socket_path
self.srv.register(self.connection)
messages.extend([('vvvv', msg) for msg in sys.stdout.getvalue().splitlines()])
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.bind(self.socket_path)
self.sock.listen(1)
messages.append(('vvvv', 'local domain socket listeners started successfully'))
except Exception as exc:
messages.extend(self.connection.pop_messages())
result['error'] = to_text(exc)
result['exception'] = traceback.format_exc()
finally:
result['messages'] = messages
self.fd.write(json.dumps(result, cls=_tagless.Encoder))
self.fd.close()
def run(self):
try:
log_messages = self.connection.get_option('persistent_log_messages')
while not self.connection._conn_closed:
signal.signal(signal.SIGALRM, self.connect_timeout)
signal.signal(signal.SIGTERM, self.handler)
signal.alarm(self.connection.get_option('persistent_connect_timeout'))
self.exception = None
(s, addr) = self.sock.accept()
signal.alarm(0)
signal.signal(signal.SIGALRM, self.command_timeout)
while True:
data = recv_data(s)
if not data:
break
if log_messages:
display.display("jsonrpc request: %s" % data, log_only=True)
request = json.loads(to_text(data, errors='surrogate_or_strict'))
if request.get('method') == "exec_command" and not self.connection.connected:
self.connection._connect()
signal.alarm(self.connection.get_option('persistent_command_timeout'))
resp = self.srv.handle_request(data)
signal.alarm(0)
if log_messages:
display.display("jsonrpc response: %s" % resp, log_only=True)
send_data(s, to_bytes(resp))
s.close()
except Exception as e:
# socket.accept() will raise EINTR if the socket.close() is called
if hasattr(e, 'errno'):
if e.errno != errno.EINTR:
self.exception = traceback.format_exc()
else:
self.exception = traceback.format_exc()
finally:
# allow time for any exception msg send over socket to receive at other end before shutting down
time.sleep(0.1)
# when done, close the connection properly and cleanup the socket file so it can be recreated
self.shutdown()
def connect_timeout(self, signum, frame):
msg = 'persistent connection idle timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and ' \
'Troubleshooting Guide.' % self.connection.get_option('persistent_connect_timeout')
display.display(msg, log_only=True)
raise Exception(msg)
def command_timeout(self, signum, frame):
msg = 'command timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and Troubleshooting Guide.'\
% self.connection.get_option('persistent_command_timeout')
display.display(msg, log_only=True)
raise Exception(msg)
def handler(self, signum, frame):
msg = 'signal handler called with signal %s.' % signum
display.display(msg, log_only=True)
raise Exception(msg)
def shutdown(self):
""" Shuts down the local domain socket
"""
lock_path = unfrackpath("%s/.ansible_pc_lock_%s" % os.path.split(self.socket_path))
if os.path.exists(self.socket_path):
try:
if self.sock:
self.sock.close()
if self.connection:
self.connection.close()
if self.connection.get_option("persistent_log_messages"):
for _level, message in self.connection.pop_messages():
display.display(message, log_only=True)
except Exception:
pass
finally:
if os.path.exists(self.socket_path):
os.remove(self.socket_path)
setattr(self.connection, '_socket_path', None)
setattr(self.connection, '_connected', False)
if os.path.exists(lock_path):
os.remove(lock_path)
display.display('shutdown complete', log_only=True)
def main(args=None):
""" Called to initiate the connect to the remote device
"""
parser = opt_help.create_base_parser(prog=None)
opt_help.add_verbosity_options(parser)
parser.add_argument('playbook_pid')
parser.add_argument('task_uuid')
args = parser.parse_args(args[1:] if args is not None else args)
init_plugin_loader()
# initialize verbosity
display.verbosity = args.verbosity
rc = 0
result = {}
messages = list()
socket_path = None
# Need stdin as a byte stream
stdin = sys.stdin.buffer
# Note: update the below log capture code after Display.display() is refactored.
saved_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
# read the play context data via stdin, which means depickling it
opts_data = read_stream(stdin)
init_data = read_stream(stdin)
pc_data = pickle.loads(init_data, encoding='bytes')
options = pickle.loads(opts_data, encoding='bytes')
play_context = PlayContext()
play_context.from_attrs(pc_data)
except Exception as e:
rc = 1
result.update({
'error': to_text(e),
'exception': traceback.format_exc()
})
if rc == 0:
ssh = connection_loader.get('ssh', class_only=True)
ansible_playbook_pid = args.playbook_pid
task_uuid = args.task_uuid
cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user, play_context.connection, ansible_playbook_pid)
# create the persistent connection dir if need be and create the paths
# which we will be using later
tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
makedirs_safe(tmp_path)
socket_path = unfrackpath(cp % dict(directory=tmp_path))
lock_path = unfrackpath("%s/.ansible_pc_lock_%s" % os.path.split(socket_path))
with file_lock(lock_path):
if not os.path.exists(socket_path):
messages.append(('vvvv', 'local domain socket does not exist, starting it'))
original_path = os.getcwd()
r, w = os.pipe()
pid = fork_process()
if pid == 0:
try:
os.close(r)
wfd = os.fdopen(w, 'w')
process = ConnectionProcess(wfd, play_context, socket_path, original_path, task_uuid, ansible_playbook_pid)
process.start(options)
except Exception:
messages.append(('error', traceback.format_exc()))
rc = 1
if rc == 0:
process.run()
else:
process.shutdown()
sys.exit(rc)
else:
os.close(w)
rfd = os.fdopen(r, 'r')
data = json.loads(rfd.read(), cls=_tagless.Decoder)
messages.extend(data.pop('messages'))
result.update(data)
else:
messages.append(('vvvv', 'found existing local domain socket, using it!'))
conn = Connection(socket_path)
try:
conn.set_options(direct=options)
except ConnectionError as exc:
messages.append(('debug', to_text(exc)))
raise ConnectionError('Unable to decode JSON from response set_options. See the debug log for more information.')
pc_data = to_text(init_data)
try:
conn.update_play_context(pc_data)
conn.set_check_prompt(task_uuid)
except Exception as exc:
# Only network_cli has update_play context and set_check_prompt, so missing this is
# not fatal e.g. netconf
if isinstance(exc, ConnectionError) and getattr(exc, 'code', None) == -32601:
pass
else:
result.update({
'error': to_text(exc),
'exception': traceback.format_exc()
})
if os.path.exists(socket_path):
messages.extend(Connection(socket_path).pop_messages())
messages.append(('vvvv', sys.stdout.getvalue()))
result.update({
'messages': messages,
'socket_path': socket_path
})
sys.stdout = saved_stdout
if 'exception' in result:
rc = 1
sys.stderr.write(json.dumps(result, cls=_tagless.Encoder))
else:
rc = 0
sys.stdout.write(json.dumps(result, cls=_tagless.Encoder))
sys.exit(rc)
if __name__ == '__main__':
main()
| ConnectionProcess |
python | huggingface__transformers | src/transformers/models/sam_hq/modeling_sam_hq.py | {
"start": 49238,
"end": 54227
} | class ____(nn.Module):
def __init__(self, config: SamHQConfig):
super().__init__()
self.shared_embedding = SamHQPositionalEmbedding(config.vision_config)
config = config.prompt_encoder_config
self.mask_embed = SamHQMaskEmbedding(config)
self.no_mask_embed = nn.Embedding(1, config.hidden_size)
self.image_embedding_size = (config.image_embedding_size, config.image_embedding_size)
self.input_image_size = config.image_size
self.point_embed = nn.ModuleList(
[nn.Embedding(1, config.hidden_size) for i in range(config.num_point_embeddings)]
)
self.hidden_size = config.hidden_size
self.not_a_point_embed = nn.Embedding(1, config.hidden_size)
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
target_point_shape = (points.shape[0], points.shape[1], 1, points.shape[-1])
target_labels_shape = (points.shape[0], points.shape[1], 1)
padding_point = torch.zeros(target_point_shape, device=points.device)
padding_label = -torch.ones(target_labels_shape, device=labels.device)
points = torch.cat([points, padding_point], dim=2)
labels = torch.cat([labels, padding_label], dim=2)
input_shape = (self.input_image_size, self.input_image_size)
point_embedding = self.shared_embedding(points, input_shape)
# torch.where and expanding the labels tensor is required by the ONNX export
point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding)
# This is required for the ONNX export. The dtype, device need to be explicitly
# specified as otherwise torch.onnx.export interprets as double
point_embedding = torch.where(labels[..., None] != -10, point_embedding, torch.zeros_like(point_embedding))
point_embedding = torch.where(
(labels == 0)[:, :, :, None],
point_embedding + self.point_embed[0].weight[None, None, :, :],
point_embedding,
)
point_embedding = torch.where(
(labels == 1)[:, :, :, None],
point_embedding + self.point_embed[1].weight[None, None, :, :],
point_embedding,
)
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5 # Shift to center of pixel
batch_size, nb_boxes = boxes.shape[:2]
coords = boxes.reshape(batch_size, nb_boxes, 2, 2)
input_shape = (self.input_image_size, self.input_image_size)
corner_embedding = self.shared_embedding(coords, input_shape)
corner_embedding[:, :, 0, :] += self.point_embed[2].weight
corner_embedding[:, :, 1, :] += self.point_embed[3].weight
return corner_embedding
def forward(
self,
input_points: Optional[tuple[torch.Tensor, torch.Tensor]],
input_labels: Optional[torch.Tensor],
input_boxes: Optional[torch.Tensor],
input_masks: Optional[torch.Tensor],
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Embeds different types of prompts, returning both sparse and dense embeddings.
Args:
points (`torch.Tensor`, *optional*):
point coordinates and labels to embed.
boxes (`torch.Tensor`, *optional*):
boxes to embed
masks (`torch.Tensor`, *optional*):
masks to embed
"""
sparse_embeddings = None
batch_size = 1
if input_points is not None:
batch_size = input_points.shape[0]
if input_labels is None:
raise ValueError("If points are provided, labels must also be provided.")
point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None))
sparse_embeddings = point_embeddings
if input_boxes is not None:
batch_size = input_boxes.shape[0]
box_embeddings = self._embed_boxes(input_boxes)
if sparse_embeddings is None:
sparse_embeddings = box_embeddings
else:
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2)
if input_masks is not None:
dense_embeddings = self.mask_embed(input_masks)
else:
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1]
)
return sparse_embeddings, dense_embeddings
@auto_docstring(
custom_intro="""
Segment Anything Model HQ (SAM-HQ) for generating masks, given an input image and optional 2D location and bounding boxes.
"""
)
| SamHQPromptEncoder |
python | huggingface__transformers | src/transformers/models/janus/modeling_janus.py | {
"start": 24613,
"end": 27416
} | class ____(nn.Module):
"""
A module for vector quantization using learned embedding vectors.
This module implements the quantization process similar to te one described in
the VQ-VAE (Vector Quantized Variational AutoEncoder) paper. It quantizes continuous
input vectors into discrete codebook vectors, which are learned during training.
Current implementation improves over previous ones by avoiding costly matrix multiplications
and allowing for post-hoc remapping of indices.
"""
def __init__(self, config: JanusVQVAEConfig):
super().__init__()
self.num_embeddings = config.num_embeddings
self.embedding_dim = config.embed_dim
self.beta = getattr(config, "beta", 0.25)
self.embedding = nn.Embedding(self.num_embeddings, self.embedding_dim)
self.quant_state_dims = [config.num_patches] * 2
def forward(self, hidden_state: torch.Tensor):
hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous()
hidden_state_flattened = hidden_state.view(-1, self.embedding_dim)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
distances = (
torch.sum(hidden_state_flattened**2, dim=1, keepdim=True)
+ torch.sum(self.embedding.weight**2, dim=1)
- 2 * torch.einsum("bd,dn->bn", hidden_state_flattened, self.embedding.weight.transpose(0, 1))
)
min_encoding_indices = torch.argmin(distances, dim=1)
hidden_state_quant = self.embedding(min_encoding_indices).view(hidden_state.shape)
# compute loss for embedding
loss = torch.mean((hidden_state_quant.detach() - hidden_state) ** 2) + self.beta * torch.mean(
(hidden_state_quant - hidden_state.detach()) ** 2
)
# preserve gradients
hidden_state_quant = hidden_state + (hidden_state_quant - hidden_state).detach()
# reshape back to match original input shape
hidden_state_quant = hidden_state_quant.permute(0, 3, 1, 2).contiguous()
return hidden_state_quant, loss, min_encoding_indices
def get_codebook_entry(self, image_tokens: torch.LongTensor) -> torch.FloatTensor:
batch_size = image_tokens.shape[0]
emb_dim: int = self.embedding.weight.shape[-1]
# get quantized latent vectors
hidden_state_quant = self.embedding(image_tokens)
# l2 normalization on the last dimension
hidden_state_quant = F.normalize(hidden_state_quant, p=2, dim=-1)
# reshape back to match original input shape
hidden_state_quant = hidden_state_quant.view((batch_size, *self.quant_state_dims, emb_dim))
hidden_state_quant = hidden_state_quant.permute(0, 3, 1, 2).contiguous()
return hidden_state_quant
| JanusVQVAEVectorQuantizer |
python | getsentry__sentry | src/sentry/integrations/slack/requests/action.py | {
"start": 338,
"end": 5258
} | class ____(SlackRequest):
"""
An Action request sent from Slack.
Action requests nest their data inside of a ``payload`` key in the request
body, for some reason. Therefore they require an extra bit of data
validation.
"""
@property
def type(self) -> str:
return str(self.data.get("type"))
@cached_property
def callback_data(self) -> Any:
"""
We store certain data in ``callback_id`` as JSON. It's a bit hacky, but
it's the simplest way to store state without saving it on the Sentry
side.
Data included in this field:
- issue: the ID of the corresponding Issue
- orig_response_url: URL from the original message we received
- is_message: did the original message have a 'message' type
"""
if self.data.get("callback_id"):
return orjson.loads(self.data["callback_id"])
# XXX(CEO): can't really feature flag this but the block kit data is very different
# slack sends us a response when a modal is opened and when an option is selected
# we don't do anything with it until the user hits "Submit" but we need to handle it anyway
if self.data["type"] == "block_actions":
if self.data.get("view"):
return orjson.loads(self.data["view"]["private_metadata"])
elif self.data.get("container", {}).get(
"is_app_unfurl"
): # for actions taken on interactive unfurls
return orjson.loads(
self.data["app_unfurl"]["blocks"][0]["block_id"],
)
return orjson.loads(self.data["message"]["blocks"][0]["block_id"])
if self.data["type"] == "view_submission":
return orjson.loads(self.data["view"]["private_metadata"])
for data in self.data["message"]["blocks"]:
if data["type"] == "section" and len(data["block_id"]) > 5:
return orjson.loads(data["block_id"])
# a bit hacky, you can only provide a block ID per block (not per entire message),
# and if not provided slack generates a 5 char long one. our provided block_id is at least '{issue: <issue_id>}'
# so we know it's longer than 5 chars
def _validate_data(self) -> None:
"""
Action requests provide the body of the request differently than Event
requests (nested in a ``payload`` attribute), so there's extra
validation needed.
"""
super()._validate_data()
if "payload" not in self.request.data:
raise SlackRequestError(status=status.HTTP_400_BAD_REQUEST)
try:
self._data = orjson.loads(self.data["payload"])
except (KeyError, IndexError, TypeError, ValueError):
raise SlackRequestError(status=status.HTTP_400_BAD_REQUEST)
# for interactive unfurls with block kit
if (
self.data.get("type") == "block_actions"
and self.data.get("container", {}).get("is_app_unfurl")
and ("app_unfurl" not in self.data or len(self.data["app_unfurl"]["blocks"]) == 0)
):
raise SlackRequestError(status=status.HTTP_400_BAD_REQUEST)
def _log_request(self) -> None:
self._info("slack.action")
def get_logging_data(
self,
group: Group | None = None,
) -> dict[str, Any]:
logging_data: dict[str, Any] = {
**self.logging_data,
"response_url": self.response_url,
}
if group:
logging_data.update(
{
"group_id": group.id,
"organization_id": group.organization.id,
}
)
return logging_data
def get_tags(self) -> set[str]:
blocks = self.data.get("message", {}).get("blocks", [{}])
tags = set()
for block in blocks:
if "tags" not in block.get("block_id", ""):
continue
text: str = block.get("text", {}).get("text", "")
tag_keys = text.split("`")
for i, tag_key in enumerate(tag_keys):
# the tags are organized as tag_key: tag_value, so even indexed tags are keys
if i % 2 == 1:
continue
if tag_key.strip().endswith(":"):
tags.add(tag_key.strip(": "))
return tags
def get_action_ts(self) -> str | None:
"""
Get the action timestamp from the Slack request data.
Returns:
str | None: The action timestamp if available, None otherwise.
"""
actions = self.data.get("actions", [])
if actions and isinstance(actions, list) and len(actions) > 0:
(action,) = actions
return action.get("action_ts")
return None
| SlackActionRequest |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_graph.py | {
"start": 57558,
"end": 57830
} | class ____(graphene.ObjectType):
class Meta:
name = "DefinitionGroup"
groupName = graphene.NonNull(graphene.String)
repositoryName = graphene.NonNull(graphene.String)
repositoryLocationName = graphene.NonNull(graphene.String)
| GrapheneDefinitionGroup |
python | django-haystack__django-haystack | haystack/backends/elasticsearch_backend.py | {
"start": 39158,
"end": 39281
} | class ____(BaseEngine):
backend = ElasticsearchSearchBackend
query = ElasticsearchSearchQuery
| ElasticsearchSearchEngine |
python | openai__openai-python | src/openai/types/fine_tuning/reinforcement_hyperparameters_param.py | {
"start": 248,
"end": 1357
} | class ____(TypedDict, total=False):
batch_size: Union[Literal["auto"], int]
"""Number of examples in each batch.
A larger batch size means that model parameters are updated less frequently, but
with lower variance.
"""
compute_multiplier: Union[Literal["auto"], float]
"""
Multiplier on amount of compute used for exploring search space during training.
"""
eval_interval: Union[Literal["auto"], int]
"""The number of training steps between evaluation runs."""
eval_samples: Union[Literal["auto"], int]
"""Number of evaluation samples to generate per training step."""
learning_rate_multiplier: Union[Literal["auto"], float]
"""Scaling factor for the learning rate.
A smaller learning rate may be useful to avoid overfitting.
"""
n_epochs: Union[Literal["auto"], int]
"""The number of epochs to train the model for.
An epoch refers to one full cycle through the training dataset.
"""
reasoning_effort: Literal["default", "low", "medium", "high"]
"""Level of reasoning effort."""
| ReinforcementHyperparametersParam |
python | PyCQA__pylint | tests/functional/u/unexpected_special_method_signature.py | {
"start": 1563,
"end": 2008
} | class ____:
def __aiter__(self, extra): # [unexpected-special-method-signature]
pass
def __anext__(self, extra, argument): # [unexpected-special-method-signature]
pass
def __await__(self, param): # [unexpected-special-method-signature]
pass
def __aenter__(self, first): # [unexpected-special-method-signature]
pass
def __aexit__(self): # [unexpected-special-method-signature]
pass
| Async |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_health.py | {
"start": 1590,
"end": 1828
} | class ____(graphene.ObjectType):
numNotExecutedChecks = graphene.NonNull(graphene.Int)
totalNumChecks = graphene.NonNull(graphene.Int)
class Meta:
name = "AssetHealthCheckUnknownMeta"
| GrapheneAssetHealthCheckUnknownMeta |
python | encode__django-rest-framework | tests/test_parsers.py | {
"start": 5290,
"end": 6727
} | class ____(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
def test_post_accessed_in_post_method(self):
django_request = self.factory.post('/', {'foo': 'bar'})
request = Request(django_request, parsers=[FormParser(), MultiPartParser()])
django_request.POST
assert request.POST == {'foo': ['bar']}
assert request.data == {'foo': ['bar']}
def test_post_accessed_in_post_method_with_json_parser(self):
django_request = self.factory.post('/', {'foo': 'bar'})
request = Request(django_request, parsers=[JSONParser()])
django_request.POST
assert request.POST == {}
assert request.data == {}
def test_post_accessed_in_put_method(self):
django_request = self.factory.put('/', {'foo': 'bar'})
request = Request(django_request, parsers=[FormParser(), MultiPartParser()])
django_request.POST
assert request.POST == {'foo': ['bar']}
assert request.data == {'foo': ['bar']}
def test_request_read_before_parsing(self):
django_request = self.factory.put('/', {'foo': 'bar'})
request = Request(django_request, parsers=[FormParser(), MultiPartParser()])
django_request.read()
with pytest.raises(RawPostDataException):
request.POST
with pytest.raises(RawPostDataException):
request.POST
request.data
| TestPOSTAccessed |
python | pytorch__pytorch | test/inductor/test_compiled_autograd.py | {
"start": 3265,
"end": 3482
} | class ____(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad_output):
raise NotImplementedError("must override")
| BaseCustomOp |
python | ray-project__ray | python/ray/llm/tests/serve/cpu/deployments/prefill_decode_disagg/test_prefill_decode_disagg.py | {
"start": 7385,
"end": 8942
} | class ____:
@pytest.mark.parametrize("kv_connector", ["NixlConnector", "LMCacheConnectorV1"])
def test_parse_dict(self, kv_connector: str):
prefill_config = LLMConfig(
model_loading_config=dict(
model_id="qwen-0.5b",
model_source="Qwen/Qwen2.5-0.5B-Instruct",
),
deployment_config=dict(
autoscaling_config=dict(
min_replicas=2,
max_replicas=2,
)
),
engine_kwargs=dict(
tensor_parallel_size=1,
kv_transfer_config=dict(
kv_connector=kv_connector,
kv_role="kv_both",
),
),
)
decode_config = LLMConfig(
model_loading_config=dict(
model_id="qwen-0.5b",
model_source="Qwen/Qwen2.5-0.5B-Instruct",
),
deployment_config=dict(
autoscaling_config=dict(
min_replicas=1,
max_replicas=1,
)
),
engine_kwargs=dict(
tensor_parallel_size=1,
kv_transfer_config=dict(
kv_connector=kv_connector,
kv_role="kv_both",
),
),
)
pd_config = {"prefill_config": prefill_config, "decode_config": decode_config}
app = build_pd_openai_app(pd_config)
assert app is not None
| TestServingArgsParsing |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 163055,
"end": 163884
} | class ____(ASTBase):
def __init__(
self, nestedName: ASTNestedName, templatePrefix: ASTTemplateDeclarationPrefix
) -> None:
self.nestedName = nestedName
self.templatePrefix = templatePrefix
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTNamespace):
return NotImplemented
return (
self.nestedName == other.nestedName
and self.templatePrefix == other.templatePrefix
)
def __hash__(self) -> int:
return hash((self.nestedName, self.templatePrefix))
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.templatePrefix:
res.append(transform(self.templatePrefix))
res.append(transform(self.nestedName))
return ''.join(res)
| ASTNamespace |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/deadcode.py | {
"start": 1371,
"end": 1595
} | class ____():
def __init__(self, y):
return
self.x = _test_source()
_test_sink(y)
def early_return_no_issue_class():
object = EarlyReturns(_test_source())
_test_sink(object.x)
| EarlyReturns |
python | crytic__slither | slither/printers/summary/martin.py | {
"start": 645,
"end": 1226
} | class ____(AbstractPrinter):
ARGUMENT = "martin"
HELP = "Martin agile software metrics (Ca, Ce, I, A, D)"
WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#martin"
def output(self, _filename: str) -> Output:
if len(self.contracts) == 0:
return self.generate_output("No contract found")
martin = MartinMetrics(self.contracts)
res = self.generate_output(martin.full_text)
res.add_pretty_table(martin.core.pretty_table, martin.core.title)
self.info(martin.full_text)
return res
| Martin |
python | wandb__wandb | wandb/vendor/pygments/lexers/modeling.py | {
"start": 585,
"end": 3719
} | class ____(RegexLexer):
"""
For `Modelica <http://www.modelica.org/>`_ source code.
.. versionadded:: 1.1
"""
name = 'Modelica'
aliases = ['modelica']
filenames = ['*.mo']
mimetypes = ['text/x-modelica']
flags = re.DOTALL | re.MULTILINE
_name = r"(?:'(?:[^\\']|\\.)+'|[a-zA-Z_]\w*)"
tokens = {
'whitespace': [
(u'[\\s\ufeff]+', Text),
(r'//[^\n]*\n?', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'root': [
include('whitespace'),
(r'"', String.Double, 'string'),
(r'[()\[\]{},;]+', Punctuation),
(r'\.?[*^/+-]|\.|<>|[<>:=]=?', Operator),
(r'\d+(\.?\d*[eE][-+]?\d+|\.\d*)', Number.Float),
(r'\d+', Number.Integer),
(r'(abs|acos|actualStream|array|asin|assert|AssertionLevel|atan|'
r'atan2|backSample|Boolean|cardinality|cat|ceil|change|Clock|'
r'Connections|cos|cosh|cross|delay|diagonal|div|edge|exp|'
r'ExternalObject|fill|floor|getInstanceName|hold|homotopy|'
r'identity|inStream|integer|Integer|interval|inverse|isPresent|'
r'linspace|log|log10|matrix|max|min|mod|ndims|noClock|noEvent|'
r'ones|outerProduct|pre|previous|product|Real|reinit|rem|rooted|'
r'sample|scalar|semiLinear|shiftSample|sign|sin|sinh|size|skew|'
r'smooth|spatialDistribution|sqrt|StateSelect|String|subSample|'
r'sum|superSample|symmetric|tan|tanh|terminal|terminate|time|'
r'transpose|vector|zeros)\b', Name.Builtin),
(r'(algorithm|annotation|break|connect|constant|constrainedby|der|'
r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
r'equation|exit|expandable|extends|external|final|flow|for|if|'
r'import|impure|in|initial|inner|input|loop|nondiscrete|outer|'
r'output|parameter|partial|protected|public|pure|redeclare|'
r'replaceable|return|stream|then|when|while)\b',
Keyword.Reserved),
(r'(and|not|or)\b', Operator.Word),
(r'(block|class|connector|end|function|model|operator|package|'
r'record|type)\b', Keyword.Reserved, 'class'),
(r'(false|true)\b', Keyword.Constant),
(r'within\b', Keyword.Reserved, 'package-prefix'),
(_name, Name)
],
'class': [
include('whitespace'),
(r'(function|record)\b', Keyword.Reserved),
(r'(if|for|when|while)\b', Keyword.Reserved, '#pop'),
(_name, Name.Class, '#pop'),
default('#pop')
],
'package-prefix': [
include('whitespace'),
(_name, Name.Namespace, '#pop'),
default('#pop')
],
'string': [
(r'"', String.Double, '#pop'),
(r'\\[\'"?\\abfnrtv]', String.Escape),
(r'(?i)<\s*html\s*>([^\\"]|\\.)+?(<\s*/\s*html\s*>|(?="))',
using(HtmlLexer)),
(r'<|\\?[^"\\<]+', String.Double)
]
}
| ModelicaLexer |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 77863,
"end": 78959
} | class ____:
def test_not_oid(self):
with pytest.raises(TypeError):
x509.RegisteredID(b"notanoid") # type:ignore[arg-type]
with pytest.raises(TypeError):
x509.RegisteredID(1.3) # type:ignore[arg-type]
def test_repr(self):
gn = x509.RegisteredID(NameOID.COMMON_NAME)
assert repr(gn) == (
"<RegisteredID(value=<ObjectIdentifier(oid=2.5.4.3, name=commonNam"
"e)>)>"
)
def test_eq(self):
gn = x509.RegisteredID(NameOID.COMMON_NAME)
gn2 = x509.RegisteredID(NameOID.COMMON_NAME)
assert gn == gn2
def test_ne(self):
gn = x509.RegisteredID(NameOID.COMMON_NAME)
gn2 = x509.RegisteredID(ExtensionOID.BASIC_CONSTRAINTS)
assert gn != gn2
assert gn != object()
def test_hash(self):
gn = x509.RegisteredID(NameOID.COMMON_NAME)
gn2 = x509.RegisteredID(NameOID.COMMON_NAME)
gn3 = x509.RegisteredID(ExtensionOID.BASIC_CONSTRAINTS)
assert hash(gn) == hash(gn2)
assert hash(gn) != hash(gn3)
| TestRegisteredID |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/debugger_cli_common_test.py | {
"start": 31691,
"end": 36608
} | class ____(test_util.TensorFlowTestCase):
def setUp(self):
self._tc_reg = debugger_cli_common.TabCompletionRegistry()
# Register the items in an unsorted order deliberately, to test the sorted
# output from get_completions().
self._tc_reg.register_tab_comp_context(
["print_tensor", "pt"],
["node_b:1", "node_b:2", "node_a:1", "node_a:2"])
self._tc_reg.register_tab_comp_context(["node_info"],
["node_c", "node_b", "node_a"])
def testTabCompletion(self):
# The returned completions should have sorted order.
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a:1", "node_a:2", "node_b:1", "node_b:2"],
"node_"), self._tc_reg.get_completions("pt", ""))
self.assertEqual((["node_a:1", "node_a:2"], "node_a:"),
self._tc_reg.get_completions("print_tensor", "node_a"))
self.assertEqual((["node_a:1"], "node_a:1"),
self._tc_reg.get_completions("pt", "node_a:1"))
self.assertEqual(([], ""),
self._tc_reg.get_completions("print_tensor", "node_a:3"))
self.assertEqual((None, None), self._tc_reg.get_completions("foo", "node_"))
def testExtendCompletionItems(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.extend_comp_items("print_tensor", ["node_A:1", "node_A:2"])
self.assertEqual((["node_A:1", "node_A:2", "node_a:1", "node_a:2",
"node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
# Extending the completions for one of the context's context words should
# have taken effect on other context words of the same context as well.
self.assertEqual((["node_A:1", "node_A:2", "node_a:1", "node_a:2",
"node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("pt", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
def testExtendCompletionItemsNonexistentContext(self):
with self.assertRaisesRegex(KeyError,
"Context word \"foo\" has not been registered"):
self._tc_reg.extend_comp_items("foo", ["node_A:1", "node_A:2"])
def testRemoveCompletionItems(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.remove_comp_items("pt", ["node_a:1", "node_a:2"])
self.assertEqual((["node_b:1", "node_b:2"], "node_b:"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
def testRemoveCompletionItemsNonexistentContext(self):
with self.assertRaisesRegex(KeyError,
"Context word \"foo\" has not been registered"):
self._tc_reg.remove_comp_items("foo", ["node_a:1", "node_a:2"])
def testDeregisterContext(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.deregister_context(["print_tensor"])
self.assertEqual((None, None),
self._tc_reg.get_completions("print_tensor", "node_"))
# The alternative context word should be unaffected.
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("pt", "node_"))
def testDeregisterNonexistentContext(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.deregister_context(["print_tensor"])
with self.assertRaisesRegex(
KeyError,
"Cannot deregister unregistered context word \"print_tensor\""):
self._tc_reg.deregister_context(["print_tensor"])
| TabCompletionRegistryTest |
python | huggingface__transformers | src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | {
"start": 4096,
"end": 9065
} | class ____(ModelOutput):
r"""
waveform (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
The final audio waveform predicted by the model.
waveform_lengths (`torch.IntTensor` of shape `(batch_size,)`, *optional*):
The length in samples of each element in the `waveform` batch.
sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
The generated translated sequences. This is the output of the text-to-text or the speech-to-text models.
The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished
early due to the `eos_token_id`.
unit_sequences (`torch.LongTensor` of shape `(batch_size, unit_sequence_length)`, *optional*):
The generated translated unit sequences. This is the output of the text-to-units model. The second
dimension (unit_sequence_length) is either equal to `t2u_max_length` or shorter if all batches finished
early due to the `t2u_eos_token_id`.
"""
waveform: Optional[torch.FloatTensor] = None
waveform_lengths: Optional[torch.IntTensor] = None
sequences: Optional[tuple[torch.FloatTensor]] = None
unit_sequences: Optional[tuple[torch.FloatTensor]] = None
############ UTILS ################
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("self.model.config.pad_token_id has to be defined.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
def _compute_new_attention_mask(hidden_states: torch.Tensor, seq_lens: torch.Tensor):
"""
Computes an attention mask of the form `(batch, seq_len)` with an attention for each element in the batch that
stops at the corresponding element in `seq_lens`.
Args:
hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, *)`):
The sequences to mask, where `*` is any number of sequence-specific dimensions including none.
seq_lens (`torch.Tensor` of shape `(batch)`:
Each element represents the length of the sequence at the same index in `hidden_states`
Returns:
`torch.FloatTensor`: The float attention mask of shape `(batch, seq_len)`
"""
batch_size, mask_seq_len = hidden_states.shape[:2]
indices = torch.arange(mask_seq_len, device=seq_lens.device).expand(batch_size, -1)
bool_mask = indices >= seq_lens.unsqueeze(1).expand(-1, mask_seq_len)
mask = hidden_states.new_ones((batch_size, mask_seq_len))
mask = mask.masked_fill(bool_mask, 0)
return mask
def format_speech_generation_kwargs(kwargs):
"""
Format kwargs for SeamlessM4T models that generate speech, attribute kwargs to either the text generation or the
speech generation models.
Args:
kwargs (`dict`)`:
Keyword arguments are of two types:
- Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
except for `decoder_input_ids` which will only be passed through the text components.
- With a *text_* or *speech_* prefix, they will be input for the `generate` method of the
text model and speech model respectively. It has the priority over the keywords without a prefix.
This means you can, for example, specify a generation strategy for one generation but not for the
other.
"""
# attribute kwargs to models
kwargs_text = {}
kwargs_speech = {}
for key, value in kwargs.items():
if key.startswith("text_"):
key = key[len("text_") :]
kwargs_text[key] = value
elif key.startswith("speech_"):
key = key[len("speech_") :]
kwargs_speech[key] = value
elif key == "generation_config":
kwargs_text[key] = value
else:
# If the key is already in a specific config, then it's been set with a
# submodules specific value and we don't override
if key not in kwargs_text:
kwargs_text[key] = value
if key not in kwargs_speech:
kwargs_speech[key] = value
return kwargs_text, kwargs_speech
############ SPEECH ENCODER related code ################
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->SeamlessM4TConformer, feat_extract_activation->speech_encoder_hidden_act
| SeamlessM4TGenerationOutput |
python | ApeWorX__ape | src/ape_compile/config.py | {
"start": 269,
"end": 568
} | class ____(ConfigEnum):
"""
Extra stuff you can output. It will
appear in ``.build/{key.lower()/``
"""
ABI = "ABI"
"""
Include this value to output the ABIs of your contracts
to minified JSONs. This is useful for hosting purposes
for web-apps.
"""
| OutputExtras |
python | pytorch__pytorch | test/distributed/test_c10d_gloo.py | {
"start": 96444,
"end": 100823
} | class ____(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
@property
def device(self):
return "cpu"
def setUp(self):
super().setUp()
self._spawn_processes()
def tearDown(self):
super().tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def _test_broadcast_coalesced(self, process_group, device, root_rank):
half = torch.float16
# No support for float16 for CPU tensors
if device == torch.device("cpu"):
half = torch.float32
target = torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
# The tensors to pass to broadcast are identical to the target
# only on the process that is the root of the broadcast.
if self.rank == root_rank:
tensors = [tensor.clone() for tensor in target]
else:
tensors = [torch.zeros_like(tensor) for tensor in target]
if self.rank != root_rank:
self.assertNotEqual(tensors, target)
c10d._broadcast_coalesced(
process_group, tensors, buffer_size=256, src=root_rank
)
if self.rank != root_rank:
self.assertEqual(tensors, target)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_gloo_cuda(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="gloo", store=store, rank=self.rank, world_size=self.world_size
)
process_group = c10d.distributed_c10d._get_default_group()
device = torch.device(f"cuda:{self.rank:d}")
backend = process_group._get_backend(device)
backend.create_device(interface=LOOPBACK)
ranks = list(range(self.world_size))
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_gloo()
def test_broadcast_coalesced_gloo_cpu(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="gloo", store=store, rank=self.rank, world_size=self.world_size
)
process_group = c10d.distributed_c10d._get_default_group()
device = torch.device("cpu")
backend = process_group._get_backend(device)
backend.create_device(interface=LOOPBACK)
ranks = list(range(self.world_size))
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_default_pg_gloo(self):
self._test_sequence_num_set_default_pg(backend="gloo")
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_gloo_new_group(self):
self._test_sequence_num_set_new_group(backend="gloo")
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_sequence_num_incremented_gloo_default(self):
self._test_sequence_num_incremented_default_group("gloo")
@skip_if_lt_x_gpu(4)
@requires_gloo()
def test_sequence_num_incremented_gloo_subgroup(self):
if self.world_size < 4:
return skip_but_pass_in_sandcastle("Test requires world_size of at least 4")
self._test_sequence_num_incremented_subgroup("gloo")
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_gloo_warn_not_in_group(self):
self._test_warn_not_in_group(backend="gloo")
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_gloo_rank_membership(self):
self._test_rank_membership(backend="gloo")
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_tensor_dtype_mismatch(self):
self._test_tensor_dtype_mismatch(backend="gloo")
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_tensor_dtype_complex(self):
self._test_tensor_dtype_complex(backend="gloo")
@requires_gloo()
def test_bool_tensors(self):
self._test_bool_tensors(backend="gloo")
| CommTest |
python | pennersr__django-allauth | allauth/socialaccount/providers/edmodo/provider.py | {
"start": 436,
"end": 1267
} | class ____(OAuth2Provider):
id = "edmodo"
name = "Edmodo"
account_class = EdmodoAccount
oauth2_adapter_class = EdmodoOAuth2Adapter
def get_default_scope(self):
return ["basic"]
def extract_uid(self, data):
return str(data["id"])
def extract_common_fields(self, data):
return dict(
first_name=data.get("first_name"),
last_name=data.get("last_name"),
email=data.get("email", ""),
)
def extract_extra_data(self, data):
ret = dict(data)
# NOTE: For backwards compatibility
ret["user_type"] = data.get("type")
ret["profile_url"] = data.get("url")
ret["avatar_url"] = data.get("avatars", {}).get("large")
# (end NOTE)
return ret
provider_classes = [EdmodoProvider]
| EdmodoProvider |
python | pandas-dev__pandas | pandas/tests/arithmetic/test_timedelta64.py | {
"start": 24310,
"end": 26940
} | class ____:
# TODO: parametrize over boxes
@pytest.mark.parametrize("str_ts", ["1950-01-01", "1980-01-01"])
def test_tdarr_add_timestamp_nat_masking(self, box_with_array, str_ts):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(["24658 days 11:15:00", "NaT"])
tdobj = tm.box_expected(tdinat, box_with_array)
ts = Timestamp(str_ts)
ts_variants = [
ts,
ts.to_pydatetime(),
ts.to_datetime64().astype("datetime64[ns]"),
ts.to_datetime64().astype("datetime64[D]"),
]
for variant in ts_variants:
res = tdobj + variant
if box_with_array is DataFrame:
assert res.iloc[1, 1] is NaT
else:
assert res[1] is NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
# TODO: Make raised error message more informative and test
with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"):
pd.to_timedelta(106580, "D") + Timestamp("2000")
with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"):
Timestamp("2000") + pd.to_timedelta(106580, "D")
_NaT = NaT._value + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], "D") + Timestamp("2000")
with pytest.raises(OverflowError, match=msg):
Timestamp("2000") + pd.to_timedelta([106580], "D")
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta("1 days")
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(["5 days", _NaT]) - Timedelta("1 days")
with pytest.raises(OverflowError, match=msg):
(
pd.to_timedelta([_NaT, "5 days", "1 hours"])
- pd.to_timedelta(["7 seconds", _NaT, "4 hours"])
)
# These should not overflow!
exp = TimedeltaIndex([NaT])
result = pd.to_timedelta([NaT]) - Timedelta("1 days")
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(["4 days", NaT])
result = pd.to_timedelta(["5 days", NaT]) - Timedelta("1 days")
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([NaT, NaT, "5 hours"])
result = pd.to_timedelta([NaT, "5 days", "1 hours"]) + pd.to_timedelta(
["7 seconds", NaT, "4 hours"]
)
tm.assert_index_equal(result, exp)
| TestAddSubNaTMasking |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_optimize10.py | {
"start": 315,
"end": 1608
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("optimize10.xlsx")
self.set_text_file("unicode_polish_utf8.txt")
def test_create_file(self):
"""Test example file converting Unicode text."""
# Open the input file with the correct encoding.
textfile = open(self.txt_filename, mode="r", encoding="utf-8")
# Create an new Excel file and convert the text data.
workbook = Workbook(
self.got_filename, {"constant_memory": True, "in_memory": False}
)
worksheet = workbook.add_worksheet()
# Widen the first column to make the text clearer.
worksheet.set_column("A:A", 50)
# Start from the first cell.
row = 0
col = 0
# Read the text file and write it to the worksheet.
for line in textfile:
# Ignore the comments in the sample file.
if line.startswith("#"):
continue
# Write any other lines to the worksheet.
worksheet.write(row, col, line.rstrip("\n"))
row += 1
workbook.close()
textfile.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | tests/sentry/api/test_utils.py | {
"start": 4048,
"end": 6328
} | class ____(APITestCase):
def setUp(self) -> None:
self.handler_error = Exception("nope")
@patch("sys.stderr.write")
def test_logs_error_locally(self, mock_stderr_write: MagicMock) -> None:
try:
raise self.handler_error
except Exception as e:
print_and_capture_handler_exception(e)
(((s,), _),) = mock_stderr_write.call_args_list
assert s.splitlines()[-1] == "Exception: nope"
@patch("sentry.api.utils.capture_exception")
def test_passes_along_exception(
self,
mock_capture_exception: MagicMock,
) -> None:
print_and_capture_handler_exception(self.handler_error)
assert mock_capture_exception.call_args.args[0] == self.handler_error
@patch("sentry.api.utils.capture_exception")
def test_merges_handler_context_with_scope(
self,
mock_capture_exception: MagicMock,
) -> None:
handler_context = {"api_request_URL": "http://dogs.are.great/"}
scope = Scope()
tags = {"maisey": "silly", "charlie": "goofy"}
for tag, value in tags.items():
scope.set_tag(tag, value)
cases = [
# The first half of each tuple is what's passed to
# `print_and_capture_handler_exception`, and the second half is what we expect in the
# scope passed to `capture_exception`
(None, None, {}, {}),
(handler_context, None, {"Request Handler Data": handler_context}, {}),
(None, scope, {}, tags),
(
handler_context,
scope,
{"Request Handler Data": handler_context},
tags,
),
]
for handler_context_arg, scope_arg, expected_scope_contexts, expected_scope_tags in cases:
print_and_capture_handler_exception(self.handler_error, handler_context_arg, scope_arg)
capture_exception_scope_kwarg = mock_capture_exception.call_args.kwargs.get("scope")
assert isinstance(capture_exception_scope_kwarg, Scope)
assert capture_exception_scope_kwarg._contexts == expected_scope_contexts
assert capture_exception_scope_kwarg._tags == expected_scope_tags
| PrintAndCaptureHandlerExceptionTest |
python | pydantic__pydantic | tests/mypy/modules/generics.py | {
"start": 295,
"end": 354
} | class ____(BaseModel):
raw: str
doctype: str
| HtmlBody |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 213170,
"end": 215328
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3, 3]", L_v_: "f32[3, 3]"):
l_x_ = L_x_
l_v_ = L_v_
_jvp_increment_nesting = torch._C._functorch._jvp_increment_nesting(); _jvp_increment_nesting = None
_set_fwd_grad_enabled = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled = None
_enter_dual_level = torch._C._enter_dual_level(); _enter_dual_level = None
_maybe_load_decompositions = torch.autograd.forward_ad._maybe_load_decompositions(); _maybe_load_decompositions = None
_make_dual: "f32[3, 3]" = torch._make_dual(l_x_, l_v_, level = 0); l_x_ = l_v_ = None
sin: "f32[3, 3]" = _make_dual.sin(); _make_dual = None
result_duals: "f32[]" = sin.sum(); sin = None
_unpack_dual = torch._unpack_dual(result_duals, level = 0); result_duals = None
primal: "f32[]" = _unpack_dual[0]
dual: "f32[]" = _unpack_dual[1]; _unpack_dual = None
primals_out_unflatten: "f32[]" = torch._C._functorch._unwrap_for_grad(primal, 1); primal = None
tangents_out_unflatten: "f32[]" = torch._C._functorch._unwrap_for_grad(dual, 1); dual = None
_exit_dual_level = torch._C._exit_dual_level(0); _exit_dual_level = None
_set_fwd_grad_enabled_1 = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled_1 = None
_jvp_decrement_nesting = torch._C._functorch._jvp_decrement_nesting(); _jvp_decrement_nesting = None
return (primals_out_unflatten, tangents_out_unflatten)
""",
)
def test_jvp_has_aux(self):
counters.clear()
def fn(x):
return x.sin().sum(), x
def wrapper_fn(x, v):
return torch.func.jvp(fn, (x,), (v,), has_aux=True)
x = torch.randn(3, 3)
v = torch.randn(3, 3)
wrapped_gm = self._compile_check(wrapper_fn, (x, v))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | python__mypy | mypyc/ir/ops.py | {
"start": 60605,
"end": 60699
} | class ____(NamedTuple):
classes: dict[str, ClassIR]
functions: dict[str, FuncIR]
| DeserMaps |
python | jazzband__django-waffle | waffle/tests/test_testutils.py | {
"start": 9753,
"end": 10193
} | class ____:
@classmethod
def setUpClass(cls):
super().setUpClass()
assert not waffle.get_waffle_flag_model().objects.filter(name='foo').exists()
waffle.get_waffle_flag_model().objects.create(name='foo', everyone=True)
def test_undecorated_method_is_set_properly_for_flag(self):
self.assertFalse(waffle.flag_is_active(req(), 'foo'))
@override_flag('foo', active=False)
| OverrideFlagOnClassTestsMixin |
python | walkccc__LeetCode | solutions/3333. Find the Original Typed String II/3333.py | {
"start": 0,
"end": 1224
} | class ____:
def possibleStringCount(self, word: str, k: int) -> int:
MOD = 1_000_000_007
groups = self._getConsecutiveLetters(word)
totalCombinations = functools.reduce(lambda subtotal, group:
subtotal * group % MOD, groups)
if k <= len(groups):
return totalCombinations
# dp[j] := the number of ways to form strings of length j using groups[0..i]
dp = [0] * k
dp[0] = 1 # Base case: empty string
for i, group in enumerate(groups):
newDp = [0] * k
windowSum = 0
for j in range(i, k):
newDp[j] = (newDp[j] + windowSum) % MOD
windowSum = (windowSum + dp[j]) % MOD
if j >= group:
windowSum = (windowSum - dp[j - group] + MOD) % MOD
dp = newDp
return (totalCombinations - sum(dp)) % MOD
def _getConsecutiveLetters(self, word: str) -> list[int]:
"""
Returns consecutive identical letters in the input string.
e.g. "aabbbc" -> [2, 3, 1].
"""
groups = []
group = 1
for i in range(1, len(word)):
if word[i] == word[i - 1]:
group += 1
else:
groups.append(group)
group = 1
groups.append(group)
return groups
| Solution |
python | numba__numba | numba/cuda/deviceufunc.py | {
"start": 20201,
"end": 20852
} | class ____(object):
def __init__(self, parent, ishapes, oshapes, loopdims, pinned):
self.parent = parent
# core shapes
self.ishapes = ishapes
self.oshapes = oshapes
# looping dimension
self.loopdims = loopdims
self.loopn = reduce(operator.mul, loopdims, 1)
# flags
self.pinned = pinned
self.output_shapes = [loopdims + s for s in oshapes]
def __str__(self):
import pprint
attrs = 'ishapes', 'oshapes', 'loopdims', 'loopn', 'pinned'
values = [(k, getattr(self, k)) for k in attrs]
return pprint.pformat(dict(values))
| GUFuncSchedule |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py | {
"start": 147603,
"end": 148813
} | class ____(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
dilation=1,
stride=1,
groups=1,
):
super().__init__()
self.conv = nn.Conv1d(
in_channels,
out_channels,
kernel_size,
stride=stride,
dilation=dilation,
groups=groups,
)
self.stride = stride
self.kernel_size = (kernel_size - 1) * dilation + 1
self.dilation = dilation
self.padding = self.kernel_size - self.stride
def _get_extra_padding_for_conv1d(self, hidden_state: torch.Tensor) -> int:
length = hidden_state.shape[-1]
n_frames = (length - self.kernel_size + self.padding) / self.stride + 1
ideal_length = (math.ceil(n_frames) - 1) * self.stride + (self.kernel_size - self.padding)
return ideal_length - length
def forward(self, hidden_state):
extra_padding = self._get_extra_padding_for_conv1d(hidden_state)
hidden_state = F.pad(hidden_state, (self.padding, extra_padding), mode="constant", value=0)
return self.conv(hidden_state).contiguous()
| Qwen3OmniMoeCausalConvNet |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 7788,
"end": 7921
} | class ____(VOWarning, RuntimeWarning):
"""
A network or IO error occurred, but was recovered using the cache.
"""
| IOWarning |
python | mlflow__mlflow | tests/sagemaker/mock/__init__.py | {
"start": 32625,
"end": 34709
} | class ____:
"""
Object representing a SageMaker transform job operation ("create" or "stop"). Every
transform job is associated with the operation that was most recently invoked on it.
"""
def __init__(self, latency_seconds, pending_status, completed_status):
"""
Args:
latency_seconds: The latency of the operation, in seconds. Before the time window
specified by this latency elapses, the operation will have the status
specified by ``pending_status``. After the time window elapses, the
operation will have the status specified by ``completed_status``.
pending_status: The status that the operation should reflect *before* the latency
window has elapsed.
completed_status: The status that the operation should reflect *after* the latency
window has elapsed.
"""
self.latency_seconds = latency_seconds
self.pending_status = pending_status
self.completed_status = completed_status
self.start_time = time.time()
def status(self):
if time.time() - self.start_time < self.latency_seconds:
return self.pending_status
else:
return self.completed_status
@classmethod
def create_successful(cls, latency_seconds):
return cls(
latency_seconds=latency_seconds,
pending_status=TransformJob.STATUS_IN_PROGRESS,
completed_status=TransformJob.STATUS_COMPLETED,
)
@classmethod
def create_unsuccessful(cls, latency_seconds):
return cls(
latency_seconds=latency_seconds,
pending_status=TransformJob.STATUS_IN_PROGRESS,
completed_status=TransformJob.STATUS_FAILED,
)
@classmethod
def stop_successful(cls, latency_seconds):
return cls(
latency_seconds=latency_seconds,
pending_status=TransformJob.STATUS_STOPPING,
completed_status=TransformJob.STATUS_STOPPED,
)
| TransformJobOperation |
python | huggingface__transformers | src/transformers/models/albert/modeling_albert.py | {
"start": 10561,
"end": 11646
} | class ____(nn.Module):
def __init__(self, config: AlbertConfig):
super().__init__()
self.config = config
self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[BaseModelOutput, tuple]:
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
for i in range(self.config.num_hidden_layers):
# Index of the hidden group
group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
hidden_states = self.albert_layer_groups[group_idx](
hidden_states,
attention_mask,
**kwargs,
)
return BaseModelOutput(last_hidden_state=hidden_states)
@auto_docstring
| AlbertTransformer |
python | uqfoundation__dill | dill/tests/test_moduledict.py | {
"start": 627,
"end": 1182
} | class ____(object):
def __reduce__(self):
raise Exception
unpicklable = SomeUnreferencedUnpicklableClass()
# This works fine outside of Doctest:
def test_normal():
serialized = dill.dumps(lambda x: x)
# should not try to pickle unpicklable object in __globals__
def tests():
"""
>>> serialized = dill.dumps(lambda x: x)
"""
return
#print("\n\nRunning Doctest:")
def test_doctest():
doctest.testmod()
if __name__ == '__main__':
test_decorated()
test_normal()
test_doctest()
| SomeUnreferencedUnpicklableClass |
python | spack__spack | lib/spack/spack/installer.py | {
"start": 2881,
"end": 3988
} | class ____(enum.Enum):
"""Different build (task) states."""
#: Build status indicating task has been added/queued.
QUEUED = enum.auto()
#: Build status indicating the spec failed to install
FAILED = enum.auto()
#: Build status indicating the spec is being installed (possibly by another
#: process)
INSTALLING = enum.auto()
#: Build status indicating the spec was successfully installed
INSTALLED = enum.auto()
#: Build status indicating the task has been popped from the queue
DEQUEUED = enum.auto()
#: Build status indicating task has been removed (to maintain priority
#: queue invariants).
REMOVED = enum.auto()
def __str__(self):
return f"{self.name.lower()}"
def _write_timer_json(pkg, timer, cache):
extra_attributes = {"name": pkg.name, "cache": cache, "hash": pkg.spec.dag_hash()}
try:
with open(pkg.times_log_path, "w", encoding="utf-8") as timelog:
timer.write_json(timelog, extra_attributes=extra_attributes)
except Exception as e:
tty.debug(str(e))
return
| BuildStatus |
python | huggingface__transformers | tests/trainer/test_trainer.py | {
"start": 6677,
"end": 7649
} | class ____:
def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
np.random.seed(seed)
self.label_names = ["labels"] if label_names is None else label_names
self.length = length
self.x = np.random.normal(size=(length,)).astype(np.float32)
self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names]
self.ys = [y.astype(np.float32) for y in self.ys]
def __len__(self):
return self.length
def __getitem__(self, i):
result = {name: y[i] for name, y in zip(self.label_names, self.ys)}
result["input_x"] = self.x[i]
return result
# Converting Bytes to Megabytes
def bytes2megabytes(x):
return int(x / 2**20)
# Copied from accelerate: https://github.com/huggingface/accelerate/blob/ee163b66fb7848892519e804688cb4ae981aacbe/src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py#L40C1-L73C68
| RegressionDataset |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py | {
"start": 39561,
"end": 40076
} | class ____(Qwen2_5OmniPreTrainedModel, PreTrainedModel):
@torch.no_grad()
def _init_weights(self, module):
PreTrainedModel._init_weights(self, module)
std = self.config.initializer_range
if isinstance(module, Qwen3OmniMoeThinkerTextSparseMoeBlock):
init.normal_(module.experts.gate_up_proj, mean=0.0, std=std)
init.normal_(module.experts.down_proj, mean=0.0, std=std)
init.normal_(module.router.weight, mean=0.0, std=std)
| Qwen3OmniMoePreTrainedModel |
python | dagster-io__dagster | python_modules/libraries/dagster-airflow/dagster_airflow/resources/airflow_db.py | {
"start": 595,
"end": 3849
} | class ____:
"""Airflow database Dagster resource."""
def __init__(self, dagster_run: DagsterRun, dag_run_config: Optional[dict] = None):
self.dagster_run = dagster_run
self.dag_run_config = dag_run_config
def _parse_execution_date_for_job(
self, dag: DAG, run_tags: Mapping[str, str]
) -> Optional[datetime.datetime]:
execution_date_str = run_tags.get(AIRFLOW_EXECUTION_DATE_STR)
if not execution_date_str:
raise DagsterInvariantViolationError(
"Expected execution_date_str to be set in run tags."
)
check.str_param(execution_date_str, "execution_date_str")
try:
execution_date = pendulum.parse(
execution_date_str, tz=pendulum.timezone(dag.timezone.name)
)
except ValueError:
raise DagsterInvariantViolationError(
f'Could not parse execution_date "{execution_date_str}". Please use datetime'
" format compatible with dateutil.parser.parse."
)
except OverflowError:
raise DagsterInvariantViolationError(
f'Date "{execution_date_str}" exceeds the largest valid C integer on the system.'
)
return execution_date # pyright: ignore[reportReturnType]
def _parse_execution_date_for_asset(
self, dag: DAG, run_tags: Mapping[str, str]
) -> Optional[datetime.datetime]:
execution_date_str = run_tags.get("dagster/partition")
if not execution_date_str:
raise DagsterInvariantViolationError("dagster/partition is not set")
execution_date = pendulum.parse(execution_date_str, tz=pendulum.timezone(dag.timezone.name))
return execution_date # pyright: ignore[reportReturnType]
def get_dagrun(self, dag: DAG) -> DagRun:
run_tags = self.dagster_run.tags if self.dagster_run else {}
if AIRFLOW_EXECUTION_DATE_STR in run_tags:
execution_date = self._parse_execution_date_for_job(dag, run_tags)
elif "dagster/partition" in run_tags:
execution_date = self._parse_execution_date_for_asset(dag, run_tags)
else:
raise DagsterInvariantViolationError(
f'Could not find "{AIRFLOW_EXECUTION_DATE_STR}" in tags "{run_tags}". Please '
f'add "{AIRFLOW_EXECUTION_DATE_STR}" to tags before executing'
)
dagrun = dag.get_dagrun(execution_date=execution_date)
if not dagrun:
if is_airflow_2_loaded_in_environment():
dagrun = dag.create_dagrun(
state=DagRunState.RUNNING, # pyright: ignore[reportPossiblyUnboundVariable]
execution_date=execution_date,
run_type=DagRunType.MANUAL, # pyright: ignore[reportPossiblyUnboundVariable]
conf=self.dag_run_config,
)
else:
dagrun = dag.create_dagrun(
run_id=f"dagster_airflow_run_{execution_date}",
state=State.RUNNING, # type: ignore
execution_date=execution_date,
conf=self.dag_run_config,
)
return dagrun
| AirflowDatabase |
python | scrapy__scrapy | tests/test_request_attribute_binding.py | {
"start": 1160,
"end": 1378
} | class ____(SingleRequestSpider):
name = "alternative_callbacks_spider"
def alt_callback(self, response, foo=None):
self.logger.info("alt_callback was invoked with foo=%s", foo)
| AlternativeCallbacksSpider |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_contextlib.py | {
"start": 16193,
"end": 17521
} | class ____(__TestCase):
def boilerPlate(self, lock, locked):
self.assertFalse(locked())
with lock:
self.assertTrue(locked())
self.assertFalse(locked())
with self.assertRaises(ZeroDivisionError):
with lock:
self.assertTrue(locked())
1 / 0
self.assertFalse(locked())
def testWithLock(self):
lock = threading.Lock()
self.boilerPlate(lock, lock.locked)
def testWithRLock(self):
lock = threading.RLock()
self.boilerPlate(lock, lock._is_owned)
def testWithCondition(self):
lock = threading.Condition()
def locked():
return lock._is_owned()
self.boilerPlate(lock, locked)
def testWithSemaphore(self):
lock = threading.Semaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
def testWithBoundedSemaphore(self):
lock = threading.BoundedSemaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
| LockContextTestCase |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.