language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | airbytehq__airbyte | airbyte-integrations/connectors/source-outbrain-amplify/source_outbrain_amplify/source.py | {
"start": 8798,
"end": 10665
} | class ____(OutbrainAmplifyStream, HttpSubStream):
primary_key = None
def __init__(self, authenticator, config, parent: CampaignsByMarketers, **kwargs):
super().__init__(parent=parent, **kwargs)
self.config = config
self._authenticator = authenticator
self._session = requests.sessions.Session()
@property
def name(self) -> str:
return "promoted_links"
def stream_slices(
self, sync_mode: SyncMode.full_refresh, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
parent_stream_slices = self.parent.stream_slices(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state
)
for stream_slice in parent_stream_slices:
parent_records = self.parent.read_records(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state
)
for record in parent_records:
yield {"campaign_id": record.get("id")}
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[Mapping]:
if response.json():
for x in response.json().get("promotedLinks"):
yield x
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return f"campaigns/{stream_slice['campaign_id']}/promotedLinks?limit={DEFAULT_LIMIT}"
# List PromotedLinksSequences for Campaign.
# Collection of all PromotedLinksSequences for the specified Campaign.
| PromotedLinksForCampaigns |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict13.py | {
"start": 468,
"end": 504
} | class ____(ParentB):
x: int
| ChildB |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B018.py | {
"start": 28,
"end": 390
} | class ____:
"""abc"""
a = 2
"str" # Str (no raise)
f"{int}" # JoinedStr (no raise)
1j # Number (complex)
1 # Number (int)
1.0 # Number (float)
b"foo" # Binary
True # NameConstant (True)
False # NameConstant (False)
None # NameConstant (None)
[1, 2] # list
{1, 2} # set
{"foo": "bar"} # dict
| Foo2 |
python | spyder-ide__spyder | spyder/plugins/remoteclient/tests/test_plugin.py | {
"start": 1571,
"end": 2793
} | class ____:
def test_wrong_version(
self,
remote_client: RemoteClient,
remote_client_id: str,
monkeypatch,
qtbot,
):
monkeypatch.setattr(
"spyder.plugins.remoteclient.api.manager.ssh.SPYDER_REMOTE_MAX_VERSION",
"0.0.1",
)
monkeypatch.setattr(
"spyder.plugins.remoteclient.api.manager.jupyterhub.SPYDER_REMOTE_MAX_VERSION",
"0.0.1",
)
monkeypatch.setattr(
"spyder.plugins.remoteclient.widgets.container.SPYDER_REMOTE_MAX_VERSION",
"0.0.1",
)
def mock_critical(parent, title, text, buttons):
assert "spyder-remote-services" in text
assert "0.0.1" in text
assert "is newer than" in text
return QMessageBox.Ok
monkeypatch.setattr(
"spyder.plugins.remoteclient.widgets.container.QMessageBox.critical",
mock_critical,
)
with qtbot.waitSignal(
remote_client.sig_version_mismatch,
timeout=180000,
):
remote_client.start_remote_server(remote_client_id)
if __name__ == "__main__":
pytest.main()
| TestVersionCheck |
python | pikepdf__pikepdf | src/pikepdf/objects.py | {
"start": 6359,
"end": 7111
} | class ____(Object, metaclass=_ObjectMeta):
"""Construct a PDF Array object."""
object_type = ObjectType.array
def __new__(cls, a: Iterable | Rectangle | Matrix | None = None) -> Array:
"""Construct a PDF Array.
Args:
a: An iterable of objects. All objects must be either
`pikepdf.Object` or convertible to `pikepdf.Object`.
"""
if isinstance(a, str | bytes):
raise TypeError('Strings cannot be converted to arrays of chars')
if a is None:
a = []
elif isinstance(a, Rectangle | Matrix):
return a.as_array()
elif isinstance(a, Array):
return cast(Array, a.__copy__())
return _core._new_array(a)
| Array |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/partition_sets.py | {
"start": 6295,
"end": 7759
} | class ____(graphene.ObjectType):
name = graphene.NonNull(graphene.String)
job_name = graphene.NonNull(graphene.String)
runConfigOrError = graphene.NonNull(GraphenePartitionRunConfigOrError)
tagsOrError = graphene.NonNull(GraphenePartitionTagsOrError)
class Meta:
name = "PartitionTagsAndConfig"
def __init__(
self,
remote_job: RemoteJob,
partition_name: str,
selected_asset_keys: Optional[AbstractSet[AssetKey]],
):
self._remote_job = remote_job
self._partition_name = partition_name
self._selected_asset_keys = selected_asset_keys
super().__init__(name=partition_name, job_name=remote_job.name)
@capture_error
def resolve_runConfigOrError(self, graphene_info: ResolveInfo) -> GraphenePartitionRunConfig:
return get_partition_config(
graphene_info,
self._remote_job.repository_handle,
self._remote_job.name,
self._partition_name,
selected_asset_keys=self._selected_asset_keys,
)
@capture_error
def resolve_tagsOrError(self, graphene_info: ResolveInfo) -> GraphenePartitionTags:
return get_partition_tags(
graphene_info,
self._remote_job.repository_handle.to_selector(),
self._remote_job.name,
self._partition_name,
selected_asset_keys=self._selected_asset_keys,
)
| GrapheneJobSelectionPartition |
python | getsentry__sentry | tests/sentry/attachments/test_base.py | {
"start": 148,
"end": 4169
} | class ____:
"""
In-memory mock cache that roughly works like Django cache. Extended with
internal assertions to ensure correct use of `raw`.
"""
def __init__(self):
self.data = {}
#: Used to check for consistent usage of `raw` param
self.raw_map = {}
def get(self, key, raw=False):
assert key not in self.raw_map or raw == self.raw_map[key]
return copy.deepcopy(self.data.get(key))
def set(self, key, value, timeout=None, raw=False):
# Attachment chunks MUST be bytestrings. Josh please don't change this
# to unicode.
assert isinstance(value, bytes) or not raw
assert key not in self.raw_map or raw == self.raw_map[key]
self.data[key] = value
def delete(self, key):
del self.data[key]
def test_meta_basic() -> None:
att = CachedAttachment(key="c:foo", id=123, name="lol.txt", content_type="text/plain", chunks=3)
# Regression test to verify that we do not add additional attributes. Note
# that ``rate_limited`` is missing from this dict.
assert att.meta() == {
"key": "c:foo",
"id": 123,
"chunks": 3,
"content_type": "text/plain",
"name": "lol.txt",
"type": "event.attachment",
}
def test_meta_rate_limited() -> None:
att = CachedAttachment(
key="c:foo", id=123, name="lol.txt", content_type="text/plain", chunks=3, rate_limited=True
)
assert att.meta() == {
"key": "c:foo",
"id": 123,
"chunks": 3,
"content_type": "text/plain",
"name": "lol.txt",
"rate_limited": True,
"type": "event.attachment",
}
def test_basic_chunked() -> None:
data = InMemoryCache()
cache = BaseAttachmentCache(data)
cache.set_chunk("c:foo", 123, 0, b"Hello World! ")
cache.set_chunk("c:foo", 123, 1, b"")
cache.set_chunk("c:foo", 123, 2, b"Bye.")
att = CachedAttachment(key="c:foo", id=123, name="lol.txt", content_type="text/plain", chunks=3)
(meta,) = cache.set("c:foo", [att])
att2 = CachedAttachment(cache=cache, **meta)
assert att2.key == att.key == "c:foo"
assert att2.id == att.id == 123
assert att2.load_data() == att.load_data() == b"Hello World! Bye."
assert att2.rate_limited is None
@django_db_all
def test_basic_unchunked() -> None:
data = InMemoryCache()
cache = BaseAttachmentCache(data)
att = CachedAttachment(name="lol.txt", content_type="text/plain", data=b"Hello World! Bye.")
(meta,) = cache.set("c:foo", [att])
att2 = CachedAttachment(cache=cache, **meta)
assert att2.key == att.key == "c:foo"
assert att2.id == att.id == 0
assert att2.load_data() == att.load_data() == b"Hello World! Bye."
assert att2.rate_limited is None
@django_db_all
def test_zstd_chunks() -> None:
data = InMemoryCache()
cache = BaseAttachmentCache(data)
cache.set_chunk("mixed_chunks", 123, 0, b"Hello World! ")
cache.set_chunk("mixed_chunks", 123, 1, b"Just visiting. ")
cache.set_chunk("mixed_chunks", 123, 2, b"Bye.")
mixed_chunks = cache.get_from_chunks(key="mixed_chunks", id=123, chunks=3)
assert mixed_chunks.load_data() == b"Hello World! Just visiting. Bye."
att = CachedAttachment(key="not_chunked", id=456, data=b"Hello World! Bye.")
(meta,) = cache.set("not_chunked", [att])
not_chunked = CachedAttachment(cache=cache, **meta)
assert not_chunked.load_data() == b"Hello World! Bye."
@django_db_all
def test_basic_rate_limited() -> None:
data = InMemoryCache()
cache = BaseAttachmentCache(data)
att = CachedAttachment(
name="lol.txt", content_type="text/plain", data=b"Hello World! Bye.", rate_limited=True
)
(meta,) = cache.set("c:foo", [att])
att2 = CachedAttachment(cache=cache, **meta)
assert att2.key == att.key == "c:foo"
assert att2.id == att.id == 0
assert att2.load_data() == att.load_data() == b"Hello World! Bye."
assert att2.rate_limited is True
| InMemoryCache |
python | pypa__setuptools | setuptools/tests/config/test_apply_pyprojecttoml.py | {
"start": 19933,
"end": 25710
} | class ____:
def pyproject(self, tmp_path, dynamic, extra_content=""):
content = f"[project]\nname = 'proj'\ndynamic = {dynamic!r}\n"
if "version" not in dynamic:
content += "version = '42'\n"
file = tmp_path / "pyproject.toml"
file.write_text(content + extra_content, encoding="utf-8")
return file
@pytest.mark.parametrize(
("attr", "field", "value"),
[
("license_expression", "license", "MIT"),
pytest.param(
*("license", "license", "Not SPDX"),
marks=[pytest.mark.filterwarnings("ignore:.*license. overwritten")],
),
("classifiers", "classifiers", ["Private :: Classifier"]),
("entry_points", "scripts", {"console_scripts": ["foobar=foobar:main"]}),
("entry_points", "gui-scripts", {"gui_scripts": ["bazquux=bazquux:main"]}),
pytest.param(
*("install_requires", "dependencies", ["six"]),
marks=[
pytest.mark.filterwarnings("ignore:.*install_requires. overwritten")
],
),
],
)
def test_not_listed_in_dynamic(self, tmp_path, attr, field, value):
"""Setuptools cannot set a field if not listed in ``dynamic``"""
pyproject = self.pyproject(tmp_path, [])
dist = makedist(tmp_path, **{attr: value})
msg = re.compile(f"defined outside of `pyproject.toml`:.*{field}", re.DOTALL)
with pytest.warns(_MissingDynamic, match=msg):
dist = pyprojecttoml.apply_configuration(dist, pyproject)
dist_value = _some_attrgetter(f"metadata.{attr}", attr)(dist)
assert not dist_value
@pytest.mark.parametrize(
("attr", "field", "value"),
[
("license_expression", "license", "MIT"),
("install_requires", "dependencies", []),
("extras_require", "optional-dependencies", {}),
("install_requires", "dependencies", ["six"]),
("classifiers", "classifiers", ["Private :: Classifier"]),
],
)
def test_listed_in_dynamic(self, tmp_path, attr, field, value):
pyproject = self.pyproject(tmp_path, [field])
dist = makedist(tmp_path, **{attr: value})
dist = pyprojecttoml.apply_configuration(dist, pyproject)
dist_value = _some_attrgetter(f"metadata.{attr}", attr)(dist)
assert dist_value == value
def test_license_files_exempt_from_dynamic(self, monkeypatch, tmp_path):
"""
license-file is currently not considered in the context of dynamic.
As per 2025-02-19, https://packaging.python.org/en/latest/specifications/pyproject-toml/#license-files
allows setuptools to fill-in `license-files` the way it sees fit:
> If the license-files key is not defined, tools can decide how to handle license files.
> For example they can choose not to include any files or use their own
> logic to discover the appropriate files in the distribution.
Using license_files from setup.py to fill-in the value is in accordance
with this rule.
"""
monkeypatch.chdir(tmp_path)
pyproject = self.pyproject(tmp_path, [])
dist = makedist(tmp_path, license_files=["LIC*"])
(tmp_path / "LIC1").write_text("42", encoding="utf-8")
dist = pyprojecttoml.apply_configuration(dist, pyproject)
assert dist.metadata.license_files == ["LIC1"]
def test_warning_overwritten_dependencies(self, tmp_path):
src = "[project]\nname='pkg'\nversion='0.1'\ndependencies=['click']\n"
pyproject = tmp_path / "pyproject.toml"
pyproject.write_text(src, encoding="utf-8")
dist = makedist(tmp_path, install_requires=["wheel"])
with pytest.warns(match="`install_requires` overwritten"):
dist = pyprojecttoml.apply_configuration(dist, pyproject)
assert "wheel" not in dist.install_requires
def test_optional_dependencies_dont_remove_env_markers(self, tmp_path):
"""
Internally setuptools converts dependencies with markers to "extras".
If ``install_requires`` is given by ``setup.py``, we have to ensure that
applying ``optional-dependencies`` does not overwrite the mandatory
dependencies with markers (see #3204).
"""
# If setuptools replace its internal mechanism that uses `requires.txt`
# this test has to be rewritten to adapt accordingly
extra = "\n[project.optional-dependencies]\nfoo = ['bar>1']\n"
pyproject = self.pyproject(tmp_path, ["dependencies"], extra)
install_req = ['importlib-resources (>=3.0.0) ; python_version < "3.7"']
dist = makedist(tmp_path, install_requires=install_req)
dist = pyprojecttoml.apply_configuration(dist, pyproject)
assert "foo" in dist.extras_require
egg_info = dist.get_command_obj("egg_info")
write_requirements(egg_info, tmp_path, tmp_path / "requires.txt")
reqs = (tmp_path / "requires.txt").read_text(encoding="utf-8")
assert "importlib-resources" in reqs
assert "bar" in reqs
assert ':python_version < "3.7"' in reqs
@pytest.mark.parametrize(
("field", "group"),
[("scripts", "console_scripts"), ("gui-scripts", "gui_scripts")],
)
@pytest.mark.filterwarnings("error")
def test_scripts_dont_require_dynamic_entry_points(self, tmp_path, field, group):
# Issue 3862
pyproject = self.pyproject(tmp_path, [field])
dist = makedist(tmp_path, entry_points={group: ["foobar=foobar:main"]})
dist = pyprojecttoml.apply_configuration(dist, pyproject)
assert group in dist.entry_points
| TestPresetField |
python | python-openxml__python-docx | src/docx/oxml/simpletypes.py | {
"start": 3033,
"end": 3725
} | class ____(BaseSimpleType):
@classmethod
def convert_from_xml(cls, str_value: str) -> bool:
if str_value not in ("1", "0", "true", "false"):
raise InvalidXmlError(
"value must be one of '1', '0', 'true' or 'false', got '%s'" % str_value
)
return str_value in ("1", "true")
@classmethod
def convert_to_xml(cls, value: bool) -> str:
return {True: "1", False: "0"}[value]
@classmethod
def validate(cls, value: Any) -> None:
if value not in (True, False):
raise TypeError(
"only True or False (and possibly None) may be assigned, got '%s'" % value
)
| XsdBoolean |
python | huggingface__transformers | src/transformers/models/granitemoe/modeling_granitemoe.py | {
"start": 5904,
"end": 7577
} | class ____(nn.Module):
def __init__(self, num_experts: int, input_size: int, output_size: int) -> None:
"""
Initialize the GraniteMoeParallelExperts module.
The experts weights are stored in [num_experts, output_size, input_size] format. Such that it's compatible with
many MoE libraries, such as [Megablock](https://github.com/databricks/megablocks) and
[ScatterMoE](https://github.com/shawntan/scattermoe), as well as the
[MoE kernel](https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/fused_moe/fused_moe.py)
used in vllm.
Args:
num_experts (int):
Number of experts.
input_size (int):
Size of the input.
output_size (int):
Size of the output.
"""
super().__init__()
self.weight = nn.Parameter(torch.empty(num_experts, output_size, input_size))
self.num_experts = num_experts
self.input_size = input_size
self.output_size = output_size
def forward(self, inputs, expert_size):
"""
Forward pass of the GraniteMoeParallelExperts module.
Args:
inputs (Tensor):
Input tensor.
expert_size:
Expert size information.
Returns:
Tensor: Output tensor.
"""
input_list = inputs.split(expert_size, dim=0)
output_list = []
for i in range(self.num_experts):
output_list.append(F.linear(input_list[i], self.weight[i]))
results = torch.cat(output_list, dim=0)
return results
| GraniteMoeParallelExperts |
python | joke2k__faker | faker/providers/company/de_AT/__init__.py | {
"start": 45,
"end": 556
} | class ____(CompanyProvider):
# Source: https://www.wko.at/wirtschaftsrecht/gesellschaftsformen-oesterreich
formats = (
"{{last_name}} {{company_suffix}}",
"{{last_name}} {{last_name}} {{company_suffix}}",
"{{last_name}} & {{last_name}} {{company_suffix}}",
)
company_suffixes = (
"AG",
"AG",
"AG",
"GesbR",
"GmbH",
"GmbH",
"GmbH",
"KG",
"KG",
"KG",
"OG",
"e.V.",
)
| Provider |
python | mlflow__mlflow | tests/pyfunc/sample_code/code_with_dependencies.py | {
"start": 192,
"end": 582
} | class ____(PythonModel):
def _call_retriever(self, id):
return f"Retriever called with ID: {id}. Output: 42."
def predict(self, context, model_input):
return f"Input: {model_input}. {self._call_retriever(model_input)}"
def predict_stream(self, context, model_input, params=None):
yield f"Input: {model_input}. {self._call_retriever(model_input)}"
| MyModel |
python | scikit-learn__scikit-learn | sklearn/neural_network/_multilayer_perceptron.py | {
"start": 1615,
"end": 31498
} | class ____(BaseEstimator, ABC):
"""Base class for MLP classification and regression.
Warning: This class should not be used directly.
Use derived classes instead.
.. versionadded:: 0.18
"""
_parameter_constraints: dict = {
"hidden_layer_sizes": [
"array-like",
Interval(Integral, 1, None, closed="left"),
],
"activation": [StrOptions({"identity", "logistic", "tanh", "relu"})],
"solver": [StrOptions({"lbfgs", "sgd", "adam"})],
"alpha": [Interval(Real, 0, None, closed="left")],
"batch_size": [
StrOptions({"auto"}),
Interval(Integral, 1, None, closed="left"),
],
"learning_rate": [StrOptions({"constant", "invscaling", "adaptive"})],
"learning_rate_init": [Interval(Real, 0, None, closed="neither")],
"power_t": [Interval(Real, 0, None, closed="left")],
"max_iter": [Interval(Integral, 1, None, closed="left")],
"shuffle": ["boolean"],
"random_state": ["random_state"],
"tol": [Interval(Real, 0, None, closed="left")],
"verbose": ["verbose"],
"warm_start": ["boolean"],
"momentum": [Interval(Real, 0, 1, closed="both")],
"nesterovs_momentum": ["boolean"],
"early_stopping": ["boolean"],
"validation_fraction": [Interval(Real, 0, 1, closed="left")],
"beta_1": [Interval(Real, 0, 1, closed="left")],
"beta_2": [Interval(Real, 0, 1, closed="left")],
"epsilon": [Interval(Real, 0, None, closed="neither")],
"n_iter_no_change": [
Interval(Integral, 1, None, closed="left"),
Options(Real, {np.inf}),
],
"max_fun": [Interval(Integral, 1, None, closed="left")],
}
@abstractmethod
def __init__(
self,
hidden_layer_sizes,
activation,
solver,
alpha,
batch_size,
learning_rate,
learning_rate_init,
power_t,
max_iter,
loss,
shuffle,
random_state,
tol,
verbose,
warm_start,
momentum,
nesterovs_momentum,
early_stopping,
validation_fraction,
beta_1,
beta_2,
epsilon,
n_iter_no_change,
max_fun,
):
self.activation = activation
self.solver = solver
self.alpha = alpha
self.batch_size = batch_size
self.learning_rate = learning_rate
self.learning_rate_init = learning_rate_init
self.power_t = power_t
self.max_iter = max_iter
self.loss = loss
self.hidden_layer_sizes = hidden_layer_sizes
self.shuffle = shuffle
self.random_state = random_state
self.tol = tol
self.verbose = verbose
self.warm_start = warm_start
self.momentum = momentum
self.nesterovs_momentum = nesterovs_momentum
self.early_stopping = early_stopping
self.validation_fraction = validation_fraction
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.n_iter_no_change = n_iter_no_change
self.max_fun = max_fun
def _unpack(self, packed_parameters):
"""Extract the coefficients and intercepts from packed_parameters."""
for i in range(self.n_layers_ - 1):
start, end, shape = self._coef_indptr[i]
self.coefs_[i] = np.reshape(packed_parameters[start:end], shape)
start, end = self._intercept_indptr[i]
self.intercepts_[i] = packed_parameters[start:end]
def _forward_pass(self, activations):
"""Perform a forward pass on the network by computing the values
of the neurons in the hidden layers and the output layer.
Parameters
----------
activations : list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
"""
hidden_activation = ACTIVATIONS[self.activation]
# Iterate over the hidden layers
for i in range(self.n_layers_ - 1):
activations[i + 1] = safe_sparse_dot(activations[i], self.coefs_[i])
activations[i + 1] += self.intercepts_[i]
# For the hidden layers
if (i + 1) != (self.n_layers_ - 1):
hidden_activation(activations[i + 1])
# For the last layer
output_activation = ACTIVATIONS[self.out_activation_]
output_activation(activations[i + 1])
return activations
def _forward_pass_fast(self, X, check_input=True):
"""Predict using the trained model
This is the same as _forward_pass but does not record the activations
of all layers and only returns the last layer's activation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
check_input : bool, default=True
Perform input data validation or not.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The decision function of the samples for each class in the model.
"""
if check_input:
X = validate_data(self, X, accept_sparse=["csr", "csc"], reset=False)
# Initialize first layer
activation = X
# Forward propagate
hidden_activation = ACTIVATIONS[self.activation]
for i in range(self.n_layers_ - 1):
activation = safe_sparse_dot(activation, self.coefs_[i])
activation += self.intercepts_[i]
if i != self.n_layers_ - 2:
hidden_activation(activation)
output_activation = ACTIVATIONS[self.out_activation_]
output_activation(activation)
return activation
def _compute_loss_grad(
self, layer, sw_sum, activations, deltas, coef_grads, intercept_grads
):
"""Compute the gradient of loss with respect to coefs and intercept for
specified layer.
This function does backpropagation for the specified one layer.
"""
coef_grads[layer] = safe_sparse_dot(activations[layer].T, deltas[layer])
coef_grads[layer] += self.alpha * self.coefs_[layer]
coef_grads[layer] /= sw_sum
intercept_grads[layer] = np.sum(deltas[layer], axis=0) / sw_sum
def _loss_grad_lbfgs(
self,
packed_coef_inter,
X,
y,
sample_weight,
activations,
deltas,
coef_grads,
intercept_grads,
):
"""Compute the MLP loss function and its corresponding derivatives
with respect to the different parameters given in the initialization.
Returned gradients are packed in a single vector so it can be used
in lbfgs
Parameters
----------
packed_coef_inter : ndarray
A vector comprising the flattened coefficients and intercepts.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,)
The target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
activations : list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
coef_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
grad : array-like, shape (number of nodes of all layers,)
"""
self._unpack(packed_coef_inter)
loss, coef_grads, intercept_grads = self._backprop(
X, y, sample_weight, activations, deltas, coef_grads, intercept_grads
)
grad = _pack(coef_grads, intercept_grads)
return loss, grad
def _backprop(
self, X, y, sample_weight, activations, deltas, coef_grads, intercept_grads
):
"""Compute the MLP loss function and its corresponding derivatives
with respect to each parameter: weights and bias vectors.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,)
The target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
activations : list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
coef_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
coef_grads : list, length = n_layers - 1
intercept_grads : list, length = n_layers - 1
"""
n_samples = X.shape[0]
# Forward propagate
activations = self._forward_pass(activations)
# Get loss
loss_func_name = self.loss
if loss_func_name == "log_loss" and self.out_activation_ == "logistic":
loss_func_name = "binary_log_loss"
loss = LOSS_FUNCTIONS[loss_func_name](y, activations[-1], sample_weight)
# Add L2 regularization term to loss
values = 0
for s in self.coefs_:
s = s.ravel()
values += np.dot(s, s)
if sample_weight is None:
sw_sum = n_samples
else:
sw_sum = sample_weight.sum()
loss += (0.5 * self.alpha) * values / sw_sum
# Backward propagate
last = self.n_layers_ - 2
# The calculation of delta[last] is as follows:
# delta[last] = d/dz loss(y, act(z)) = act(z) - y
# with z=x@w + b being the output of the last layer before passing through the
# output activation, act(z) = activations[-1].
# The simple formula for delta[last] here works with following (canonical
# loss-link) combinations of output activation and loss function:
# sigmoid and binary cross entropy, softmax and categorical cross
# entropy, and identity with squared loss
deltas[last] = activations[-1] - y
if sample_weight is not None:
deltas[last] *= sample_weight.reshape(-1, 1)
# Compute gradient for the last layer
self._compute_loss_grad(
last, sw_sum, activations, deltas, coef_grads, intercept_grads
)
inplace_derivative = DERIVATIVES[self.activation]
# Iterate over the hidden layers
for i in range(last, 0, -1):
deltas[i - 1] = safe_sparse_dot(deltas[i], self.coefs_[i].T)
inplace_derivative(activations[i], deltas[i - 1])
self._compute_loss_grad(
i - 1, sw_sum, activations, deltas, coef_grads, intercept_grads
)
return loss, coef_grads, intercept_grads
def _initialize(self, y, layer_units, dtype):
# set all attributes, allocate weights etc. for first call
# Initialize parameters
self.n_iter_ = 0
self.t_ = 0
self.n_outputs_ = y.shape[1]
# Compute the number of layers
self.n_layers_ = len(layer_units)
# Output for regression
if not is_classifier(self):
if self.loss == "poisson":
self.out_activation_ = "exp"
else:
# loss = "squared_error"
self.out_activation_ = "identity"
# Output for multi class
elif self._label_binarizer.y_type_ == "multiclass":
self.out_activation_ = "softmax"
# Output for binary class and multi-label
else:
self.out_activation_ = "logistic"
# Initialize coefficient and intercept layers
self.coefs_ = []
self.intercepts_ = []
for i in range(self.n_layers_ - 1):
coef_init, intercept_init = self._init_coef(
layer_units[i], layer_units[i + 1], dtype
)
self.coefs_.append(coef_init)
self.intercepts_.append(intercept_init)
self._best_coefs = [c.copy() for c in self.coefs_]
self._best_intercepts = [i.copy() for i in self.intercepts_]
if self.solver in _STOCHASTIC_SOLVERS:
self.loss_curve_ = []
self._no_improvement_count = 0
if self.early_stopping:
self.validation_scores_ = []
self.best_validation_score_ = -np.inf
self.best_loss_ = None
else:
self.best_loss_ = np.inf
self.validation_scores_ = None
self.best_validation_score_ = None
def _init_coef(self, fan_in, fan_out, dtype):
# Use the initialization method recommended by
# Glorot et al.
factor = 6.0
if self.activation == "logistic":
factor = 2.0
init_bound = np.sqrt(factor / (fan_in + fan_out))
# Generate weights and bias:
coef_init = self._random_state.uniform(
-init_bound, init_bound, (fan_in, fan_out)
)
intercept_init = self._random_state.uniform(-init_bound, init_bound, fan_out)
coef_init = coef_init.astype(dtype, copy=False)
intercept_init = intercept_init.astype(dtype, copy=False)
return coef_init, intercept_init
def _fit(self, X, y, sample_weight=None, incremental=False):
# Make sure self.hidden_layer_sizes is a list
hidden_layer_sizes = self.hidden_layer_sizes
if not hasattr(hidden_layer_sizes, "__iter__"):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
if np.any(np.array(hidden_layer_sizes) <= 0):
raise ValueError(
"hidden_layer_sizes must be > 0, got %s." % hidden_layer_sizes
)
first_pass = not hasattr(self, "coefs_") or (
not self.warm_start and not incremental
)
X, y = self._validate_input(X, y, incremental, reset=first_pass)
n_samples, n_features = X.shape
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
# Ensure y is 2D
if y.ndim == 1:
y = y.reshape((-1, 1))
self.n_outputs_ = y.shape[1]
layer_units = [n_features] + hidden_layer_sizes + [self.n_outputs_]
# check random state
self._random_state = check_random_state(self.random_state)
if first_pass:
# First time training the model
self._initialize(y, layer_units, X.dtype)
# Initialize lists
activations = [X] + [None] * (len(layer_units) - 1)
deltas = [None] * (len(activations) - 1)
coef_grads = [
np.empty((n_fan_in_, n_fan_out_), dtype=X.dtype)
for n_fan_in_, n_fan_out_ in pairwise(layer_units)
]
intercept_grads = [
np.empty(n_fan_out_, dtype=X.dtype) for n_fan_out_ in layer_units[1:]
]
# Run the Stochastic optimization solver
if self.solver in _STOCHASTIC_SOLVERS:
self._fit_stochastic(
X,
y,
sample_weight,
activations,
deltas,
coef_grads,
intercept_grads,
layer_units,
incremental,
)
# Run the LBFGS solver
elif self.solver == "lbfgs":
self._fit_lbfgs(
X,
y,
sample_weight,
activations,
deltas,
coef_grads,
intercept_grads,
layer_units,
)
# validate parameter weights
weights = chain(self.coefs_, self.intercepts_)
if not all(np.isfinite(w).all() for w in weights):
raise ValueError(
"Solver produced non-finite parameter weights. The input data may"
" contain large values and need to be preprocessed."
)
return self
def _fit_lbfgs(
self,
X,
y,
sample_weight,
activations,
deltas,
coef_grads,
intercept_grads,
layer_units,
):
# Store meta information for the parameters
self._coef_indptr = []
self._intercept_indptr = []
start = 0
# Save sizes and indices of coefficients for faster unpacking
for i in range(self.n_layers_ - 1):
n_fan_in, n_fan_out = layer_units[i], layer_units[i + 1]
end = start + (n_fan_in * n_fan_out)
self._coef_indptr.append((start, end, (n_fan_in, n_fan_out)))
start = end
# Save sizes and indices of intercepts for faster unpacking
for i in range(self.n_layers_ - 1):
end = start + layer_units[i + 1]
self._intercept_indptr.append((start, end))
start = end
# Run LBFGS
packed_coef_inter = _pack(self.coefs_, self.intercepts_)
if self.verbose is True or self.verbose >= 1:
iprint = 1
else:
iprint = -1
opt_res = scipy.optimize.minimize(
self._loss_grad_lbfgs,
packed_coef_inter,
method="L-BFGS-B",
jac=True,
options={
"maxfun": self.max_fun,
"maxiter": self.max_iter,
"gtol": self.tol,
**_get_additional_lbfgs_options_dict("iprint", iprint),
},
args=(
X,
y,
sample_weight,
activations,
deltas,
coef_grads,
intercept_grads,
),
)
self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)
self.loss_ = opt_res.fun
self._unpack(opt_res.x)
def _fit_stochastic(
self,
X,
y,
sample_weight,
activations,
deltas,
coef_grads,
intercept_grads,
layer_units,
incremental,
):
params = self.coefs_ + self.intercepts_
if not incremental or not hasattr(self, "_optimizer"):
if self.solver == "sgd":
self._optimizer = SGDOptimizer(
params,
self.learning_rate_init,
self.learning_rate,
self.momentum,
self.nesterovs_momentum,
self.power_t,
)
elif self.solver == "adam":
self._optimizer = AdamOptimizer(
params,
self.learning_rate_init,
self.beta_1,
self.beta_2,
self.epsilon,
)
# early_stopping in partial_fit doesn't make sense
if self.early_stopping and incremental:
raise ValueError("partial_fit does not support early_stopping=True")
early_stopping = self.early_stopping
if early_stopping:
# don't stratify in multilabel classification
should_stratify = is_classifier(self) and self.n_outputs_ == 1
stratify = y if should_stratify else None
if sample_weight is None:
X_train, X_val, y_train, y_val = train_test_split(
X,
y,
random_state=self._random_state,
test_size=self.validation_fraction,
stratify=stratify,
)
sample_weight_train = sample_weight_val = None
else:
# TODO: incorporate sample_weight in sampling here.
(
X_train,
X_val,
y_train,
y_val,
sample_weight_train,
sample_weight_val,
) = train_test_split(
X,
y,
sample_weight,
random_state=self._random_state,
test_size=self.validation_fraction,
stratify=stratify,
)
if X_val.shape[0] < 2:
raise ValueError(
"The validation set is too small. Increase 'validation_fraction' "
"or the size of your dataset."
)
if is_classifier(self):
y_val = self._label_binarizer.inverse_transform(y_val)
else:
X_train, y_train, sample_weight_train = X, y, sample_weight
X_val = y_val = sample_weight_val = None
n_samples = X_train.shape[0]
sample_idx = np.arange(n_samples, dtype=int)
if self.batch_size == "auto":
batch_size = min(200, n_samples)
else:
if self.batch_size > n_samples:
warnings.warn(
"Got `batch_size` less than 1 or larger than "
"sample size. It is going to be clipped"
)
batch_size = np.clip(self.batch_size, 1, n_samples)
try:
self.n_iter_ = 0
for it in range(self.max_iter):
if self.shuffle:
# Only shuffle the sample indices instead of X and y to
# reduce the memory footprint. These indices will be used
# to slice the X and y.
sample_idx = shuffle(sample_idx, random_state=self._random_state)
accumulated_loss = 0.0
for batch_slice in gen_batches(n_samples, batch_size):
if self.shuffle:
batch_idx = sample_idx[batch_slice]
X_batch = _safe_indexing(X_train, batch_idx)
else:
batch_idx = batch_slice
X_batch = X_train[batch_idx]
y_batch = y_train[batch_idx]
if sample_weight is None:
sample_weight_batch = None
else:
sample_weight_batch = sample_weight_train[batch_idx]
activations[0] = X_batch
batch_loss, coef_grads, intercept_grads = self._backprop(
X_batch,
y_batch,
sample_weight_batch,
activations,
deltas,
coef_grads,
intercept_grads,
)
accumulated_loss += batch_loss * (
batch_slice.stop - batch_slice.start
)
# update weights
grads = coef_grads + intercept_grads
self._optimizer.update_params(params, grads)
self.n_iter_ += 1
self.loss_ = accumulated_loss / X_train.shape[0]
self.t_ += n_samples
self.loss_curve_.append(self.loss_)
if self.verbose:
print("Iteration %d, loss = %.8f" % (self.n_iter_, self.loss_))
# update no_improvement_count based on training loss or
# validation score according to early_stopping
self._update_no_improvement_count(
early_stopping, X_val, y_val, sample_weight_val
)
# for learning rate that needs to be updated at iteration end
self._optimizer.iteration_ends(self.t_)
if self._no_improvement_count > self.n_iter_no_change:
# not better than last `n_iter_no_change` iterations by tol
# stop or decrease learning rate
if early_stopping:
msg = (
"Validation score did not improve more than "
"tol=%f for %d consecutive epochs."
% (self.tol, self.n_iter_no_change)
)
else:
msg = (
"Training loss did not improve more than tol=%f"
" for %d consecutive epochs."
% (self.tol, self.n_iter_no_change)
)
is_stopping = self._optimizer.trigger_stopping(msg, self.verbose)
if is_stopping:
break
else:
self._no_improvement_count = 0
if incremental:
break
if self.n_iter_ == self.max_iter:
warnings.warn(
"Stochastic Optimizer: Maximum iterations (%d) "
"reached and the optimization hasn't converged yet."
% self.max_iter,
ConvergenceWarning,
)
except KeyboardInterrupt:
warnings.warn("Training interrupted by user.")
if early_stopping:
# restore best weights
self.coefs_ = self._best_coefs
self.intercepts_ = self._best_intercepts
def _update_no_improvement_count(self, early_stopping, X, y, sample_weight):
if early_stopping:
# compute validation score (can be NaN), use that for stopping
val_score = self._score(X, y, sample_weight=sample_weight)
self.validation_scores_.append(val_score)
if self.verbose:
print("Validation score: %f" % self.validation_scores_[-1])
# update best parameters
# use validation_scores_, not loss_curve_
# let's hope no-one overloads .score with mse
last_valid_score = self.validation_scores_[-1]
if last_valid_score < (self.best_validation_score_ + self.tol):
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if last_valid_score > self.best_validation_score_:
self.best_validation_score_ = last_valid_score
self._best_coefs = [c.copy() for c in self.coefs_]
self._best_intercepts = [i.copy() for i in self.intercepts_]
else:
if self.loss_curve_[-1] > self.best_loss_ - self.tol:
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if self.loss_curve_[-1] < self.best_loss_:
self.best_loss_ = self.loss_curve_[-1]
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit the model to data matrix X and target(s) y.
Parameters
----------
X : ndarray or sparse matrix of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 1.7
Returns
-------
self : object
Returns a trained MLP model.
"""
return self._fit(X, y, sample_weight=sample_weight, incremental=False)
def _check_solver(self):
if self.solver not in _STOCHASTIC_SOLVERS:
raise AttributeError(
"partial_fit is only available for stochastic"
" optimizers. %s is not stochastic." % self.solver
)
return True
def _score_with_function(self, X, y, sample_weight, score_function):
"""Private score method without input validation."""
# Input validation would remove feature names, so we disable it
y_pred = self._predict(X, check_input=False)
if np.isnan(y_pred).any() or np.isinf(y_pred).any():
return np.nan
return score_function(y, y_pred, sample_weight=sample_weight)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
return tags
| BaseMultilayerPerceptron |
python | scikit-learn__scikit-learn | sklearn/model_selection/_split.py | {
"start": 92578,
"end": 96765
} | class ____(BaseCrossValidator):
"""Predefined split cross-validator.
Provides train/test indices to split data into train/test sets using a
predefined scheme specified by the user with the ``test_fold`` parameter.
Read more in the :ref:`User Guide <predefined_split>`.
.. versionadded:: 0.16
Parameters
----------
test_fold : array-like of shape (n_samples,)
The entry ``test_fold[i]`` represents the index of the test set that
sample ``i`` belongs to. It is possible to exclude sample ``i`` from
any test set (i.e. include sample ``i`` in every training set) by
setting ``test_fold[i]`` equal to -1.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps)
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for i, (train_index, test_index) in enumerate(ps.split()):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[1 2 3]
Test: index=[0]
Fold 1:
Train: index=[0 2]
Test: index=[1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features), default=None
Always ignored, exists for API compatibility.
y : array-like of shape (n_samples,), default=None
Always ignored, exists for API compatibility.
groups : array-like of shape (n_samples,), default=None
Always ignored, exists for API compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
if groups is not None:
warnings.warn(
f"The groups parameter is ignored by {self.__class__.__name__}",
UserWarning,
)
return self._split()
def _split(self):
"""Generate indices to split data into training and test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator.
Parameters
----------
X : array-like of shape (n_samples, n_features), default=None
Always ignored, exists for API compatibility.
y : array-like of shape (n_samples,), default=None
Always ignored, exists for API compatibility.
groups : array-like of shape (n_samples,), default=None
Always ignored, exists for API compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
| PredefinedSplit |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/components/shell-script-component/with-scaffolder.py | {
"start": 269,
"end": 1001
} | class ____(dg.Scaffolder):
"""Scaffolds a template shell script alongside a filled-out defs.yaml file."""
def scaffold(self, request: ScaffoldRequest) -> None:
dg.scaffold_component(
request,
{
"script_path": "script.sh",
"asset_specs": [
{"key": "my_asset", "description": "Output of running a script"}
],
},
)
script_path = Path(request.target_path) / "script.sh"
script_path.write_text("#!/bin/bash\n\necho 'Hello, world!'")
os.chmod(script_path, 0o755)
# highlight-end
# highlight-start
@scaffold_with(ShellCommandScaffolder)
# highlight-end
@dataclass
| ShellCommandScaffolder |
python | gevent__gevent | src/greentest/3.13/test_queue.py | {
"start": 22601,
"end": 22744
} | class ____(BaseQueueTestMixin):
def setUp(self):
self.type2test = self.queue.PriorityQueue
super().setUp()
| PriorityQueueTest |
python | lepture__mistune | tests/test_commonmark.py | {
"start": 1860,
"end": 2194
} | class ____(BaseTestCase):
@classmethod
def ignore_case(cls, n):
return n in IGNORE_CASES or n in DIFF_CASES
def assert_case(self, n, text, html):
result = mistune.html(text)
self.assertEqual(normalize_html(result), normalize_html(html))
TestCommonMark.load_fixtures("commonmark.json")
| TestCommonMark |
python | redis__redis-py | redis/cluster.py | {
"start": 5405,
"end": 13160
} | class ____:
RedisClusterRequestTTL = 16
PRIMARIES = "primaries"
REPLICAS = "replicas"
ALL_NODES = "all"
RANDOM = "random"
DEFAULT_NODE = "default-node"
NODE_FLAGS = {PRIMARIES, REPLICAS, ALL_NODES, RANDOM, DEFAULT_NODE}
COMMAND_FLAGS = dict_merge(
list_keys_to_dict(
[
"ACL CAT",
"ACL DELUSER",
"ACL DRYRUN",
"ACL GENPASS",
"ACL GETUSER",
"ACL HELP",
"ACL LIST",
"ACL LOG",
"ACL LOAD",
"ACL SAVE",
"ACL SETUSER",
"ACL USERS",
"ACL WHOAMI",
"AUTH",
"CLIENT LIST",
"CLIENT SETINFO",
"CLIENT SETNAME",
"CLIENT GETNAME",
"CONFIG SET",
"CONFIG REWRITE",
"CONFIG RESETSTAT",
"TIME",
"PUBSUB CHANNELS",
"PUBSUB NUMPAT",
"PUBSUB NUMSUB",
"PUBSUB SHARDCHANNELS",
"PUBSUB SHARDNUMSUB",
"PING",
"INFO",
"SHUTDOWN",
"KEYS",
"DBSIZE",
"BGSAVE",
"SLOWLOG GET",
"SLOWLOG LEN",
"SLOWLOG RESET",
"WAIT",
"WAITAOF",
"SAVE",
"MEMORY PURGE",
"MEMORY MALLOC-STATS",
"MEMORY STATS",
"LASTSAVE",
"CLIENT TRACKINGINFO",
"CLIENT PAUSE",
"CLIENT UNPAUSE",
"CLIENT UNBLOCK",
"CLIENT ID",
"CLIENT REPLY",
"CLIENT GETREDIR",
"CLIENT INFO",
"CLIENT KILL",
"READONLY",
"CLUSTER INFO",
"CLUSTER MEET",
"CLUSTER MYSHARDID",
"CLUSTER NODES",
"CLUSTER REPLICAS",
"CLUSTER RESET",
"CLUSTER SET-CONFIG-EPOCH",
"CLUSTER SLOTS",
"CLUSTER SHARDS",
"CLUSTER COUNT-FAILURE-REPORTS",
"CLUSTER KEYSLOT",
"COMMAND",
"COMMAND COUNT",
"COMMAND LIST",
"COMMAND GETKEYS",
"CONFIG GET",
"DEBUG",
"RANDOMKEY",
"READONLY",
"READWRITE",
"TIME",
"TFUNCTION LOAD",
"TFUNCTION DELETE",
"TFUNCTION LIST",
"TFCALL",
"TFCALLASYNC",
"LATENCY HISTORY",
"LATENCY LATEST",
"LATENCY RESET",
"MODULE LIST",
"MODULE LOAD",
"MODULE UNLOAD",
"MODULE LOADEX",
],
DEFAULT_NODE,
),
list_keys_to_dict(
[
"FLUSHALL",
"FLUSHDB",
"FUNCTION DELETE",
"FUNCTION FLUSH",
"FUNCTION LIST",
"FUNCTION LOAD",
"FUNCTION RESTORE",
"SCAN",
"SCRIPT EXISTS",
"SCRIPT FLUSH",
"SCRIPT LOAD",
],
PRIMARIES,
),
list_keys_to_dict(["FUNCTION DUMP"], RANDOM),
list_keys_to_dict(
[
"CLUSTER COUNTKEYSINSLOT",
"CLUSTER DELSLOTS",
"CLUSTER DELSLOTSRANGE",
"CLUSTER GETKEYSINSLOT",
"CLUSTER SETSLOT",
],
SLOT_ID,
),
)
SEARCH_COMMANDS = (
[
"FT.CREATE",
"FT.SEARCH",
"FT.AGGREGATE",
"FT.EXPLAIN",
"FT.EXPLAINCLI",
"FT,PROFILE",
"FT.ALTER",
"FT.DROPINDEX",
"FT.ALIASADD",
"FT.ALIASUPDATE",
"FT.ALIASDEL",
"FT.TAGVALS",
"FT.SUGADD",
"FT.SUGGET",
"FT.SUGDEL",
"FT.SUGLEN",
"FT.SYNUPDATE",
"FT.SYNDUMP",
"FT.SPELLCHECK",
"FT.DICTADD",
"FT.DICTDEL",
"FT.DICTDUMP",
"FT.INFO",
"FT._LIST",
"FT.CONFIG",
"FT.ADD",
"FT.DEL",
"FT.DROP",
"FT.GET",
"FT.MGET",
"FT.SYNADD",
],
)
CLUSTER_COMMANDS_RESPONSE_CALLBACKS = {
"CLUSTER SLOTS": parse_cluster_slots,
"CLUSTER SHARDS": parse_cluster_shards,
"CLUSTER MYSHARDID": parse_cluster_myshardid,
}
RESULT_CALLBACKS = dict_merge(
list_keys_to_dict(["PUBSUB NUMSUB", "PUBSUB SHARDNUMSUB"], parse_pubsub_numsub),
list_keys_to_dict(
["PUBSUB NUMPAT"], lambda command, res: sum(list(res.values()))
),
list_keys_to_dict(
["KEYS", "PUBSUB CHANNELS", "PUBSUB SHARDCHANNELS"], merge_result
),
list_keys_to_dict(
[
"PING",
"CONFIG SET",
"CONFIG REWRITE",
"CONFIG RESETSTAT",
"CLIENT SETNAME",
"BGSAVE",
"SLOWLOG RESET",
"SAVE",
"MEMORY PURGE",
"CLIENT PAUSE",
"CLIENT UNPAUSE",
],
lambda command, res: all(res.values()) if isinstance(res, dict) else res,
),
list_keys_to_dict(
["DBSIZE", "WAIT"],
lambda command, res: sum(res.values()) if isinstance(res, dict) else res,
),
list_keys_to_dict(
["CLIENT UNBLOCK"], lambda command, res: 1 if sum(res.values()) > 0 else 0
),
list_keys_to_dict(["SCAN"], parse_scan_result),
list_keys_to_dict(
["SCRIPT LOAD"], lambda command, res: list(res.values()).pop()
),
list_keys_to_dict(
["SCRIPT EXISTS"], lambda command, res: [all(k) for k in zip(*res.values())]
),
list_keys_to_dict(["SCRIPT FLUSH"], lambda command, res: all(res.values())),
)
ERRORS_ALLOW_RETRY = (
ConnectionError,
TimeoutError,
ClusterDownError,
SlotNotCoveredError,
)
def replace_default_node(self, target_node: "ClusterNode" = None) -> None:
"""Replace the default cluster node.
A random cluster node will be chosen if target_node isn't passed, and primaries
will be prioritized. The default node will not be changed if there are no other
nodes in the cluster.
Args:
target_node (ClusterNode, optional): Target node to replace the default
node. Defaults to None.
"""
if target_node:
self.nodes_manager.default_node = target_node
else:
curr_node = self.get_default_node()
primaries = [node for node in self.get_primaries() if node != curr_node]
if primaries:
# Choose a primary if the cluster contains different primaries
self.nodes_manager.default_node = random.choice(primaries)
else:
# Otherwise, choose a primary if the cluster contains different primaries
replicas = [node for node in self.get_replicas() if node != curr_node]
if replicas:
self.nodes_manager.default_node = random.choice(replicas)
| AbstractRedisCluster |
python | numpy__numpy | tools/swig/test/testTensor.py | {
"start": 14375,
"end": 14687
} | class ____(TensorTestCase):
def __init__(self, methodName="runTest"):
TensorTestCase.__init__(self, methodName)
self.typeStr = "ulongLong"
self.typeCode = "Q"
self.result = int(self.result)
######################################################################
| ulongLongTestCase |
python | numpy__numpy | numpy/distutils/tests/test_ccompiler_opt_conf.py | {
"start": 977,
"end": 5862
} | class ____(FakeCCompilerOpt):
"""A hook to check the sanity of configured features
- before it called by the abstract class '_Feature'
"""
def conf_features_partial(self):
conf_all = self.conf_features
for feature_name, feature in conf_all.items():
self.test_feature(
"attribute conf_features",
conf_all, feature_name, feature
)
conf_partial = FakeCCompilerOpt.conf_features_partial(self)
for feature_name, feature in conf_partial.items():
self.test_feature(
"conf_features_partial()",
conf_partial, feature_name, feature
)
return conf_partial
def test_feature(self, log, search_in, feature_name, feature_dict):
error_msg = (
"during validate '{}' within feature '{}', "
"march '{}' and compiler '{}'\n>> "
).format(log, feature_name, self.cc_march, self.cc_name)
if not feature_name.isupper():
raise AssertionError(error_msg + "feature name must be in uppercase")
for option, val in feature_dict.items():
self.test_option_types(error_msg, option, val)
self.test_duplicates(error_msg, option, val)
self.test_implies(error_msg, search_in, feature_name, feature_dict)
self.test_group(error_msg, search_in, feature_name, feature_dict)
self.test_extra_checks(error_msg, search_in, feature_name, feature_dict)
def test_option_types(self, error_msg, option, val):
for tp, available in (
((str, list), (
"implies", "headers", "flags", "group", "detect", "extra_checks"
)),
((str,), ("disable",)),
((int,), ("interest",)),
((bool,), ("implies_detect",)),
((bool, type(None)), ("autovec",)),
) :
found_it = option in available
if not found_it:
continue
if not isinstance(val, tp):
error_tp = [t.__name__ for t in (*tp,)]
error_tp = ' or '.join(error_tp)
raise AssertionError(error_msg +
"expected '%s' type for option '%s' not '%s'" % (
error_tp, option, type(val).__name__
))
break
if not found_it:
raise AssertionError(error_msg + "invalid option name '%s'" % option)
def test_duplicates(self, error_msg, option, val):
if option not in (
"implies", "headers", "flags", "group", "detect", "extra_checks"
) : return
if isinstance(val, str):
val = val.split()
if len(val) != len(set(val)):
raise AssertionError(error_msg + "duplicated values in option '%s'" % option)
def test_implies(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
return
implies = feature_dict.get("implies", "")
if not implies:
return
if isinstance(implies, str):
implies = implies.split()
if feature_name in implies:
raise AssertionError(error_msg + "feature implies itself")
for impl in implies:
impl_dict = search_in.get(impl)
if impl_dict is not None:
if "disable" in impl_dict:
raise AssertionError(error_msg + "implies disabled feature '%s'" % impl)
continue
raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl)
def test_group(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
return
group = feature_dict.get("group", "")
if not group:
return
if isinstance(group, str):
group = group.split()
for f in group:
impl_dict = search_in.get(f)
if not impl_dict or "disable" in impl_dict:
continue
raise AssertionError(error_msg +
"in option 'group', '%s' already exists as a feature name" % f
)
def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
return
extra_checks = feature_dict.get("extra_checks", "")
if not extra_checks:
return
if isinstance(extra_checks, str):
extra_checks = extra_checks.split()
for f in extra_checks:
impl_dict = search_in.get(f)
if not impl_dict or "disable" in impl_dict:
continue
raise AssertionError(error_msg +
"in option 'extra_checks', extra test case '%s' already exists as a feature name" % f
)
| _TestConfFeatures |
python | rapidsai__cudf | python/cudf/cudf/core/udf/strings_typing.py | {
"start": 801,
"end": 1032
} | class ____(types.Type):
np_dtype: np.dtype[np.object_] = np.dtype("object")
def __init__(self):
super().__init__(name="managed_udf_string")
@property
def return_as(self):
return self
| ManagedUDFString |
python | kamyu104__LeetCode-Solutions | Python/smallest-sufficient-team.py | {
"start": 111,
"end": 925
} | class ____(object):
def smallestSufficientTeam(self, req_skills, people):
"""
:type req_skills: List[str]
:type people: List[List[str]]
:rtype: List[int]
"""
lookup = {v: i for i, v in enumerate(req_skills)}
dp = {0: []}
for i, p in enumerate(people):
his_skill_set = 0
for skill in p:
if skill in lookup:
his_skill_set |= 1 << lookup[skill]
for skill_set, people in dp.items():
with_him = skill_set | his_skill_set
if with_him == skill_set: continue
if with_him not in dp or \
len(dp[with_him]) > len(people)+1:
dp[with_him] = people + [i]
return dp[(1<<len(req_skills))-1]
| Solution |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 39755,
"end": 42007
} | class ____:
def test_properties(self):
value = binascii.unhexlify(b"092384932230498bc980aa8098456f6ff7ff3ac9")
ski = x509.SubjectKeyIdentifier(value)
assert ski.digest == value
assert ski.key_identifier == value
def test_repr(self):
ski = x509.SubjectKeyIdentifier(
binascii.unhexlify(b"092384932230498bc980aa8098456f6ff7ff3ac9")
)
ext = x509.Extension(ExtensionOID.SUBJECT_KEY_IDENTIFIER, False, ski)
assert repr(ext) == (
"<Extension(oid=<ObjectIdentifier(oid=2.5.29.14, name=subjectK"
"eyIdentifier)>, critical=False, value=<SubjectKeyIdentifier(d"
"igest=b'\\t#\\x84\\x93\"0I\\x8b\\xc9\\x80\\xaa\\x80\\x98Eoo"
"\\xf7\\xff:\\xc9')>)>"
)
def test_eq(self):
ski = x509.SubjectKeyIdentifier(
binascii.unhexlify(b"092384932230498bc980aa8098456f6ff7ff3ac9")
)
ski2 = x509.SubjectKeyIdentifier(
binascii.unhexlify(b"092384932230498bc980aa8098456f6ff7ff3ac9")
)
assert ski == ski2
def test_ne(self):
ski = x509.SubjectKeyIdentifier(
binascii.unhexlify(b"092384932230498bc980aa8098456f6ff7ff3ac9")
)
ski2 = x509.SubjectKeyIdentifier(
binascii.unhexlify(b"aa8098456f6ff7ff3ac9092384932230498bc980")
)
assert ski != ski2
assert ski != object()
def test_hash(self):
ski1 = x509.SubjectKeyIdentifier(
binascii.unhexlify(b"092384932230498bc980aa8098456f6ff7ff3ac9")
)
ski2 = x509.SubjectKeyIdentifier(
binascii.unhexlify(b"092384932230498bc980aa8098456f6ff7ff3ac9")
)
ski3 = x509.SubjectKeyIdentifier(
binascii.unhexlify(b"aa8098456f6ff7ff3ac9092384932230498bc980")
)
assert hash(ski1) == hash(ski2)
assert hash(ski1) != hash(ski3)
def test_public_bytes(self):
ext = x509.SubjectKeyIdentifier(
binascii.unhexlify(b"092384932230498bc980aa8098456f6ff7ff3ac9")
)
assert (
ext.public_bytes()
== b'\x04\x14\t#\x84\x93"0I\x8b\xc9\x80\xaa\x80\x98Eoo\xf7\xff:'
b"\xc9"
)
| TestSubjectKeyIdentifier |
python | lepture__authlib | authlib/integrations/httpx_client/oauth1_client.py | {
"start": 979,
"end": 3264
} | class ____(_OAuth1Client, httpx.AsyncClient):
auth_class = OAuth1Auth
def __init__(
self,
client_id,
client_secret=None,
token=None,
token_secret=None,
redirect_uri=None,
rsa_key=None,
verifier=None,
signature_method=SIGNATURE_HMAC_SHA1,
signature_type=SIGNATURE_TYPE_HEADER,
force_include_body=False,
**kwargs,
):
_client_kwargs = extract_client_kwargs(kwargs)
httpx.AsyncClient.__init__(self, **_client_kwargs)
_OAuth1Client.__init__(
self,
None,
client_id=client_id,
client_secret=client_secret,
token=token,
token_secret=token_secret,
redirect_uri=redirect_uri,
rsa_key=rsa_key,
verifier=verifier,
signature_method=signature_method,
signature_type=signature_type,
force_include_body=force_include_body,
**kwargs,
)
async def fetch_access_token(self, url, verifier=None, **kwargs):
"""Method for fetching an access token from the token endpoint.
This is the final step in the OAuth 1 workflow. An access token is
obtained using all previously obtained credentials, including the
verifier from the authorization step.
:param url: Access Token endpoint.
:param verifier: A verifier string to prove authorization was granted.
:param kwargs: Extra parameters to include for fetching access token.
:return: A token dict.
"""
if verifier:
self.auth.verifier = verifier
if not self.auth.verifier:
self.handle_error("missing_verifier", 'Missing "verifier" value')
token = await self._fetch_token(url, **kwargs)
self.auth.verifier = None
return token
async def _fetch_token(self, url, **kwargs):
resp = await self.post(url, **kwargs)
text = await resp.aread()
token = self.parse_response_token(resp.status_code, to_unicode(text))
self.token = token
return token
@staticmethod
def handle_error(error_type, error_description):
raise OAuthError(error_type, error_description)
| AsyncOAuth1Client |
python | getsentry__sentry | tests/sentry/notifications/notifications/test_assigned.py | {
"start": 908,
"end": 21557
} | class ____(APITestCase):
def validate_email(self, outbox, index, email, txt_msg, html_msg):
msg = outbox[index]
assert msg.to == [email]
assert isinstance(msg, EmailMultiAlternatives)
# check the txt version
assert txt_msg in msg.body
# check the html version
alt0 = msg.alternatives[0][0]
assert isinstance(alt0, str)
assert html_msg in alt0
def validate_slack_message(self, msg, group, project, user_id, mock_post, index=0):
blocks = orjson.loads(mock_post.call_args_list[index].kwargs["blocks"])
fallback_text = mock_post.call_args_list[index].kwargs["text"]
assert fallback_text == msg
assert group.title in blocks[1]["text"]["text"]
assert project.slug in blocks[-2]["elements"][0]["text"]
channel = mock_post.call_args_list[index].kwargs["channel"]
assert channel == str(user_id)
def setup_user(self, user, team):
member = self.create_member(user=user, organization=self.organization, role="member")
self.create_team_membership(team, member, role="admin")
self.create_user_option(user=user, key="self_notifications", value="1")
self.access_token = "xoxb-access-token"
self.identity = self.create_identity(
user=user, identity_provider=self.provider, external_id=user.id
)
def setUp(self) -> None:
super().setUp()
self.integration = self.create_integration(
organization=self.organization,
external_id="TXXXXXXX1",
metadata={
"access_token": "xoxb-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
"domain_name": "sentry.slack.com",
"installation_type": "born_as_bot",
},
name="Awesome Team",
provider="slack",
)
self.provider = self.create_identity_provider(integration=self.integration)
self.login_as(self.user)
def test_sends_assignment_notification(self, mock_post: MagicMock) -> None:
"""
Test that an email AND Slack notification are sent with
the expected values when an issue is assigned.
"""
user = self.create_user()
self.setup_user(user, self.team)
self.login_as(user)
url = f"/api/0/issues/{self.group.id}/"
with self.tasks():
response = self.client.put(url, format="json", data={"assignedTo": user.username})
assert response.status_code == 200, response.content
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
# check the txt version
assert f"assigned {self.group.qualified_short_id} to themselves" in msg.body
# check the html version
alt0 = msg.alternatives[0][0]
assert isinstance(alt0, str)
assert f"{self.group.qualified_short_id}</a> to themselves</p>" in alt0
blocks = orjson.loads(mock_post.call_args.kwargs["blocks"])
fallback_text = mock_post.call_args.kwargs["text"]
assert fallback_text == f"Issue assigned to {user.get_display_name()} by themselves"
assert self.group.title in blocks[1]["text"]["text"]
assert self.project.slug in blocks[-2]["elements"][0]["text"]
@with_feature("organizations:suspect-commits-in-emails")
def test_sends_assignment_notification_with_suspect_commits(self, mock_post):
"""
Test that suspect commits are included in assignment notification emails
when GroupOwner records exist.
"""
user = self.create_user()
self.setup_user(user, self.team)
self.login_as(user)
# Create a repository and commit for suspect commit testing
repo = self.create_repo(
project=self.project,
name="example/repo",
)
commit = self.create_commit(
project=self.project,
repo=repo,
author=self.create_commit_author(project=self.project, user=user),
key="abc123def456",
message="feat: Add new feature\n\nThis is a longer commit message with details.",
)
# Create a GroupOwner record for suspect commit
GroupOwner.objects.create(
group=self.group,
user_id=user.id,
project=self.project,
organization=self.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
context={"commitId": commit.id},
)
url = f"/api/0/issues/{self.group.id}/"
with self.tasks():
response = self.client.put(url, format="json", data={"assignedTo": user.username})
assert response.status_code == 200, response.content
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
# Check that suspect commits appear in plain text email
assert "Suspect Commits" in msg.body
assert "feat: Add new feature" in msg.body # commit subject
assert "abc123d" in msg.body # shortened commit ID
assert user.get_display_name() in msg.body # commit author
# Check that suspect commits appear in HTML email
html_content = msg.alternatives[0][0]
assert isinstance(html_content, str)
assert "Suspect Commits" in html_content
assert "feat: Add new feature" in html_content # commit subject
assert "abc123d" in html_content # shortened commit ID
assert user.get_display_name() in html_content # commit author
@with_feature("organizations:suspect-commits-in-emails")
def test_sends_assignment_notification_without_suspect_commits(self, mock_post):
"""
Test that assignment notifications work normally when no suspect commits exist.
"""
user = self.create_user()
self.setup_user(user, self.team)
self.login_as(user)
url = f"/api/0/issues/{self.group.id}/"
with self.tasks():
response = self.client.put(url, format="json", data={"assignedTo": user.username})
assert response.status_code == 200, response.content
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
# Check that no suspect commits section appears
assert "Suspect Commits" not in msg.body
html_content = msg.alternatives[0][0]
assert isinstance(html_content, str)
assert "Suspect Commits" not in html_content
# But assignment notification should still work
assert f"assigned {self.group.qualified_short_id} to themselves" in msg.body
@with_feature("organizations:suspect-commits-in-emails")
def test_enhanced_privacy_hides_suspect_commits_in_emails(self, mock_post):
user = self.create_user()
self.setup_user(user, self.team)
self.login_as(user)
repo = self.create_repo(
project=self.project,
name="example/repo",
)
commit = self.create_commit(
project=self.project,
repo=repo,
author=self.create_commit_author(project=self.project, user=user),
key="abc123def456",
message="feat: Add new feature\n\nThis is a longer commit message with details.",
)
GroupOwner.objects.create(
group=self.group,
user_id=user.id,
project=self.project,
organization=self.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
context={"commitId": commit.id},
)
# Enable enhanced privacy flag
self.organization.update(flags=F("flags").bitor(Organization.flags.enhanced_privacy))
self.organization.refresh_from_db()
assert self.organization.flags.enhanced_privacy.is_set is True
url = f"/api/0/issues/{self.group.id}/"
with self.tasks():
response = self.client.put(url, format="json", data={"assignedTo": user.username})
assert response.status_code == 200, response.content
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
assert "Suspect Commits" not in msg.body # plaintext version
assert "feat: Add new feature" not in msg.body # commit subject should not appear
assert "abc123d" not in msg.body # shortened commit ID should not appear
html_content = msg.alternatives[0][0]
assert isinstance(html_content, str)
assert "Suspect Commits" not in html_content # HTML version
assert "feat: Add new feature" not in html_content # commit subject should not appear
assert "abc123d" not in html_content # shortened commit ID should not appear
# assignment notification should still work normally
assert f"assigned {self.group.qualified_short_id} to themselves" in msg.body
@with_feature("organizations:suspect-commits-in-emails")
def test_enhanced_privacy_default_shows_suspect_commits_in_emails(self, mock_post):
"""
Test that suspect commits are shown by default in assignment notification emails
when enhanced privacy is not set.
"""
user = self.create_user()
self.setup_user(user, self.team)
self.login_as(user)
repo = self.create_repo(
project=self.project,
name="example/repo",
)
commit = self.create_commit(
project=self.project,
repo=repo,
author=self.create_commit_author(project=self.project, user=user),
key="abc123def456",
message="feat: Add new feature\n\nThis is a longer commit message with details.",
)
GroupOwner.objects.create(
group=self.group,
user_id=user.id,
project=self.project,
organization=self.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
context={"commitId": commit.id},
)
assert self.organization.flags.enhanced_privacy.is_set is False
url = f"/api/0/issues/{self.group.id}/"
with self.tasks():
response = self.client.put(url, format="json", data={"assignedTo": user.username})
assert response.status_code == 200, response.content
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
assert "Suspect Commits" in msg.body # plaintext version
assert "feat: Add new feature" in msg.body # commit subject should appear
assert "abc123d" in msg.body # shortened commit ID should appear
html_content = msg.alternatives[0][0]
assert isinstance(html_content, str)
assert "Suspect Commits" in html_content # HTML version
assert "feat: Add new feature" in html_content # commit subject should appear
assert "abc123d" in html_content # shortened commit ID should appear
# assignment notification should still work normally
assert f"assigned {self.group.qualified_short_id} to themselves" in msg.body
@with_feature("organizations:suspect-commits-in-emails")
def test_sends_assignment_notification_with_multiple_suspect_commits(self, mock_post):
"""
Test that when multiple suspect commits exist, the most recent one is displayed in notifications.
"""
user1 = self.create_user(name="Alice", email="alice@example.com")
user2 = self.create_user(name="Bob", email="bob@example.com")
self.setup_user(user1, self.team)
self.login_as(user1)
# Create a repository and multiple commits
repo = self.create_repo(
project=self.project,
name="example/repo",
)
commit1 = self.create_commit(
project=self.project,
repo=repo,
author=self.create_commit_author(project=self.project, user=user1),
key="abc123def456",
message="feat: Add feature A",
)
commit2 = self.create_commit(
project=self.project,
repo=repo,
author=self.create_commit_author(project=self.project, user=user2),
key="def456ghi789",
message="fix: Bug fix for feature A",
)
# Create GroupOwner records for both commits
GroupOwner.objects.create(
group=self.group,
user_id=user1.id,
project=self.project,
organization=self.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
context={"commitId": commit1.id},
)
GroupOwner.objects.create(
group=self.group,
user_id=user2.id,
project=self.project,
organization=self.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
context={"commitId": commit2.id},
)
url = f"/api/0/issues/{self.group.id}/"
with self.tasks():
response = self.client.put(url, format="json", data={"assignedTo": user1.username})
assert response.status_code == 200, response.content
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
# Check that only the most recent commit appears (commit2 since it was created later)
assert "Suspect Commits" in msg.body
assert "fix: Bug fix for feature A" in msg.body # This is the more recent commit
assert "def456g" in msg.body
assert "Bob" in msg.body
# The earlier commit should not appear
assert "feat: Add feature A" not in msg.body
def test_sends_reassignment_notification_user(self, mock_post: MagicMock) -> None:
"""Test that if a user is assigned to an issue and then the issue is reassigned to a different user
that the original assignee receives an unassignment notification as well as the new assignee
receiving an assignment notification"""
user1 = self.create_user(email="user1@foo.com")
user2 = self.create_user(email="user2@foo.com")
self.setup_user(user1, self.team)
self.setup_user(user2, self.team)
self.login_as(user1)
url = f"/api/0/issues/{self.group.id}/"
# assign to user 1
with self.tasks():
response = self.client.put(
url,
format="json",
data={"assignedTo": user1.username, "assignedBy": user1.username},
)
assert response.status_code == 200, response.content
data = {
"assignee": str(user1.id),
"assigneeEmail": user1.email,
"assigneeName": user1.name,
"assigneeType": "user",
}
assert Activity.objects.filter(
group_id=self.group.id, type=ActivityType.ASSIGNED.value, user_id=user1.id, data=data
).exists()
assert len(mail.outbox) == 1
txt_msg = f"assigned {self.group.qualified_short_id} to themselves"
html_msg = f"{self.group.qualified_short_id}</a> to themselves</p>"
msg = f"Issue assigned to {user1.get_display_name()} by themselves"
self.validate_slack_message(msg, self.group, self.project, user1.id, mock_post, index=0)
self.validate_email(mail.outbox, 0, user1.email, txt_msg, html_msg)
# re-assign to user 2
with self.tasks():
response = self.client.put(
url,
format="json",
data={"assignedTo": user2.username, "assignedBy": user1.username},
)
assert response.status_code == 200, response.content
data = {
"assignee": str(user2.id),
"assigneeEmail": user2.email,
"assigneeName": user2.name,
"assigneeType": "user",
}
assert Activity.objects.filter(
group_id=self.group.id, type=ActivityType.ASSIGNED.value, user_id=user1.id, data=data
).exists()
assert len(mail.outbox) == 3
txt_msg = f"assigned {self.group.qualified_short_id} to {user2.email}"
html_msg = f"{self.group.qualified_short_id}</a> to {user2.email}</p>"
self.validate_email(mail.outbox, 1, user1.email, txt_msg, html_msg)
txt_msg = f"{user1.email} assigned {self.group.qualified_short_id} to {user2.email}"
html_msg = f"{self.group.qualified_short_id}</a> to {user2.email}"
self.validate_email(mail.outbox, 2, user2.email, txt_msg, html_msg)
msg = f"Issue assigned to {user2.get_display_name()} by {user1.get_display_name()}"
self.validate_slack_message(msg, self.group, self.project, user1.id, mock_post, index=1)
self.validate_slack_message(msg, self.group, self.project, user2.id, mock_post, index=2)
def test_sends_reassignment_notification_team(self, mock_post: MagicMock) -> None:
"""Test that if a team is assigned to an issue and then the issue is reassigned to a different team
that the originally assigned team receives an unassignment notification as well as the new assigned
team receiving an assignment notification"""
user1 = self.create_user("foo@example.com")
user2 = self.create_user("bar@example.com")
user3 = self.create_user("baz@example.com")
user4 = self.create_user("boo@example.com")
team1 = self.create_team()
team2 = self.create_team()
project = self.create_project(teams=[team1, team2])
group = self.create_group(project=project)
self.setup_user(user1, team1)
self.setup_user(user2, team1)
self.setup_user(user3, team2)
self.setup_user(user4, team2)
self.login_as(user1)
url = f"/api/0/issues/{group.id}/"
# assign to team1
with self.tasks():
response = self.client.put(
url,
format="json",
data={"assignedTo": f"team:{team1.id}", "assignedBy": self.user.username},
)
assert response.status_code == 200, response.content
data = {
"assignee": str(team1.id),
"assigneeEmail": None,
"assigneeName": team1.name,
"assigneeType": "team",
}
assert Activity.objects.filter(
group_id=group.id, user_id=user1.id, type=ActivityType.ASSIGNED.value, data=data
).exists()
assert len(mail.outbox) == 2
txt_msg = f"assigned {group.qualified_short_id} to the {team1.slug} team"
html_msg = f"{group.qualified_short_id}</a> to the {team1.slug} team</p>"
self.validate_email(mail.outbox, 0, user1.email, txt_msg, html_msg)
self.validate_email(mail.outbox, 1, user2.email, txt_msg, html_msg)
msg = f"Issue assigned to the {team1.slug} team by {user1.email}"
self.validate_slack_message(msg, group, project, user1.id, mock_post, index=0)
self.validate_slack_message(msg, group, project, user2.id, mock_post, index=1)
# reassign to team2
with self.tasks():
response = self.client.put(
url,
format="json",
data={"assignedTo": f"team:{team2.id}", "assignedBy": self.user.username},
)
assert response.status_code == 200, response.content
data = {
"assignee": str(team2.id),
"assigneeEmail": None,
"assigneeName": team2.name,
"assigneeType": "team",
}
assert Activity.objects.filter(
group_id=group.id, user_id=user1.id, type=ActivityType.ASSIGNED.value, data=data
).exists()
assert len(mail.outbox) == 6
txt_msg = f"{user1.email} assigned {group.qualified_short_id} to the {team2.slug} team"
html_msg = f"{user1.email}</strong> assigned"
self.validate_email(mail.outbox, 2, user1.email, txt_msg, html_msg)
self.validate_email(mail.outbox, 3, user2.email, txt_msg, html_msg)
txt_msg = f"assigned {group.qualified_short_id} to the {team2.slug} team"
html_msg = f"to the {team2.slug} team</p>"
self.validate_email(mail.outbox, 4, user3.email, txt_msg, html_msg)
self.validate_email(mail.outbox, 5, user4.email, txt_msg, html_msg)
msg = f"Issue assigned to the {team2.slug} team by {user1.email}"
self.validate_slack_message(msg, group, project, user1.id, mock_post, index=2)
self.validate_slack_message(msg, group, project, user2.id, mock_post, index=3)
self.validate_slack_message(msg, group, project, user3.id, mock_post, index=4)
self.validate_slack_message(msg, group, project, user4.id, mock_post, index=5)
| AssignedNotificationAPITest |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 1131,
"end": 1370
} | class ____(Message):
message = "'from %s import *' only allowed at module level"
def __init__(self, filename, loc, modname):
Message.__init__(self, filename, loc)
self.message_args = (modname,)
| ImportStarNotPermitted |
python | ethereum__web3.py | tests/core/method-class/test_result_formatters.py | {
"start": 496,
"end": 907
} | class ____(Module):
method = Method("method_for_test", result_formatters=result_formatter)
@pytest.fixture
def dummy_w3():
w3 = Web3(
DummyProvider(),
modules={"module": ModuleForTest},
)
return w3
def test_result_formatter(dummy_w3, request_mocker):
with request_mocker(dummy_w3, mock_results=result_for_test):
assert dummy_w3.module.method() == "OKAY"
| ModuleForTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarTuple24.py | {
"start": 241,
"end": 696
} | class ____(Generic[*Ts]):
def __init__(self) -> None:
self.x: list[Union[*Ts]] = []
reveal_type(self.x, expected_text="list[Union[*Ts@ClassA]]")
def method(self) -> Union[*Ts]: ...
a1 = ClassA[int, bool, str]()
reveal_type(a1.method(), expected_text="int | bool | str")
reveal_type(a1.x, expected_text="list[int | bool | str]")
def func1(t0: tuple[*Ts], t1: tuple[*Ts]):
return all(v0 == v1 for v0, v1 in zip(t0, t1))
| ClassA |
python | kamyu104__LeetCode-Solutions | Python/longest-increasing-subsequence-ii.py | {
"start": 71,
"end": 1383
} | class ____(object):
def __init__(self, N,
build_fn=lambda _: 0,
query_fn=lambda x, y: y if x is None else x if y is None else max(x, y),
update_fn=lambda x: x):
self.tree = [None]*(2*2**((N-1).bit_length()))
self.base = len(self.tree)//2
self.query_fn = query_fn
self.update_fn = update_fn
for i in xrange(self.base, self.base+N):
self.tree[i] = build_fn(i-self.base)
for i in reversed(xrange(1, self.base)):
self.tree[i] = query_fn(self.tree[2*i], self.tree[2*i+1])
def update(self, i, h):
x = self.base+i
self.tree[x] = self.update_fn(h)
while x > 1:
x //= 2
self.tree[x] = self.query_fn(self.tree[x*2], self.tree[x*2+1])
def query(self, L, R):
if L > R:
return 0
L += self.base
R += self.base
left = right = None
while L <= R:
if L & 1:
left = self.query_fn(left, self.tree[L])
L += 1
if R & 1 == 0:
right = self.query_fn(self.tree[R], right)
R -= 1
L //= 2
R //= 2
return self.query_fn(left, right)
# segment tree with coordinate compression
| SegmentTree |
python | jd__tenacity | tenacity/retry.py | {
"start": 7507,
"end": 8440
} | class ____(retry_if_exception_message):
"""Retries until an exception message equals or matches."""
def __init__(
self,
message: typing.Optional[str] = None,
match: typing.Union[None, str, typing.Pattern[str]] = None,
) -> None:
super().__init__(message, match)
# invert predicate
if_predicate = self.predicate
self.predicate = lambda *args_, **kwargs_: not if_predicate(*args_, **kwargs_)
def __call__(self, retry_state: "RetryCallState") -> bool:
if retry_state.outcome is None:
raise RuntimeError("__call__() called before outcome was set")
if not retry_state.outcome.failed:
return True
exception = retry_state.outcome.exception()
if exception is None:
raise RuntimeError("outcome failed but the exception is None")
return self.predicate(exception)
| retry_if_not_exception_message |
python | huggingface__transformers | src/transformers/models/wav2vec2/modeling_wav2vec2.py | {
"start": 87632,
"end": 93906
} | class ____(Wav2Vec2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.wav2vec2 = Wav2Vec2Model(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])
tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]
self.tdnn = nn.ModuleList(tdnn_layers)
self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)
self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)
self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)
self.post_init()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.wav2vec2.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.wav2vec2.parameters():
param.requires_grad = False
def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the TDNN layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return (input_length - kernel_size) // stride + 1
for kernel_size in self.config.tdnn_kernel:
input_lengths = _conv_out_length(input_lengths, kernel_size, 1)
return input_lengths
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[tuple, XVectorOutput]:
r"""
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
(`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.wav2vec2(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
for tdnn_layer in self.tdnn:
hidden_states = tdnn_layer(hidden_states)
# Statistic Pooling
if attention_mask is None:
mean_features = hidden_states.mean(dim=1)
std_features = hidden_states.std(dim=1)
else:
feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))
tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)
mean_features = []
std_features = []
for i, length in enumerate(tdnn_output_lengths):
mean_features.append(hidden_states[i, :length].mean(dim=0))
std_features.append(hidden_states[i, :length].std(dim=0))
mean_features = torch.stack(mean_features)
std_features = torch.stack(std_features)
statistic_pooling = torch.cat([mean_features, std_features], dim=-1)
output_embeddings = self.feature_extractor(statistic_pooling)
logits = self.classifier(output_embeddings)
loss = None
if labels is not None:
loss = self.objective(logits, labels)
if not return_dict:
output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return XVectorOutput(
loss=loss,
logits=logits,
embeddings=output_embeddings,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
| Wav2Vec2ForXVector |
python | huggingface__transformers | tests/models/rag/test_modeling_rag.py | {
"start": 24434,
"end": 25685
} | class ____(RagTestMixin, unittest.TestCase):
@cached_property
def config_and_inputs(self):
question_encoder_tester = DPRModelTester(self)
dpr_config_and_inputs = question_encoder_tester.prepare_config_and_inputs()
generator_tester = T5ModelTester(self, vocab_size=1101)
t5_config_and_inputs = generator_tester.prepare_config_and_inputs()
(question_encoder_config, input_ids, _, input_mask, _, _, _) = dpr_config_and_inputs
(generator_config, _, decoder_input_ids, _, decoder_attention_mask, _) = t5_config_and_inputs
config = RagConfig.from_question_encoder_generator_configs(
question_encoder_config,
generator_config,
n_docs=self.n_docs,
retrieval_vector_size=self.retrieval_vector_size,
max_combined_length=self.max_combined_length,
)
return {
"config": config,
"input_ids": input_ids,
"attention_mask": input_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
}
@require_torch
@require_retrieval
@require_sentencepiece
@require_tokenizers
@require_torch_non_multi_accelerator
@slow
| RagDPRT5Test |
python | encode__django-rest-framework | tests/test_views.py | {
"start": 472,
"end": 1109
} | class ____(APIView):
def get(self, request, *args, **kwargs):
return Response({'method': 'GET'})
def post(self, request, *args, **kwargs):
return Response({'method': 'POST', 'data': request.data})
@api_view(['GET', 'POST', 'PUT', 'PATCH'])
def basic_view(request):
if request.method == 'GET':
return {'method': 'GET'}
elif request.method == 'POST':
return {'method': 'POST', 'data': request.data}
elif request.method == 'PUT':
return {'method': 'PUT', 'data': request.data}
elif request.method == 'PATCH':
return {'method': 'PATCH', 'data': request.data}
| BasicView |
python | qdrant__qdrant-client | qdrant_client/embed/model_embedder.py | {
"start": 871,
"end": 1579
} | class ____(Worker):
def __init__(self, batch_size: int, **kwargs: Any):
self.model_embedder = ModelEmbedder(**kwargs)
self.batch_size = batch_size
@classmethod
def start(cls, batch_size: int, **kwargs: Any) -> "ModelEmbedderWorker":
return cls(threads=1, batch_size=batch_size, **kwargs)
def process(self, items: Iterable[tuple[int, Any]]) -> Iterable[tuple[int, Any]]:
for idx, batch in items:
yield (
idx,
list(
self.model_embedder.embed_models_batch(
batch, inference_batch_size=self.batch_size
)
),
)
| ModelEmbedderWorker |
python | pandas-dev__pandas | doc/source/conf.py | {
"start": 16804,
"end": 17251
} | class ____(MethodDocumenter):
"""
Specialized Documenter subclass for accessors.
"""
objtype = "accessor"
directivetype = "method"
# lower than MethodDocumenter so this is not chosen for normal methods
priority = 0.6
def format_signature(self) -> str:
# this method gives an error/warning for the accessors, therefore
# overriding it (accessor has no arguments)
return ""
| AccessorDocumenter |
python | tensorflow__tensorflow | tensorflow/python/training/monitored_session.py | {
"start": 47937,
"end": 52440
} | class ____(_WrappedSession):
"""A wrapped session that recreates a session upon certain kinds of errors.
The constructor is passed a SessionCreator object, not a session.
Calls to `run()` are delegated to the wrapped session. If a call raises the
exception `tf.errors.AbortedError` or `tf.errors.UnavailableError`, the
wrapped session is closed, and a new one is created by calling the factory
again.
"""
def __init__(self, sess_creator):
"""Create a new `_RecoverableSession`.
The value returned by calling `sess_creator.create_session()` will be the
session wrapped by this recoverable session.
Args:
sess_creator: A 'SessionCreator' to be wrapped by recoverable.
"""
self._sess_creator = sess_creator
_WrappedSession.__init__(self, self._create_session())
def _create_session(self):
while True:
try:
return self._sess_creator.create_session()
except _PREEMPTION_ERRORS as e:
logging.info(
'An error was raised while a session was being created. '
'This may be due to a preemption of a connected worker '
'or parameter server. A new session will be created. '
'This error may also occur due to a gRPC failure caused '
'by high memory or network bandwidth usage in the '
'parameter servers. If this error occurs repeatedly, try '
'increasing the number of parameter servers assigned to '
'the job. Error: %s', e)
def _check_stop(self):
try:
if self._sess:
return self._sess._check_stop() # pylint: disable=protected-access
else:
return True
except _PREEMPTION_ERRORS as e:
logging.info(
'An error was raised while considering whether the '
'session is complete. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. This error may also occur due to a gRPC failure '
'caused by high memory or network bandwidth usage in the '
'parameter servers. If this error occurs repeatedly, try '
'increasing the number of parameter servers assigned to '
'the job. Error: %s', e)
self.close()
self._sess = self._create_session()
# Since we have just recreated the session, the overall computation should
# not stop:
return False
except Exception: # pylint: disable=broad-except
# `should_stop` should return True instead of raising an exception.
return True
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
while True:
try:
if not self._sess:
self._sess = self._create_session()
return self._sess.run(
fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
except _PREEMPTION_ERRORS as e:
logging.info(
'An error was raised. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. This error may also occur due to a gRPC failure '
'caused by high memory or network bandwidth usage in the '
'parameter servers. If this error occurs repeatedly, try '
'increasing the number of parameter servers assigned to '
'the job. Error: %s', e)
self.close()
self._sess = None
def run_step_fn(self, step_fn, raw_session, run_with_hooks):
while True:
try:
if not self._sess:
self._sess = self._create_session()
run_with_hooks = self._sess.run
return self._sess.run_step_fn(step_fn, raw_session, run_with_hooks)
except _PREEMPTION_ERRORS as e:
logging.info(
'An error was raised. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. This error may also occur due to a gRPC failure '
'caused by high memory or network bandwidth usage in the '
'parameter servers. If this error occurs repeatedly, try '
'increasing the number of parameter servers assigned to '
'the job. Error: %s', e)
self.close()
self._sess = None
| _RecoverableSession |
python | django__django | tests/generic_relations/models.py | {
"start": 2706,
"end": 2832
} | class ____(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(has_tail=True)
| GeckoManager |
python | PyCQA__pylint | tests/functional/i/init_not_called.py | {
"start": 430,
"end": 566
} | class ____(AAAA, BBBB, CCCC):
"""derived class"""
def __init__(self): # [super-init-not-called]
AAAA.__init__(self)
| ZZZZ |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_aux/test_basic_helm_chart.py | {
"start": 3786,
"end": 36460
} | class ____:
"""Tests basic helm chart tests."""
def _get_values_with_version(self, values, version):
if version != "default":
values["airflowVersion"] = version
return values
def _get_object_count(self, version):
if self._is_airflow_3_or_above(version):
return OBJECT_COUNT_IN_BASIC_DEPLOYMENT
if version == "2.3.2":
return OBJECT_COUNT_IN_AF2_BASIC_DEPLOYMENT + 1
return OBJECT_COUNT_IN_AF2_BASIC_DEPLOYMENT
def _is_airflow_3_or_above(self, version):
return version == "default" or (parse_version(version) >= parse_version("3.0.0"))
@pytest.mark.parametrize("version", ["2.3.2", "2.4.0", "3.0.0", "default"])
def test_basic_deployments(self, version):
k8s_objects = render_chart(
"test-basic",
self._get_values_with_version(
values={
"chart": {
"metadata": "AA",
},
"labels": {"test-label": "TEST-VALUE"},
"fullnameOverride": "test-basic",
},
version=version,
),
)
list_of_kind_names_tuples = {
(k8s_object["kind"], k8s_object["metadata"]["name"]) for k8s_object in k8s_objects
}
expected = {
("ServiceAccount", "test-basic-create-user-job"),
("ServiceAccount", "test-basic-migrate-database-job"),
("ServiceAccount", "test-basic-redis"),
("ServiceAccount", "test-basic-scheduler"),
("ServiceAccount", "test-basic-statsd"),
("ServiceAccount", "test-basic-triggerer"),
("ServiceAccount", "test-basic-worker"),
("Secret", "test-basic-metadata"),
("Secret", "test-basic-broker-url"),
("Secret", "test-basic-fernet-key"),
("Secret", "test-basic-postgresql"),
("Secret", "test-basic-redis-password"),
("ConfigMap", "test-basic-config"),
("ConfigMap", "test-basic-statsd"),
("Role", "test-basic-pod-launcher-role"),
("Role", "test-basic-pod-log-reader-role"),
("RoleBinding", "test-basic-pod-launcher-rolebinding"),
("RoleBinding", "test-basic-pod-log-reader-rolebinding"),
("Service", "test-basic-postgresql-hl"),
("Service", "test-basic-postgresql"),
("Service", "test-basic-redis"),
("Service", "test-basic-statsd"),
("Service", "test-basic-worker"),
("Deployment", "test-basic-scheduler"),
("Deployment", "test-basic-statsd"),
(self.default_trigger_obj(version), "test-basic-triggerer"),
("StatefulSet", "test-basic-postgresql"),
("StatefulSet", "test-basic-redis"),
("StatefulSet", "test-basic-worker"),
("Job", "test-basic-create-user"),
("Job", "test-basic-run-airflow-migrations"),
}
if version == "2.3.2":
expected.add(("Secret", "test-basic-result-backend"))
if self._is_airflow_3_or_above(version):
expected.update(
(
("Deployment", "test-basic-api-server"),
("Deployment", "test-basic-dag-processor"),
("Service", "test-basic-api-server"),
("ServiceAccount", "test-basic-api-server"),
("ServiceAccount", "test-basic-dag-processor"),
("Service", "test-basic-triggerer"),
("Secret", "test-basic-api-secret-key"),
("Secret", "test-basic-jwt-secret"),
)
)
else:
expected.update(
(
("Deployment", "test-basic-webserver"),
("Service", "test-basic-webserver"),
("ServiceAccount", "test-basic-webserver"),
("Secret", "test-basic-webserver-secret-key"),
)
)
if version == "default":
expected.add(("Service", "test-basic-triggerer"))
assert list_of_kind_names_tuples == expected
assert len(k8s_objects) == len(expected)
for k8s_object in k8s_objects:
labels = jmespath.search("metadata.labels", k8s_object) or {}
if "helm.sh/chart" in labels:
chart_name = labels.get("helm.sh/chart")
else:
chart_name = labels.get("chart")
if chart_name and "postgresql" in chart_name:
continue
k8s_name = k8s_object["kind"] + ":" + k8s_object["metadata"]["name"]
assert labels.get("test-label") == "TEST-VALUE", (
f"Missing label test-label on {k8s_name}. Current labels: {labels}"
)
def test_basic_deployments_with_standard_naming(self):
k8s_objects = render_chart(
"test-basic",
{"useStandardNaming": True},
)
actual = {(x["kind"], x["metadata"]["name"]) for x in k8s_objects}
assert actual == DEFAULT_OBJECTS_STD_NAMING
@pytest.mark.parametrize("version", ["2.3.2", "3.0.0", "default"])
def test_basic_deployment_with_standalone_dag_processor(self, version):
k8s_objects = render_chart(
"test-basic",
self._get_values_with_version(
values={
"chart": {
"metadata": "AA",
},
"labels": {"test-label": "TEST-VALUE"},
"fullnameOverride": "test-basic",
"dagProcessor": {"enabled": True},
},
version=version,
),
)
list_of_kind_names_tuples = {
(k8s_object["kind"], k8s_object["metadata"]["name"]) for k8s_object in k8s_objects
}
expected = {
("ServiceAccount", "test-basic-create-user-job"),
("ServiceAccount", "test-basic-migrate-database-job"),
("ServiceAccount", "test-basic-redis"),
("ServiceAccount", "test-basic-scheduler"),
("ServiceAccount", "test-basic-statsd"),
("ServiceAccount", "test-basic-triggerer"),
("ServiceAccount", "test-basic-dag-processor"),
("ServiceAccount", "test-basic-worker"),
("Secret", "test-basic-metadata"),
("Secret", "test-basic-broker-url"),
("Secret", "test-basic-fernet-key"),
("Secret", "test-basic-postgresql"),
("Secret", "test-basic-redis-password"),
("ConfigMap", "test-basic-config"),
("ConfigMap", "test-basic-statsd"),
("Role", "test-basic-pod-launcher-role"),
("Role", "test-basic-pod-log-reader-role"),
("RoleBinding", "test-basic-pod-launcher-rolebinding"),
("RoleBinding", "test-basic-pod-log-reader-rolebinding"),
("Service", "test-basic-postgresql-hl"),
("Service", "test-basic-postgresql"),
("Service", "test-basic-redis"),
("Service", "test-basic-statsd"),
("Service", "test-basic-worker"),
("Deployment", "test-basic-scheduler"),
("Deployment", "test-basic-statsd"),
(self.default_trigger_obj(version), "test-basic-triggerer"),
("Deployment", "test-basic-dag-processor"),
("StatefulSet", "test-basic-postgresql"),
("StatefulSet", "test-basic-redis"),
("StatefulSet", "test-basic-worker"),
("Job", "test-basic-create-user"),
("Job", "test-basic-run-airflow-migrations"),
}
if version == "2.3.2":
expected.add(("Secret", "test-basic-result-backend"))
if self._is_airflow_3_or_above(version):
expected.update(
{
("Service", "test-basic-triggerer"),
("Deployment", "test-basic-api-server"),
("Service", "test-basic-api-server"),
("ServiceAccount", "test-basic-api-server"),
("Secret", "test-basic-api-secret-key"),
("Secret", "test-basic-jwt-secret"),
}
)
else:
expected.update(
{
("Service", "test-basic-webserver"),
("Deployment", "test-basic-webserver"),
("ServiceAccount", "test-basic-webserver"),
("Secret", "test-basic-webserver-secret-key"),
}
)
assert list_of_kind_names_tuples == expected
assert len(k8s_objects) == len(expected)
for k8s_object in k8s_objects:
labels = jmespath.search("metadata.labels", k8s_object) or {}
if "helm.sh/chart" in labels:
chart_name = labels.get("helm.sh/chart")
else:
chart_name = labels.get("chart")
if chart_name and "postgresql" in chart_name:
continue
k8s_name = k8s_object["kind"] + ":" + k8s_object["metadata"]["name"]
assert labels.get("test-label") == "TEST-VALUE", (
f"Missing label test-label on {k8s_name}. Current labels: {labels}"
)
@pytest.mark.parametrize("version", ["2.3.2", "2.4.0", "3.0.0", "default"])
def test_basic_deployment_without_default_users(self, version):
k8s_objects = render_chart(
"test-basic",
values=self._get_values_with_version(
values={"webserver": {"defaultUser": {"enabled": False}}}, version=version
),
)
list_of_kind_names_tuples = [
(k8s_object["kind"], k8s_object["metadata"]["name"]) for k8s_object in k8s_objects
]
assert ("Job", "test-basic-create-user") not in list_of_kind_names_tuples
@pytest.mark.parametrize("version", ["2.3.2", "2.4.0", "3.0.0"])
def test_basic_deployment_without_statsd(self, version):
k8s_objects = render_chart(
"test-basic",
values=self._get_values_with_version(values={"statsd": {"enabled": False}}, version=version),
)
list_of_kind_names_tuples = [
(k8s_object["kind"], k8s_object["metadata"]["name"]) for k8s_object in k8s_objects
]
assert ("ServiceAccount", "test-basic-statsd") not in list_of_kind_names_tuples
assert ("ConfigMap", "test-basic-statsd") not in list_of_kind_names_tuples
assert ("Service", "test-basic-statsd") not in list_of_kind_names_tuples
assert ("Deployment", "test-basic-statsd") not in list_of_kind_names_tuples
@pytest.mark.parametrize(
("airflow_version", "executor"),
[
["2.10.0", "CeleryExecutor"],
["2.10.0", "CeleryKubernetesExecutor"],
["2.10.0", "CeleryExecutor,KubernetesExecutor"],
["3.0.0", "CeleryExecutor"],
["3.0.0", "CeleryExecutor,KubernetesExecutor"],
["default", "CeleryExecutor"],
["default", "CeleryExecutor,KubernetesExecutor"],
],
)
def test_network_policies_are_valid(self, airflow_version, executor):
k8s_objects = render_chart(
name="test-basic",
values=self._get_values_with_version(
values={
"networkPolicies": {"enabled": True},
"executor": executor,
"flower": {"enabled": True},
"pgbouncer": {"enabled": True},
},
version=airflow_version,
),
)
kind_names_tuples = {
(k8s_object["kind"], k8s_object["metadata"]["name"]) for k8s_object in k8s_objects
}
expected_kind_names = [
("NetworkPolicy", "test-basic-redis-policy"),
("NetworkPolicy", "test-basic-flower-policy"),
("NetworkPolicy", "test-basic-pgbouncer-policy"),
("NetworkPolicy", "test-basic-scheduler-policy"),
("NetworkPolicy", "test-basic-statsd-policy"),
("NetworkPolicy", "test-basic-worker-policy"),
]
if self._is_airflow_3_or_above(airflow_version):
expected_kind_names += [
("NetworkPolicy", "test-basic-api-server-policy"),
]
else:
expected_kind_names += [
("NetworkPolicy", "test-basic-webserver-policy"),
]
for kind_name in expected_kind_names:
assert kind_name in kind_names_tuples
@pytest.mark.parametrize(
("airflow_version", "executor"),
[
["2.10.0", "CeleryExecutor"],
["2.10.0", "CeleryExecutor,KubernetesExecutor"],
["3.0.0", "CeleryExecutor"],
["3.0.0", "CeleryExecutor,KubernetesExecutor"],
["default", "CeleryExecutor"],
["default", "CeleryExecutor,KubernetesExecutor"],
],
)
def test_labels_are_valid(self, airflow_version, executor):
"""Test labels are correctly applied on all objects created by this chart."""
release_name = "test-basic"
values = {
"labels": {"label1": "value1", "label2": "value2"},
"executor": executor,
"data": {
"resultBackendConnection": {
"user": "someuser",
"pass": "somepass",
"host": "somehost",
"protocol": "postgresql",
"port": 7777,
"db": "somedb",
"sslmode": "allow",
}
},
"pgbouncer": {"enabled": True},
"redis": {"enabled": True},
"ingress": {"enabled": True},
"networkPolicies": {"enabled": True},
"cleanup": {"enabled": True},
"databaseCleanup": {"enabled": True},
"flower": {"enabled": True},
"dagProcessor": {"enabled": True},
"logs": {"persistence": {"enabled": True}},
"dags": {"persistence": {"enabled": True}},
"postgresql": {"enabled": False}, # We won't check the objects created by the postgres chart
}
if airflow_version != "default":
values["airflowVersion"] = airflow_version
k8s_objects = render_chart(name=release_name, values=values)
kind_k8s_obj_labels_tuples = {
(k8s_object["metadata"]["name"], k8s_object["kind"]): k8s_object["metadata"]["labels"]
for k8s_object in k8s_objects
}
kind_names_tuples = [
(f"{release_name}-airflow-cleanup", "ServiceAccount", "airflow-cleanup-pods"),
(f"{release_name}-airflow-database-cleanup", "ServiceAccount", "database-cleanup"),
(f"{release_name}-config", "ConfigMap", "config"),
(f"{release_name}-airflow-create-user-job", "ServiceAccount", "create-user-job"),
(f"{release_name}-airflow-flower", "ServiceAccount", "flower"),
(f"{release_name}-metadata", "Secret", None),
(f"{release_name}-airflow-migrate-database-job", "ServiceAccount", "run-airflow-migrations"),
(f"{release_name}-airflow-pgbouncer", "ServiceAccount", "pgbouncer"),
(f"{release_name}-result-backend", "Secret", None),
(f"{release_name}-airflow-redis", "ServiceAccount", "redis"),
(f"{release_name}-airflow-scheduler", "ServiceAccount", "scheduler"),
(f"{release_name}-airflow-statsd", "ServiceAccount", "statsd"),
(f"{release_name}-airflow-worker", "ServiceAccount", "worker"),
(f"{release_name}-airflow-triggerer", "ServiceAccount", "triggerer"),
(f"{release_name}-airflow-dag-processor", "ServiceAccount", "dag-processor"),
(f"{release_name}-broker-url", "Secret", "redis"),
(f"{release_name}-cleanup", "CronJob", "airflow-cleanup-pods"),
(f"{release_name}-cleanup-role", "Role", None),
(f"{release_name}-cleanup-rolebinding", "RoleBinding", None),
(f"{release_name}-database-cleanup", "CronJob", "database-cleanup"),
(f"{release_name}-database-cleanup-role", "Role", None),
(f"{release_name}-database-cleanup-rolebinding", "RoleBinding", None),
(f"{release_name}-create-user", "Job", "create-user-job"),
(f"{release_name}-fernet-key", "Secret", None),
(f"{release_name}-flower", "Deployment", "flower"),
(f"{release_name}-flower", "Service", "flower"),
(f"{release_name}-flower-policy", "NetworkPolicy", "airflow-flower-policy"),
(f"{release_name}-flower-ingress", "Ingress", "flower-ingress"),
(f"{release_name}-pgbouncer", "Deployment", "pgbouncer"),
(f"{release_name}-pgbouncer", "Service", "pgbouncer"),
(f"{release_name}-pgbouncer-config", "Secret", "pgbouncer"),
(f"{release_name}-pgbouncer-policy", "NetworkPolicy", "airflow-pgbouncer-policy"),
(f"{release_name}-pgbouncer-stats", "Secret", "pgbouncer"),
(f"{release_name}-pod-launcher-role", "Role", None),
(f"{release_name}-pod-launcher-rolebinding", "RoleBinding", None),
(f"{release_name}-pod-log-reader-role", "Role", None),
(f"{release_name}-pod-log-reader-rolebinding", "RoleBinding", None),
(f"{release_name}-redis", "Service", "redis"),
(f"{release_name}-redis", "StatefulSet", "redis"),
(f"{release_name}-redis-policy", "NetworkPolicy", "redis-policy"),
(f"{release_name}-redis-password", "Secret", "redis"),
(f"{release_name}-run-airflow-migrations", "Job", "run-airflow-migrations"),
(f"{release_name}-scheduler", "Deployment", "scheduler"),
(f"{release_name}-scheduler-policy", "NetworkPolicy", "airflow-scheduler-policy"),
(f"{release_name}-statsd", "Deployment", "statsd"),
(f"{release_name}-statsd", "Service", "statsd"),
(f"{release_name}-statsd-policy", "NetworkPolicy", "statsd-policy"),
(f"{release_name}-worker", "Service", "worker"),
(f"{release_name}-worker", "StatefulSet", "worker"),
(f"{release_name}-worker-policy", "NetworkPolicy", "airflow-worker-policy"),
(f"{release_name}-triggerer", "StatefulSet", "triggerer"),
(f"{release_name}-dag-processor", "Deployment", "dag-processor"),
(f"{release_name}-logs", "PersistentVolumeClaim", "logs-pvc"),
(f"{release_name}-dags", "PersistentVolumeClaim", "dags-pvc"),
]
if self._is_airflow_3_or_above(airflow_version):
kind_names_tuples += [
(f"{release_name}-api-server", "Service", "api-server"),
(f"{release_name}-api-server", "Deployment", "api-server"),
(f"{release_name}-airflow-api-server", "ServiceAccount", "api-server"),
(f"{release_name}-api-secret-key", "Secret", "api-server"),
(f"{release_name}-api-server-policy", "NetworkPolicy", "airflow-api-server-policy"),
]
else:
kind_names_tuples += [
(f"{release_name}-airflow-webserver", "ServiceAccount", "webserver"),
(f"{release_name}-webserver", "Deployment", "webserver"),
(f"{release_name}-webserver", "Service", "webserver"),
(f"{release_name}-webserver-secret-key", "Secret", "webserver"),
(f"{release_name}-webserver-policy", "NetworkPolicy", "airflow-webserver-policy"),
(f"{release_name}-ingress", "Ingress", "airflow-ingress"),
]
for k8s_object_name, kind, component in kind_names_tuples:
expected_labels = {
"label1": "value1",
"label2": "value2",
"tier": "airflow",
"release": release_name,
"heritage": "Helm",
"chart": mock.ANY,
}
if component:
expected_labels["component"] = component
if k8s_object_name == f"{release_name}-scheduler":
expected_labels["executor"] = "CeleryExecutor"
if executor == "CeleryExecutor,KubernetesExecutor":
expected_labels["executor"] = "CeleryExecutor-KubernetesExecutor"
actual_labels = kind_k8s_obj_labels_tuples.pop((k8s_object_name, kind))
assert actual_labels == expected_labels
if kind_k8s_obj_labels_tuples:
warnings.warn(f"Unchecked objects: {kind_k8s_obj_labels_tuples.keys()}")
def test_labels_are_valid_on_job_templates(self):
"""Test labels are correctly applied on all job templates created by this chart."""
release_name = "test-basic"
k8s_objects = render_chart(
name=release_name,
values={
"labels": {"label1": "value1", "label2": "value2"},
"executor": "CeleryExecutor",
"dagProcessor": {"enabled": True},
"pgbouncer": {"enabled": True},
"redis": {"enabled": True},
"networkPolicies": {"enabled": True},
"cleanup": {"enabled": True},
"databaseCleanup": {"enabled": True},
"flower": {"enabled": True},
"postgresql": {"enabled": False}, # We won't check the objects created by the postgres chart
},
)
dict_of_labels_in_job_templates = {
k8s_object["metadata"]["name"]: k8s_object["spec"]["template"]["metadata"]["labels"]
for k8s_object in k8s_objects
if k8s_object["kind"] == "Job"
}
kind_names_tuples = [
(f"{release_name}-create-user", "create-user-job"),
(f"{release_name}-run-airflow-migrations", "run-airflow-migrations"),
]
for k8s_object_name, component in kind_names_tuples:
expected_labels = {
"label1": "value1",
"label2": "value2",
"tier": "airflow",
"release": release_name,
"component": component,
}
assert dict_of_labels_in_job_templates.get(k8s_object_name) == expected_labels
@pytest.mark.parametrize("airflow_version", ["2.10.0", "3.0.0", "default"])
def test_annotations_on_airflow_pods_in_deployment(self, airflow_version):
"""
Test Annotations are correctly applied.
Verifies all pods created Scheduler, Webserver/API-server & Worker deployments.
"""
release_name = "test-basic"
show_only = [
"templates/scheduler/scheduler-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/dag-processor/dag-processor-deployment.yaml",
"templates/flower/flower-deployment.yaml",
"templates/jobs/create-user-job.yaml",
"templates/jobs/migrate-database-job.yaml",
]
if self._is_airflow_3_or_above(airflow_version):
show_only += ["templates/api-server/api-server-deployment.yaml"]
else:
show_only += ["templates/webserver/webserver-deployment.yaml"]
k8s_objects = render_chart(
name=release_name,
values=self._get_values_with_version(
values={
"airflowPodAnnotations": {"test-annotation/safe-to-evict": "true"},
"flower": {"enabled": True},
"dagProcessor": {"enabled": True},
},
version=airflow_version,
),
show_only=show_only,
)
# pod_template_file is tested separately as it has extra setup steps
assert len(k8s_objects) == 8
for k8s_object in k8s_objects:
annotations = k8s_object["spec"]["template"]["metadata"]["annotations"]
assert "test-annotation/safe-to-evict" in annotations
assert "true" in annotations["test-annotation/safe-to-evict"]
def test_chart_is_consistent_with_official_airflow_image(self):
def get_k8s_objs_with_image(obj: list[Any] | dict[str, Any]) -> list[dict[str, Any]]:
"""Retrieve all the k8s objects that have an "image" key inside k8s obj or list of k8s obj."""
out = []
if isinstance(obj, list):
for item in obj:
out += get_k8s_objs_with_image(item)
if isinstance(obj, dict):
if "image" in obj:
out += [obj]
# include sub objs, just in case
for val in obj.values():
out += get_k8s_objs_with_image(val)
return out
image_repo = "test-airflow-repo/airflow"
k8s_objects = render_chart("test-basic", {"defaultAirflowRepository": image_repo})
objs_with_image = get_k8s_objs_with_image(k8s_objects)
for obj in objs_with_image:
image: str = obj["image"]
if image.startswith(image_repo):
assert "command" not in obj
@pytest.mark.parametrize(
"executor",
[
"LocalExecutor",
"LocalKubernetesExecutor",
"CeleryExecutor",
"KubernetesExecutor",
"CeleryKubernetesExecutor",
"airflow.providers.amazon.aws.executors.batch.AwsBatchExecutor",
"airflow.providers.amazon.aws.executors.ecs.AwsEcsExecutor",
"CeleryExecutor,KubernetesExecutor",
"CustomExecutor",
"my.org.CustomExecutor",
"CeleryExecutor,CustomExecutor",
],
)
def test_supported_executor(self, executor):
render_chart(
"test-basic",
{
"executor": executor,
},
)
@pytest.mark.parametrize(
"invalid_executor",
[
"Executor", # class name must include more than just Executor
"ExecutorCustom", # class name must end with Executor
"Customexecutor", # lowercase Executor is disallowed
],
)
def test_unsupported_executor(self, invalid_executor):
with pytest.raises(CalledProcessError):
render_chart(
"test-basic",
{
"executor": invalid_executor,
},
)
@pytest.mark.parametrize(
"image",
["airflow", "pod_template", "flower", "statsd", "redis", "pgbouncer", "pgbouncerExporter", "gitSync"],
)
def test_invalid_pull_policy(self, image):
with pytest.raises(CalledProcessError) as ex_ctx:
render_chart(
"test-basic",
{
"images": {image: {"pullPolicy": "InvalidPolicy"}},
},
)
assert (
'pullPolicy must be one of the following: "Always", "Never", "IfNotPresent"'
in ex_ctx.value.stderr.decode()
)
def test_invalid_dags_access_mode(self):
with pytest.raises(CalledProcessError) as ex_ctx:
render_chart(
"test-basic",
{
"dags": {"persistence": {"accessMode": "InvalidMode"}},
},
)
assert (
'accessMode must be one of the following: "ReadWriteOnce", "ReadOnlyMany", "ReadWriteMany"'
in ex_ctx.value.stderr.decode()
)
@pytest.mark.parametrize("namespace", ["abc", "123", "123abc", "123-abc"])
def test_namespace_names(self, namespace):
"""Test various namespace names to make sure they render correctly in templates."""
render_chart(namespace=namespace)
def test_postgres_connection_url_no_override(self):
# no nameoverride provided
doc = render_chart(
"my-release",
show_only=["templates/secrets/metadata-connection-secret.yaml"],
)[0]
assert (
base64.b64decode(doc["data"]["connection"]).decode("utf-8")
== "postgresql://postgres:postgres@my-release-postgresql.default:5432/postgres?sslmode=disable"
)
def test_postgres_connection_url_pgbouncer(self):
# no nameoverride, pgbouncer
doc = render_chart(
"my-release",
show_only=["templates/secrets/metadata-connection-secret.yaml"],
values={"pgbouncer": {"enabled": True}},
)[0]
assert (
base64.b64decode(doc["data"]["connection"]).decode("utf-8")
== "postgresql://postgres:postgres@my-release-pgbouncer.default:6543/"
"my-release-metadata?sslmode=disable"
)
def test_postgres_connection_url_pgbouncer_use_standard_naming(self):
# no nameoverride, pgbouncer and useStandardNaming
doc = render_chart(
"my-release",
show_only=["templates/secrets/metadata-connection-secret.yaml"],
values={"useStandardNaming": True, "pgbouncer": {"enabled": True}},
)[0]
assert (
base64.b64decode(doc["data"]["connection"]).decode("utf-8")
== "postgresql://postgres:postgres@my-release-airflow-pgbouncer.default:6543/"
"my-release-metadata?sslmode=disable"
)
def test_postgres_connection_url_name_override(self):
# nameoverride provided
doc = render_chart(
"my-release",
show_only=["templates/secrets/metadata-connection-secret.yaml"],
values={"postgresql": {"nameOverride": "overrideName"}},
)[0]
assert (
base64.b64decode(doc["data"]["connection"]).decode("utf-8")
== "postgresql://postgres:postgres@overrideName:5432/postgres?sslmode=disable"
)
def test_priority_classes(self):
pc = [
{"name": "class1", "preemptionPolicy": "PreemptLowerPriority", "value": 1000},
{"name": "class2", "preemptionPolicy": "Never", "value": 10000},
]
objs = render_chart(
"my-release",
show_only=["templates/priorityclasses/priority-classes.yaml"],
values={"priorityClasses": pc},
)
assert len(objs) == 2
for i in range(len(objs)):
assert objs[i]["kind"] == "PriorityClass"
assert objs[i]["apiVersion"] == "scheduling.k8s.io/v1"
assert objs[i]["metadata"]["name"] == ("my-release" + "-" + pc[i]["name"])
assert objs[i]["preemptionPolicy"] == pc[i]["preemptionPolicy"]
assert objs[i]["value"] == pc[i]["value"]
assert objs[i]["description"] == "This priority class will not cause other pods to be preempted."
def test_priority_classes_default_preemption(self):
obj = render_chart(
"my-release",
show_only=["templates/priorityclasses/priority-classes.yaml"],
values={
"priorityClasses": [
{"name": "class1", "value": 10000},
]
},
)[0]
assert obj["preemptionPolicy"] == "PreemptLowerPriority"
assert obj["description"] == "This priority class will not cause other pods to be preempted."
def test_redis_broker_connection_url(self):
# no nameoverride, redis
doc = render_chart(
"my-release",
show_only=["templates/secrets/redis-secrets.yaml"],
values={"redis": {"enabled": True, "password": "test1234"}},
)[1]
assert (
base64.b64decode(doc["data"]["connection"]).decode("utf-8")
== "redis://:test1234@my-release-redis:6379/0"
)
def test_redis_broker_connection_url_use_standard_naming(self):
# no nameoverride, redis and useStandardNaming
doc = render_chart(
"my-release",
show_only=["templates/secrets/redis-secrets.yaml"],
values={"useStandardNaming": True, "redis": {"enabled": True, "password": "test1234"}},
)[1]
assert (
base64.b64decode(doc["data"]["connection"]).decode("utf-8")
== "redis://:test1234@my-release-airflow-redis:6379/0"
)
@staticmethod
def default_trigger_obj(version):
if version in {"default", "3.0.0"}:
return "StatefulSet"
return "Deployment"
| TestBaseChartTest |
python | getsentry__sentry | tests/sentry/codecov/endpoints/test_test_results.py | {
"start": 3603,
"end": 13559
} | class ____(APITestCase):
endpoint_name = "sentry-api-0-test-results"
def setUp(self) -> None:
super().setUp()
self.organization = self.create_organization(owner=self.user)
self.integration = self.create_integration(
organization=self.organization,
external_id="1234",
name="testowner",
provider="github",
)
self.login_as(user=self.user)
def reverse_url(self, owner="testowner", repository="testrepo"):
"""Custom reverse URL method to handle required URL parameters"""
return reverse(
self.endpoint_name,
kwargs={
"organization_id_or_slug": self.organization.slug,
"owner": self.integration.id,
"repository": repository,
},
)
@patch("sentry.codecov.endpoints.test_results.test_results.CodecovApiClient")
def test_get_returns_mock_response_with_default_variables(
self, mock_codecov_client_class: MagicMock
) -> None:
mock_codecov_client_instance = Mock()
mock_response = Mock()
mock_response.json.return_value = mock_graphql_response_populated
mock_codecov_client_instance.query.return_value = mock_response
mock_codecov_client_class.return_value = mock_codecov_client_instance
url = self.reverse_url()
response = self.client.get(url)
mock_codecov_client_class.assert_called_once_with(git_provider_org="testowner")
# Verify the correct variables are passed to the GraphQL query
expected_variables = {
"owner": "testowner",
"repo": "testrepo",
"filters": {
"branch": None,
"parameter": None,
"interval": "INTERVAL_30_DAY",
"flags": None,
"term": None,
"test_suites": None,
},
"ordering": {
"direction": "DESC",
"parameter": "RUNS_FAILED",
},
"first": 20,
"last": None,
"before": None,
"after": None,
}
mock_codecov_client_instance.query.assert_called_once()
call_args = mock_codecov_client_instance.query.call_args
assert call_args[1]["variables"] == expected_variables
assert response.status_code == 200
assert len(response.data["results"]) == 2
assert response.data["pageInfo"]["endCursor"] == "cursor123"
assert response.data["pageInfo"]["hasNextPage"] is False
assert response.data["pageInfo"]["hasPreviousPage"] is False
assert response.data["pageInfo"]["startCursor"] is None
assert response.data["totalCount"] == 2
serializer_fields = set(NodeSerializer().fields.keys())
response_keys = set(response.data["results"][0].keys())
assert (
response_keys == serializer_fields
), f"Response keys {response_keys} don't match serializer fields {serializer_fields}"
@patch("sentry.codecov.endpoints.test_results.test_results.CodecovApiClient")
def test_get_with_query_parameters(self, mock_codecov_client_class: MagicMock) -> None:
mock_codecov_client_instance = Mock()
mock_response = Mock()
mock_response.json.return_value = mock_graphql_response_empty
mock_codecov_client_instance.query.return_value = mock_response
mock_codecov_client_class.return_value = mock_codecov_client_instance
url = self.reverse_url()
query_params = {
"branch": "develop",
"filterBy": "FLAKY_TESTS",
"sortBy": "-AVG_DURATION",
"interval": "INTERVAL_7_DAY",
"limit": "10",
"testSuites": ["../usr/local", "../usr/local-2"],
}
response = self.client.get(url, query_params)
# Verify the correct variables are passed with custom query parameters
expected_variables = {
"owner": "testowner",
"repo": "testrepo",
"filters": {
"branch": "develop",
"parameter": "FLAKY_TESTS",
"interval": "INTERVAL_7_DAY",
"flags": None,
"term": None,
"test_suites": ["../usr/local", "../usr/local-2"],
},
"ordering": {
"direction": "DESC",
"parameter": "AVG_DURATION",
},
"first": 10,
"last": None,
"before": None,
"after": None,
}
call_args = mock_codecov_client_instance.query.call_args
assert call_args[1]["variables"] == expected_variables
assert response.status_code == 200
@patch("sentry.codecov.endpoints.test_results.test_results.CodecovApiClient")
def test_get_with_term_filter(self, mock_codecov_client_class: MagicMock) -> None:
mock_codecov_client_instance = Mock()
mock_response = Mock()
mock_response.json.return_value = mock_graphql_response_empty
mock_codecov_client_instance.query.return_value = mock_response
mock_codecov_client_class.return_value = mock_codecov_client_instance
url = self.reverse_url()
query_params = {
"term": "test::function_with_underscores-and-dashes",
"branch": "develop",
"filterBy": "FLAKY_TESTS",
"sortBy": "AVG_DURATION",
"interval": "INTERVAL_7_DAY",
"limit": "15",
}
response = self.client.get(url, query_params)
expected_variables = {
"owner": "testowner",
"repo": "testrepo",
"filters": {
"branch": "develop",
"parameter": "FLAKY_TESTS",
"interval": "INTERVAL_7_DAY",
"flags": None,
"term": "test::function_with_underscores-and-dashes",
"test_suites": None,
},
"ordering": {
"direction": "ASC",
"parameter": "AVG_DURATION",
},
"first": 15,
"last": None,
"before": None,
"after": None,
}
call_args = mock_codecov_client_instance.query.call_args
assert call_args[1]["variables"] == expected_variables
assert response.status_code == 200
@patch("sentry.codecov.endpoints.test_results.test_results.CodecovApiClient")
def test_get_with_cursor_alone_uses_default_limit_and_navigation(
self, mock_codecov_client_class
):
mock_codecov_client_instance = Mock()
mock_response = Mock()
mock_response.json.return_value = mock_graphql_response_empty
mock_codecov_client_instance.query.return_value = mock_response
mock_codecov_client_class.return_value = mock_codecov_client_instance
url = self.reverse_url()
query_params = {"cursor": "some-cursor"}
response = self.client.get(url, query_params)
expected_variables = {
"owner": "testowner",
"repo": "testrepo",
"filters": {
"branch": None,
"parameter": None,
"interval": "INTERVAL_30_DAY",
"flags": None,
"term": None,
"test_suites": None,
},
"ordering": {
"direction": "DESC",
"parameter": "RUNS_FAILED",
},
"first": 20,
"last": None,
"before": None,
"after": "some-cursor",
}
call_args = mock_codecov_client_instance.query.call_args
assert call_args[1]["variables"] == expected_variables
assert response.status_code == 200
@patch("sentry.codecov.endpoints.test_results.test_results.CodecovApiClient")
def test_get_with_cursor_and_direction(self, mock_codecov_client_class: MagicMock) -> None:
mock_codecov_client_instance = Mock()
mock_response = Mock()
mock_response.json.return_value = mock_graphql_response_empty
mock_codecov_client_instance.query.return_value = mock_response
mock_codecov_client_class.return_value = mock_codecov_client_instance
url = self.reverse_url()
query_params = {"cursor": "cursor123", "limit": "10", "navigation": "prev"}
response = self.client.get(url, query_params)
expected_variables = {
"owner": "testowner",
"repo": "testrepo",
"filters": {
"branch": None,
"parameter": None,
"interval": "INTERVAL_30_DAY",
"flags": None,
"term": None,
"test_suites": None,
},
"ordering": {
"direction": "DESC",
"parameter": "RUNS_FAILED",
},
"first": None,
"last": 10,
"before": "cursor123",
"after": None,
}
call_args = mock_codecov_client_instance.query.call_args
assert call_args[1]["variables"] == expected_variables
assert response.status_code == 200
def test_get_with_negative_limit_returns_bad_request(self) -> None:
url = self.reverse_url()
query_params = {"limit": "-5"}
response = self.client.get(url, query_params)
assert response.status_code == 400
assert response.data == {"details": "provided `limit` parameter must be a positive integer"}
def test_get_with_limit_as_string_returns_bad_request(self) -> None:
url = self.reverse_url()
query_params = {"limit": "asdf"}
response = self.client.get(url, query_params)
assert response.status_code == 400
assert response.data == {"details": "provided `limit` parameter must be a positive integer"}
| TestResultsEndpointTest |
python | TheAlgorithms__Python | graphs/minimum_spanning_tree_prims2.py | {
"start": 6255,
"end": 8950
} | class ____[T]:
"""
Graph Undirected Weighted Class
Functions:
add_node: function to add a node in the graph
add_edge: function to add an edge between 2 nodes in the graph
"""
def __init__(self) -> None:
self.connections: dict[T, dict[T, int]] = {}
self.nodes: int = 0
def __repr__(self) -> str:
return str(self.connections)
def __len__(self) -> int:
return self.nodes
def add_node(self, node: T) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
self.connections[node] = {}
self.nodes += 1
def add_edge(self, node1: T, node2: T, weight: int) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(node1)
self.add_node(node2)
self.connections[node1][node2] = weight
self.connections[node2][node1] = weight
def prims_algo[T](
graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
"""
>>> graph = GraphUndirectedWeighted()
>>> graph.add_edge("a", "b", 3)
>>> graph.add_edge("b", "c", 10)
>>> graph.add_edge("c", "d", 5)
>>> graph.add_edge("a", "c", 15)
>>> graph.add_edge("b", "d", 100)
>>> dist, parent = prims_algo(graph)
>>> abs(dist["a"] - dist["b"])
3
>>> abs(dist["d"] - dist["b"])
15
>>> abs(dist["a"] - dist["c"])
13
"""
# prim's algorithm for minimum spanning tree
dist: dict[T, int] = dict.fromkeys(graph.connections, maxsize)
parent: dict[T, T | None] = dict.fromkeys(graph.connections)
priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(node, weight)
if priority_queue.is_empty():
return dist, parent
# initialization
node = priority_queue.extract_min()
dist[node] = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
dist[neighbour] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(neighbour, dist[neighbour])
parent[neighbour] = node
# running prim's algorithm
while not priority_queue.is_empty():
node = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
dist[neighbour] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(neighbour, dist[neighbour])
parent[neighbour] = node
return dist, parent
| GraphUndirectedWeighted |
python | pytorch__pytorch | torch/nn/modules/batchnorm.py | {
"start": 512,
"end": 4759
} | class ____(Module):
"""Common base of _InstanceNorm and _BatchNorm."""
_version = 2
__constants__ = ["track_running_stats", "momentum", "eps", "num_features", "affine"]
num_features: int
eps: float
momentum: Optional[float]
affine: bool
track_running_stats: bool
# WARNING: weight and bias purposely not defined here.
# See https://github.com/pytorch/pytorch/issues/39670
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: Optional[float] = 0.1,
affine: bool = True,
track_running_stats: bool = True,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.affine = affine
self.track_running_stats = track_running_stats
if self.affine:
self.weight = Parameter(torch.empty(num_features, **factory_kwargs))
self.bias = Parameter(torch.empty(num_features, **factory_kwargs))
else:
self.register_parameter("weight", None)
self.register_parameter("bias", None)
if self.track_running_stats:
self.register_buffer(
"running_mean", torch.zeros(num_features, **factory_kwargs)
)
self.register_buffer(
"running_var", torch.ones(num_features, **factory_kwargs)
)
self.running_mean: Optional[Tensor]
self.running_var: Optional[Tensor]
self.register_buffer(
"num_batches_tracked",
torch.tensor(
0,
dtype=torch.long,
# pyrefly: ignore [bad-argument-type]
**{k: v for k, v in factory_kwargs.items() if k != "dtype"},
),
)
self.num_batches_tracked: Optional[Tensor]
else:
self.register_buffer("running_mean", None)
self.register_buffer("running_var", None)
self.register_buffer("num_batches_tracked", None)
self.reset_parameters()
def reset_running_stats(self) -> None:
if self.track_running_stats:
# running_mean/running_var/num_batches... are registered at runtime depending
# if self.track_running_stats is on
self.running_mean.zero_() # type: ignore[union-attr]
self.running_var.fill_(1) # type: ignore[union-attr]
self.num_batches_tracked.zero_() # type: ignore[union-attr,operator]
def reset_parameters(self) -> None:
self.reset_running_stats()
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def _check_input_dim(self, input):
raise NotImplementedError
def extra_repr(self):
return (
"{num_features}, eps={eps}, momentum={momentum}, affine={affine}, "
"track_running_stats={track_running_stats}".format(**self.__dict__)
)
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
) -> None:
version = local_metadata.get("version", None)
if (version is None or version < 2) and self.track_running_stats:
# at version 2: added num_batches_tracked buffer
# this should have a default value of 0
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key not in state_dict:
state_dict[num_batches_tracked_key] = (
self.num_batches_tracked
if self.num_batches_tracked is not None
and self.num_batches_tracked.device != torch.device("meta")
else torch.tensor(0, dtype=torch.long)
)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
| _NormBase |
python | Textualize__textual | docs/examples/widgets/label.py | {
"start": 79,
"end": 241
} | class ____(App):
def compose(self) -> ComposeResult:
yield Label("Hello, world!")
if __name__ == "__main__":
app = LabelApp()
app.run()
| LabelApp |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/pipeline.py | {
"start": 1710,
"end": 4183
} | class ____(Step):
"""
Pipeline step to check if the connector is a candidate for migration to manifest-only.
"""
context: ConnectorContext
title: str = "Validate Manifest Migration Candidate"
airbyte_repo: git.Repo = git.Repo(search_parent_directories=True)
async def _run(self) -> StepResult:
connector = self.context.connector
invalid_files: list = []
## 1. Confirm the connector is low-code and not already manifest-only
if connector.language != ConnectorLanguage.LOW_CODE:
return StepResult(
step=self,
status=StepStatus.SKIPPED,
stderr="The connector is not a low-code connector.",
)
if connector.language == ConnectorLanguage.MANIFEST_ONLY:
return StepResult(
step=self,
status=StepStatus.SKIPPED,
stderr="The connector is already in manifest-only format.",
)
## 2. Detect invalid python files in the connector's source directory
for file in connector.python_source_dir_path.iterdir():
if file.name not in MANIFEST_ONLY_COMPATIBLE_FILES:
invalid_files.append(file.name)
if invalid_files:
return StepResult(
step=self,
status=StepStatus.SKIPPED,
stdout=f"The connector has unrecognized source files: {', '.join(invalid_files)}",
)
## 3. Detect connector class name to make sure it's inherited from source-declarative-manifest
# and does not override the `streams` method
connector_source_py = (connector.python_source_dir_path / "source.py").read_text()
if "YamlDeclarativeSource" not in connector_source_py:
return StepResult(
step=self,
status=StepStatus.SKIPPED,
stdout="The connector does not use the YamlDeclarativeSource class.",
)
if "def streams" in connector_source_py:
return StepResult(
step=self,
status=StepStatus.SKIPPED,
stdout="The connector overrides the streams method.",
)
# All checks passed, the connector is a valid candidate for migration
return StepResult(step=self, status=StepStatus.SUCCESS, stdout=f"{connector.technical_name} is a valid candidate for migration.")
| CheckIsManifestMigrationCandidate |
python | jpadilla__pyjwt | jwt/types.py | {
"start": 348,
"end": 2524
} | class ____(TypedDict, total=False):
"""Options for :py:func:`jwt.decode()` and :py:func:`jwt.api_jwt.decode_complete()` (TypedDict).
.. warning::
Some claims, such as ``exp``, ``iat``, ``jti``, ``nbf``, and ``sub``,
will only be verified if present. Please refer to the documentation below
for which ones, and make sure to include them in the ``require`` param
if you want to make sure that they are always present (and therefore always verified
if ``verify_{claim} = True`` for that claim).
"""
verify_signature: bool
"""Default: ``True``. Verify the JWT cryptographic signature."""
require: list[str]
"""Default: ``[]``. List of claims that must be present.
Example: ``require=["exp", "iat", "nbf"]``.
**Only verifies that the claims exists**. Does not verify that the claims are valid."""
strict_aud: bool
"""Default: ``False``. (requires ``verify_aud=True``) Check that the ``aud`` claim is a single value (not a list), and matches ``audience`` exactly."""
verify_aud: bool
"""Default: ``verify_signature``. Check that ``aud`` (audience) claim matches ``audience``."""
verify_exp: bool
"""Default: ``verify_signature``. Check that ``exp`` (expiration) claim value is in the future (if present in payload). """
verify_iat: bool
"""Default: ``verify_signature``. Check that ``iat`` (issued at) claim value is an integer (if present in payload). """
verify_iss: bool
"""Default: ``verify_signature``. Check that ``iss`` (issuer) claim matches ``issuer``. """
verify_jti: bool
"""Default: ``verify_signature``. Check that ``jti`` (JWT ID) claim is a string (if present in payload). """
verify_nbf: bool
"""Default: ``verify_signature``. Check that ``nbf`` (not before) claim value is in the past (if present in payload). """
verify_sub: bool
"""Default: ``verify_signature``. Check that ``sub`` (subject) claim is a string and matches ``subject`` (if present in payload). """
# The only difference between Options and FullOptions is that FullOptions
# required _every_ value to be there; Options doesn't require any
| Options |
python | walkccc__LeetCode | solutions/2592. Maximize Greatness of an Array/2592.py | {
"start": 0,
"end": 181
} | class ____:
def maximizeGreatness(self, nums: list[int]) -> int:
ans = 0
nums.sort()
for num in nums:
if num > nums[ans]:
ans += 1
return ans
| Solution |
python | django__django | tests/generic_views/views.py | {
"start": 919,
"end": 1339
} | class ____(generic.DetailView):
template_name = "generic_views/author_detail.html"
queryset = Author.objects.all()
def get(self, request, *args, **kwargs):
# Ensures get_context_object_name() doesn't reference self.object.
author = self.get_object()
context = {"custom_" + self.get_context_object_name(author): author}
return self.render_to_response(context)
| AuthorCustomDetail |
python | paramiko__paramiko | demos/demo_server.py | {
"start": 1201,
"end": 5860
} | class ____(paramiko.ServerInterface):
# 'data' is the output of base64.b64encode(key)
# (using the "user_rsa_key" files)
data = (
b"AAAAB3NzaC1yc2EAAAABIwAAAIEAyO4it3fHlmGZWJaGrfeHOVY7RWO3P9M7hp"
b"fAu7jJ2d7eothvfeuoRFtJwhUmZDluRdFyhFY/hFAh76PJKGAusIqIQKlkJxMC"
b"KDqIexkgHAfID/6mqvmnSJf0b5W8v5h2pI/stOSwTQ+pxVhwJ9ctYDhRSlF0iT"
b"UWT10hcuO4Ks8="
)
good_pub_key = paramiko.RSAKey(data=decodebytes(data))
def __init__(self):
self.event = threading.Event()
def check_channel_request(self, kind, chanid):
if kind == "session":
return paramiko.OPEN_SUCCEEDED
return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def check_auth_password(self, username, password):
if (username == "robey") and (password == "foo"):
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def check_auth_publickey(self, username, key):
print("Auth attempt with key: " + u(hexlify(key.get_fingerprint())))
if (username == "robey") and (key == self.good_pub_key):
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def check_auth_gssapi_with_mic(
self, username, gss_authenticated=paramiko.AUTH_FAILED, cc_file=None
):
"""
.. note::
We are just checking in `AuthHandler` that the given user is a
valid krb5 principal! We don't check if the krb5 principal is
allowed to log in on the server, because there is no way to do that
in python. So if you develop your own SSH server with paramiko for
a certain platform like Linux, you should call ``krb5_kuserok()`` in
your local kerberos library to make sure that the krb5_principal
has an account on the server and is allowed to log in as a user.
.. seealso::
`krb5_kuserok() man page
<http://www.unix.com/man-page/all/3/krb5_kuserok/>`_
"""
if gss_authenticated == paramiko.AUTH_SUCCESSFUL:
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def check_auth_gssapi_keyex(
self, username, gss_authenticated=paramiko.AUTH_FAILED, cc_file=None
):
if gss_authenticated == paramiko.AUTH_SUCCESSFUL:
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def enable_auth_gssapi(self):
return True
def get_allowed_auths(self, username):
return "gssapi-keyex,gssapi-with-mic,password,publickey"
def check_channel_shell_request(self, channel):
self.event.set()
return True
def check_channel_pty_request(
self, channel, term, width, height, pixelwidth, pixelheight, modes
):
return True
DoGSSAPIKeyExchange = True
# now connect
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("", 2200))
except Exception as e:
print("*** Bind failed: " + str(e))
traceback.print_exc()
sys.exit(1)
try:
sock.listen(100)
print("Listening for connection ...")
client, addr = sock.accept()
except Exception as e:
print("*** Listen/accept failed: " + str(e))
traceback.print_exc()
sys.exit(1)
print("Got a connection!")
try:
t = paramiko.Transport(client, gss_kex=DoGSSAPIKeyExchange)
t.set_gss_host(socket.getfqdn(""))
try:
t.load_server_moduli()
except:
print("(Failed to load moduli -- gex will be unsupported.)")
raise
t.add_server_key(host_key)
server = Server()
try:
t.start_server(server=server)
except paramiko.SSHException:
print("*** SSH negotiation failed.")
sys.exit(1)
# wait for auth
chan = t.accept(20)
if chan is None:
print("*** No channel.")
sys.exit(1)
print("Authenticated!")
server.event.wait(10)
if not server.event.is_set():
print("*** Client never asked for a shell.")
sys.exit(1)
chan.send("\r\n\r\nWelcome to my dorky little BBS!\r\n\r\n")
chan.send(
"We are on fire all the time! Hooray! Candy corn for everyone!\r\n"
)
chan.send("Happy birthday to Robot Dave!\r\n\r\n")
chan.send("Username: ")
f = chan.makefile("rU")
username = f.readline().strip("\r\n")
chan.send("\r\nI don't like you, " + username + ".\r\n")
chan.close()
except Exception as e:
print("*** Caught exception: " + str(e.__class__) + ": " + str(e))
traceback.print_exc()
try:
t.close()
except:
pass
sys.exit(1)
| Server |
python | great-expectations__great_expectations | tests/expectations/test_conditions.py | {
"start": 2844,
"end": 5301
} | class ____:
def test_column_hash_equal(self):
assert hash(Column("age")) == hash(Column("age"))
def test_column_hash_not_equal(self):
assert hash(Column("age")) != hash(Column("city"))
def test_less_than_operator(self):
col = Column("age")
result = col < 18
assert result == ComparisonCondition(column=col, operator=Operator.LESS_THAN, parameter=18)
def test_less_than_or_equal_operator(self):
col = Column("age")
result = col <= 18
assert result == ComparisonCondition(
column=col, operator=Operator.LESS_THAN_OR_EQUAL, parameter=18
)
def test_equal_operator(self):
col = Column("status")
result = col == "active"
assert result == ComparisonCondition(
column=col, operator=Operator.EQUAL, parameter="active"
)
def test_not_equal_operator(self):
col = Column("status")
result = col != "inactive"
assert result == ComparisonCondition(
column=col, operator=Operator.NOT_EQUAL, parameter="inactive"
)
def test_greater_than_operator(self):
col = Column("age")
result = col > 65
assert result == ComparisonCondition(
column=col, operator=Operator.GREATER_THAN, parameter=65
)
def test_greater_than_or_equal_operator(self):
col = Column("age")
result = col >= 65
assert result == ComparisonCondition(
column=col, operator=Operator.GREATER_THAN_OR_EQUAL, parameter=65
)
def test_is_in_method(self):
col = Column("status")
result = col.is_in(["active", "pending", "approved"])
assert result == ComparisonCondition(
column=col, operator=Operator.IN, parameter=["active", "pending", "approved"]
)
def test_is_not_in_method(self):
col = Column("status")
result = col.is_not_in(["inactive", "deleted"])
assert result == ComparisonCondition(
column=col, operator=Operator.NOT_IN, parameter=["inactive", "deleted"]
)
def test_is_null_method(self):
col = Column("email")
result = col.is_null()
assert result == NullityCondition(column=col, is_null=True)
def test_is_not_null_method(self):
col = Column("email")
result = col.is_not_null()
assert result == NullityCondition(column=col, is_null=False)
| TestColumn |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 948925,
"end": 949323
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("RequestedReviewer", graphql_name="node")
"""The item at the end of the edge."""
| RequestedReviewerEdge |
python | walkccc__LeetCode | solutions/1198. Find Smallest Common Element in All Rows/1198.py | {
"start": 0,
"end": 254
} | class ____:
def smallestCommonElement(self, mat: list[list[int]]) -> int:
MAX = 10000
count = [0] * (MAX + 1)
for row in mat:
for a in row:
count[a] += 1
if count[a] == len(mat):
return a
return -1
| Solution |
python | pytorch__pytorch | test/distributed/tensor/test_dtensor_testbase.py | {
"start": 353,
"end": 1446
} | class ____(DTensorTestBase):
"""
This class tests if the basic functionalities of DTensorTestBase are
working as expected on CPU, regardless of the presence of CUDA devices.
"""
@property
def backend(self):
return "gloo"
@property
def device_type(self) -> str:
return "cpu"
@property
def world_size(self):
return np.prod(list(self.mesh_dim_sizes.values())).item()
@property
def mesh_dim_sizes(self) -> dict[str, int]:
"""Mapping from mesh dimension names to sizes."""
return {"data": 2, "fsdp": 3, "tensor": 5}
def build_device_mesh(self) -> DeviceMesh:
return init_device_mesh(
self.device_type,
mesh_shape=tuple(self.mesh_dim_sizes.values()),
mesh_dim_names=tuple(self.mesh_dim_sizes.keys()),
)
@with_comms
def test_dtensor_testbase_destroy_pg(self):
# This tests destroy_pg() correctly finishes.
device_mesh = self.build_device_mesh() # noqa: F841
if __name__ == "__main__":
run_tests()
| DTensorTestBaseUtilCPUTest |
python | celery__celery | t/unit/backends/test_asynchronous.py | {
"start": 550,
"end": 4741
} | class ____:
"""
Base test class for the Default / Gevent / Eventlet drainers.
"""
interval = 0.1 # Check every tenth of a second
MAX_TIMEOUT = 10 # Specify a max timeout so it doesn't run forever
def get_drainer(self, environment):
with patch('celery.backends.asynchronous.detect_environment') as d:
d.return_value = environment
backend = Backend(self.app)
consumer = BaseResultConsumer(backend, self.app, backend.accept,
pending_results={},
pending_messages={})
consumer.drain_events = Mock(side_effect=self.result_consumer_drain_events)
return consumer.drainer
@pytest.fixture(autouse=True)
def setup_drainer(self):
raise NotImplementedError
@cached_property
def sleep(self):
"""
Sleep on the event loop.
"""
raise NotImplementedError
def schedule_thread(self, thread):
"""
Set up a thread that runs on the event loop.
"""
raise NotImplementedError
def teardown_thread(self, thread):
"""
Wait for a thread to stop.
"""
raise NotImplementedError
def result_consumer_drain_events(self, timeout=None):
"""
Subclasses should override this method to define the behavior of
drainer.result_consumer.drain_events.
"""
raise NotImplementedError
def test_drain_checks_on_interval(self):
p = promise()
def fulfill_promise_thread():
self.sleep(self.interval * 2)
p('done')
fulfill_thread = self.schedule_thread(fulfill_promise_thread)
on_interval = Mock()
for _ in self.drainer.drain_events_until(p,
on_interval=on_interval,
interval=self.interval,
timeout=self.MAX_TIMEOUT):
pass
self.teardown_thread(fulfill_thread)
assert p.ready, 'Should have terminated with promise being ready'
assert on_interval.call_count < 20, 'Should have limited number of calls to on_interval'
def test_drain_does_not_block_event_loop(self):
"""
This test makes sure that other greenlets can still operate while drain_events_until is
running.
"""
p = promise()
liveness_mock = Mock()
def fulfill_promise_thread():
self.sleep(self.interval * 2)
p('done')
def liveness_thread():
while 1:
if p.ready:
return
self.sleep(self.interval / 10)
liveness_mock()
fulfill_thread = self.schedule_thread(fulfill_promise_thread)
liveness_thread = self.schedule_thread(liveness_thread)
on_interval = Mock()
for _ in self.drainer.drain_events_until(p,
on_interval=on_interval,
interval=self.interval,
timeout=self.MAX_TIMEOUT):
pass
self.teardown_thread(fulfill_thread)
self.teardown_thread(liveness_thread)
assert p.ready, 'Should have terminated with promise being ready'
assert on_interval.call_count <= liveness_mock.call_count, \
'Should have served liveness_mock while waiting for event'
def test_drain_timeout(self):
p = promise()
on_interval = Mock()
with pytest.raises(socket.timeout):
for _ in self.drainer.drain_events_until(p,
on_interval=on_interval,
interval=self.interval,
timeout=self.interval * 5):
pass
assert not p.ready, 'Promise should remain un-fulfilled'
assert on_interval.call_count < 20, 'Should have limited number of calls to on_interval'
| DrainerTests |
python | huggingface__transformers | src/transformers/models/ijepa/modeling_ijepa.py | {
"start": 13295,
"end": 14554
} | class ____(PreTrainedModel):
config: IJepaConfig
base_model_prefix = "ijepa"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
_no_split_modules = ["IJepaEmbeddings", "IJepaLayer"]
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": IJepaLayer,
"attentions": IJepaSelfAttention,
}
@torch.no_grad()
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
init.trunc_normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, IJepaEmbeddings):
init.trunc_normal_(module.position_embeddings, mean=0.0, std=self.config.initializer_range)
if module.mask_token is not None:
init.zeros_(module.mask_token)
| IJepaPreTrainedModel |
python | xlwings__xlwings | xlwings/_xlwindows.py | {
"start": 54963,
"end": 55288
} | class ____(base_classes.Note):
def __init__(self, xl):
self.xl = xl
@property
def api(self):
return self.xl
@property
def text(self):
return self.xl.Text()
@text.setter
def text(self, value):
self.xl.Text(value)
def delete(self):
self.xl.Delete()
| Note |
python | getsentry__sentry | src/sentry/releases/endpoints/release_deploys.py | {
"start": 1927,
"end": 5119
} | class ____(serializers.Serializer):
name = serializers.CharField(
max_length=64,
required=False,
allow_blank=True,
allow_null=True,
help_text="The optional name of the deploy",
)
environment = serializers.CharField(
max_length=64, help_text="The environment you're deploying to"
)
url = serializers.URLField(
required=False,
allow_blank=True,
allow_null=True,
help_text="The optional URL that points to the deploy",
)
dateStarted = serializers.DateTimeField(
required=False,
allow_null=True,
help_text="An optional date that indicates when the deploy started",
)
dateFinished = serializers.DateTimeField(
required=False,
allow_null=True,
help_text="An optional date that indicates when the deploy ended. If not provided, the current time is used.",
)
projects = serializers.ListField(
child=ProjectField(scope=("project:read", "project:releases"), id_allowed=True),
required=False,
allow_empty=False,
help_text="The optional list of project slugs to create a deploy within. If not provided, deploys are created for all of the release's projects.",
)
def validate_environment(self, value):
if not Environment.is_valid_name(value):
raise serializers.ValidationError("Invalid value for environment")
return value
def create_deploy(
organization: Organization, release: Release, serializer: DeploySerializer
) -> Deploy:
result = serializer.validated_data
release_projects = list(release.projects.all())
projects = result.get("projects", release_projects)
invalid_projects = {project.slug for project in projects} - {
project.slug for project in release_projects
}
if len(invalid_projects) > 0:
raise ParameterValidationError(
f"Invalid projects ({', '.join(invalid_projects)}) for release {release.version}"
)
env = Environment.objects.get_or_create(
name=result["environment"], organization_id=organization.id
)[0]
for project in projects:
env.add_project(project)
deploy = Deploy.objects.create(
organization_id=organization.id,
release=release,
environment_id=env.id,
date_finished=result.get("dateFinished", timezone.now()),
date_started=result.get("dateStarted"),
name=result.get("name"),
url=result.get("url"),
)
deploy_created.send_robust(deploy=deploy, sender=create_deploy)
# XXX(dcramer): this has a race for most recent deploy, but
# should be unlikely to hit in the real world
Release.objects.filter(id=release.id).update(
total_deploys=F("total_deploys") + 1, last_deploy_id=deploy.id
)
for project in projects:
ReleaseProjectEnvironment.objects.create_or_update(
release=release,
environment=env,
project=project,
values={"last_deploy_id": deploy.id},
)
Deploy.notify_if_ready(deploy.id)
return deploy
@extend_schema(tags=["Releases"])
@region_silo_endpoint
| DeploySerializer |
python | Lightning-AI__lightning | tests/tests_pytorch/callbacks/test_prediction_writer.py | {
"start": 935,
"end": 5393
} | class ____(BasePredictionWriter):
def write_on_batch_end(self, *_, **__):
pass
def write_on_epoch_end(self, *_, **__):
pass
def test_prediction_writer_invalid_write_interval():
"""Test that configuring an unknown interval name raises an error."""
with pytest.raises(MisconfigurationException, match=r"`write_interval` should be one of \['batch"):
DummyPredictionWriter("something")
def test_prediction_writer_hook_call_intervals(tmp_path):
"""Test that the `write_on_batch_end` and `write_on_epoch_end` hooks get invoked based on the defined interval."""
DummyPredictionWriter.write_on_batch_end = Mock()
DummyPredictionWriter.write_on_epoch_end = Mock()
dataloader = DataLoader(RandomDataset(32, 64))
model = BoringModel()
cb = DummyPredictionWriter("batch_and_epoch")
trainer = Trainer(default_root_dir=tmp_path, logger=False, limit_predict_batches=4, callbacks=cb)
results = trainer.predict(model, dataloaders=dataloader)
assert len(results) == 4
assert cb.write_on_batch_end.call_count == 4
assert cb.write_on_epoch_end.call_count == 1
DummyPredictionWriter.write_on_batch_end.reset_mock()
DummyPredictionWriter.write_on_epoch_end.reset_mock()
cb = DummyPredictionWriter("batch_and_epoch")
trainer = Trainer(default_root_dir=tmp_path, logger=False, limit_predict_batches=4, callbacks=cb)
trainer.predict(model, dataloaders=dataloader, return_predictions=False)
assert cb.write_on_batch_end.call_count == 4
assert cb.write_on_epoch_end.call_count == 1
DummyPredictionWriter.write_on_batch_end.reset_mock()
DummyPredictionWriter.write_on_epoch_end.reset_mock()
cb = DummyPredictionWriter("batch")
trainer = Trainer(default_root_dir=tmp_path, logger=False, limit_predict_batches=4, callbacks=cb)
trainer.predict(model, dataloaders=dataloader, return_predictions=False)
assert cb.write_on_batch_end.call_count == 4
assert cb.write_on_epoch_end.call_count == 0
DummyPredictionWriter.write_on_batch_end.reset_mock()
DummyPredictionWriter.write_on_epoch_end.reset_mock()
cb = DummyPredictionWriter("epoch")
trainer = Trainer(default_root_dir=tmp_path, logger=False, limit_predict_batches=4, callbacks=cb)
trainer.predict(model, dataloaders=dataloader, return_predictions=False)
assert cb.write_on_batch_end.call_count == 0
assert cb.write_on_epoch_end.call_count == 1
@pytest.mark.parametrize("num_workers", [0, 2])
def test_prediction_writer_batch_indices(num_workers, tmp_path):
DummyPredictionWriter.write_on_batch_end = Mock()
DummyPredictionWriter.write_on_epoch_end = Mock()
dataloader = DataLoader(RandomDataset(32, 64), batch_size=4, num_workers=num_workers)
model = BoringModel()
writer = DummyPredictionWriter("batch_and_epoch")
trainer = Trainer(default_root_dir=tmp_path, logger=False, limit_predict_batches=4, callbacks=writer)
trainer.predict(model, dataloaders=dataloader)
writer.write_on_batch_end.assert_has_calls([
call(trainer, model, ANY, [0, 1, 2, 3], ANY, 0, 0),
call(trainer, model, ANY, [4, 5, 6, 7], ANY, 1, 0),
call(trainer, model, ANY, [8, 9, 10, 11], ANY, 2, 0),
call(trainer, model, ANY, [12, 13, 14, 15], ANY, 3, 0),
])
writer.write_on_epoch_end.assert_has_calls([
call(trainer, model, ANY, [[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]]),
])
def test_batch_level_batch_indices(tmp_path):
"""Test that batch_indices are returned when `return_predictions=False`."""
DummyPredictionWriter.write_on_batch_end = Mock()
class CustomBoringModel(BoringModel):
def on_predict_epoch_end(self, *args, **kwargs):
assert self.trainer.predict_loop.epoch_batch_indices == [[]]
writer = DummyPredictionWriter("batch")
model = CustomBoringModel()
dataloader = DataLoader(RandomDataset(32, 64), batch_size=4)
trainer = Trainer(default_root_dir=tmp_path, logger=False, limit_predict_batches=4, callbacks=writer)
trainer.predict(model, dataloaders=dataloader, return_predictions=False)
writer.write_on_batch_end.assert_has_calls([
call(trainer, model, ANY, [0, 1, 2, 3], ANY, 0, 0),
call(trainer, model, ANY, [4, 5, 6, 7], ANY, 1, 0),
call(trainer, model, ANY, [8, 9, 10, 11], ANY, 2, 0),
call(trainer, model, ANY, [12, 13, 14, 15], ANY, 3, 0),
])
| DummyPredictionWriter |
python | tensorflow__tensorflow | tensorflow/python/data/ops/dataset_ops.py | {
"start": 185740,
"end": 186050
} | class ____(DatasetV2):
"""Abstract class representing a dataset with one input."""
def __init__(self, input_dataset: DatasetV2, variant_tensor):
self._input_dataset = input_dataset
super(UnaryDataset, self).__init__(variant_tensor)
def _inputs(self):
return [self._input_dataset]
| UnaryDataset |
python | tensorflow__tensorflow | tensorflow/python/framework/ops.py | {
"start": 5183,
"end": 7032
} | class ____(object):
"""Store user-specified device and provide computation of merged device."""
def __init__(self, device_name_or_function) -> None:
self._device_name_or_function = device_name_or_function
self.display_name = str(self._device_name_or_function)
self.function = device_name_or_function
self.raw_string = None
if isinstance(device_name_or_function, pydev.MergeDevice):
self.is_null_merge = device_name_or_function.is_null_merge
elif callable(device_name_or_function):
self.is_null_merge = False
dev_func = self._device_name_or_function
func_name = function_utils.get_func_name(dev_func)
func_code = function_utils.get_func_code(dev_func)
if func_code:
fname = func_code.co_filename
lineno = func_code.co_firstlineno
else:
fname = "unknown"
lineno = -1
self.display_name = "%s<%s, %d>" % (func_name, fname, lineno)
elif device_name_or_function is None:
# NOTE(taylorrobie): This MUST be False. None signals a break in the
# device stack, so `is_null_merge` must be False for such a case to
# allow callers to safely skip over null merges without missing a None.
self.is_null_merge = False
else:
self.raw_string = device_name_or_function
self.function = pydev.merge_device(device_name_or_function)
self.is_null_merge = self.function.is_null_merge
# We perform this check in __init__ because it is of non-trivial cost,
# and self.string_merge is typically called many times.
self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)
def string_merge(self, node_def) -> str:
if self.fast_string_merge:
return self.function.shortcut_string_merge(node_def)
return compat.as_str(_device_string(self.function(node_def)))
| _UserDeviceSpec |
python | automl__auto-sklearn | autosklearn/pipeline/components/regression/__init__.py | {
"start": 802,
"end": 5353
} | class ____(AutoSklearnChoice):
@classmethod
def get_components(cls):
components = OrderedDict()
components.update(_regressors)
components.update(additional_components.components)
return components
@classmethod
def get_available_components(
cls, dataset_properties=None, include=None, exclude=None
):
available_comp = cls.get_components()
components_dict = OrderedDict()
if dataset_properties is None:
dataset_properties = {}
if include is not None and exclude is not None:
raise ValueError(
"The argument include and exclude cannot be used together."
)
if include is not None:
for incl in include:
if incl not in available_comp:
raise ValueError(
"Trying to include unknown component: " "%s" % incl
)
for name in available_comp:
if include is not None and name not in include:
continue
elif exclude is not None and name in exclude:
continue
entry = available_comp[name]
# Avoid infinite loop
if entry == RegressorChoice:
continue
if entry.get_properties()["handles_regression"] is False:
continue
if (
dataset_properties.get("multioutput") is True
and entry.get_properties()["handles_multioutput"] is False
):
continue
components_dict[name] = entry
return components_dict
def get_hyperparameter_search_space(
self,
feat_type: FEAT_TYPE_TYPE,
dataset_properties=None,
default=None,
include=None,
exclude=None,
):
if include is not None and exclude is not None:
raise ValueError(
"The argument include and exclude cannot be used together."
)
cs = ConfigurationSpace()
# Compile a list of all estimator objects for this problem
available_estimators = self.get_available_components(
dataset_properties=dataset_properties, include=include, exclude=exclude
)
if len(available_estimators) == 0:
raise ValueError("No regressors found")
if default is None:
defaults = ["random_forest", "support_vector_regression"] + list(
available_estimators.keys()
)
for default_ in defaults:
if default_ in available_estimators:
if include is not None and default_ not in include:
continue
if exclude is not None and default_ in exclude:
continue
default = default_
break
estimator = CategoricalHyperparameter(
"__choice__", list(available_estimators.keys()), default_value=default
)
cs.add_hyperparameter(estimator)
for estimator_name in available_estimators.keys():
estimator_configuration_space = available_estimators[
estimator_name
].get_hyperparameter_search_space(
feat_type=feat_type, dataset_properties=dataset_properties
)
parent_hyperparameter = {"parent": estimator, "value": estimator_name}
cs.add_configuration_space(
estimator_name,
estimator_configuration_space,
parent_hyperparameter=parent_hyperparameter,
)
return cs
def estimator_supports_iterative_fit(self):
return hasattr(self.choice, "iterative_fit")
def get_max_iter(self):
if self.estimator_supports_iterative_fit():
return self.choice.get_max_iter()
else:
raise NotImplementedError()
def get_current_iter(self):
if self.estimator_supports_iterative_fit():
return self.choice.get_current_iter()
else:
raise NotImplementedError()
def iterative_fit(self, X, y, n_iter=1, **fit_params):
# Allows to use check_is_fitted on the choice object
self.fitted_ = True
if fit_params is None:
fit_params = {}
return self.choice.iterative_fit(X, y, n_iter=n_iter, **fit_params)
def configuration_fully_fitted(self):
return self.choice.configuration_fully_fitted()
| RegressorChoice |
python | encode__django-rest-framework | tests/test_relations_hyperlink.py | {
"start": 1383,
"end": 1565
} | class ____(serializers.HyperlinkedModelSerializer):
class Meta:
model = ManyToManySource
fields = ('url', 'name', 'targets')
# ForeignKey
| ManyToManySourceSerializer |
python | pytorch__pytorch | benchmarks/dynamo/huggingface.py | {
"start": 10835,
"end": 22042
} | class ____(BenchmarkRunner):
def __init__(self):
super().__init__()
self.suite_name = "huggingface"
@property
def _config(self):
return load_yaml_file("huggingface.yaml")
@property
def _skip(self):
return self._config["skip"]
@property
def _accuracy(self):
return self._config["accuracy"]
@property
def skip_models(self):
return self._skip["all"]
@property
def skip_models_for_cpu(self):
return self._skip["device"]["cpu"]
@property
def fp32_only_models(self):
return self._config["only_fp32"]
@property
def skip_models_due_to_control_flow(self):
return self._skip["control_flow"]
def use_larger_multiplier_for_smaller_tensor(self, name):
return name in [
"GPT2ForSequenceClassification",
]
def _get_model_cls_and_config(self, model_name):
if model_name not in EXTRA_MODELS:
model_cls = get_module_cls_by_model_name(model_name)
config_cls = model_cls.config_class
config = config_cls()
# NB: some models need a pad token defined to handle BS > 1
if (
model_cls
in [
GPT2ForSequenceClassification,
GPTNeoForSequenceClassification,
GPTJForSequenceClassification,
]
or model_cls.__name__.startswith("Roberta")
or model_cls.__name__.startswith("Marian")
):
config.pad_token_id = 0
else:
config, model_cls = EXTRA_MODELS[model_name]
return model_cls, config
@download_retry_decorator
def _download_model(self, model_name):
model_cls, config = self._get_model_cls_and_config(model_name)
if "auto" in model_cls.__module__:
# Handle auto classes
model = model_cls.from_config(config)
else:
model = model_cls(config)
return model
def load_model(
self,
device,
model_name,
batch_size=None,
extra_args=None,
):
is_training = self.args.training
use_eval_mode = self.args.use_eval_mode
dtype = torch.float32
reset_rng_state()
# Get batch size
if model_name in BATCH_SIZE_KNOWN_MODELS:
batch_size_default = BATCH_SIZE_KNOWN_MODELS[model_name]
elif batch_size is None:
batch_size_default = 16
log.info(
f"Batch size not specified for {model_name}. Setting batch_size=16" # noqa: G004
)
if batch_size is None:
batch_size = batch_size_default
batch_size_divisors = self._config["batch_size"]["divisors"]
if model_name in batch_size_divisors:
batch_size = max(int(batch_size / batch_size_divisors[model_name]), 1)
log.info(
f"Running smaller batch size={batch_size} for {model_name}, orig batch_size={batch_size_default}" # noqa: G004
)
# Get model and example inputs
if model_name in HF_LLM_MODELS:
benchmark_cls = HF_LLM_MODELS[model_name]
model, example_inputs = benchmark_cls.get_model_and_inputs(
model_name, device
)
# Set this flag so that when we test for speedup, we use
# model.generate instead of using model.forward
self.hf_llm = True
def generate(self, _, example_inputs, collect_outputs=True):
return model.generate(**example_inputs)
self.generate = types.MethodType(generate, self)
else:
self.hf_llm = False
model_cls, config = self._get_model_cls_and_config(model_name)
model = self._download_model(model_name)
model = model.to(device, dtype=dtype)
example_inputs = generate_inputs_for_model(
model_cls, model, model_name, batch_size, device, include_loss_args=True
)
# So we can check for correct gradients without eliminating the dropout computation
for attr in dir(config):
if "drop" in attr and isinstance(getattr(config, attr), float):
setattr(config, attr, 1e-30)
# Turning off kv cache for torchbench models. This is not the right
# thing to do, but the pt2 dashboard is outdated. Real transformers
# benchmarks will be added soon using a different infra.
if hasattr(model, "config") and hasattr(model.config, "use_cache"):
model.config.use_cache = False
if self.args.enable_activation_checkpointing:
model.gradient_checkpointing_enable()
if (
is_training
and not use_eval_mode
and not (
self.args.accuracy and model_name in self._config["only_inference"]
)
):
model.train()
else:
model.eval()
self.validate_model(model, example_inputs)
return device, model_name, model, example_inputs, batch_size
def iter_model_names(self, args):
model_names = list(BATCH_SIZE_KNOWN_MODELS.keys()) + list(EXTRA_MODELS.keys())
model_names = set(model_names)
model_names = sorted(model_names)
start, end = self.get_benchmark_indices(len(model_names))
for index, model_name in enumerate(model_names):
if index < start or index >= end:
continue
if (
not re.search("|".join(args.filter), model_name, re.IGNORECASE)
or re.search("|".join(args.exclude), model_name, re.IGNORECASE)
or model_name in args.exclude_exact
or model_name in self.skip_models
):
continue
yield model_name
@property
def skip_accuracy_checks_large_models_dashboard(self):
if self.args.dashboard or self.args.accuracy:
return self._accuracy["skip"]["large_models"]
return set()
@property
def get_output_amp_train_process_func(self):
return {}
def pick_grad(self, name, is_training):
if is_training:
return torch.enable_grad()
else:
return torch.no_grad()
def get_tolerance_and_cosine_flag(self, is_training, current_device, name):
cosine = self.args.cosine
if is_training:
from torch._inductor import config as inductor_config
if (name in self._config["tolerance"]["higher_training"]) or (
inductor_config.max_autotune
and name in self._config["tolerance"]["higher_max_autotune_training"]
):
return 2e-2, cosine
else:
return 1e-2, cosine
else:
if (
current_device == "cpu"
and name in self._config["tolerance"]["higher_inference_cpu"]
):
return 5e-3, cosine
if name in self._config["tolerance"]["higher_inference"]:
return 4e-3, cosine
return 1e-3, cosine
def compute_loss(self, pred):
return pred[0]
def forward_pass(self, mod, inputs, collect_outputs=True):
with self.autocast(**self.autocast_arg):
res = mod(**inputs)
return res.logits if self.hf_llm else res
def forward_and_backward_pass(self, mod, inputs, collect_outputs=True):
cloned_inputs = clone_inputs(inputs)
self.optimizer_zero_grad(mod)
with self.autocast(**self.autocast_arg):
pred = mod(**cloned_inputs)
loss = self.compute_loss(pred)
self.grad_scaler.scale(loss).backward()
self.optimizer_step()
if collect_outputs:
return collect_results(mod, None, loss, cloned_inputs)
return None
def refresh_model_names_and_batch_sizes():
"""
This function reads the HF Fx tracer supported models and finds the largest
batch size that could fit on the GPU with PyTorch eager.
The resulting data is written in huggingface_models_list.txt.
Note - We only need to run this function if we believe that HF Fx tracer now
supports more models.
"""
import transformers.utils.fx as hf_fx
family = {}
lm_seen = set()
family_seen = set()
for cls_name in hf_fx._SUPPORTED_MODELS:
if "For" not in cls_name:
continue
model_cls = get_module_cls_by_model_name(cls_name)
# TODO: AttributeError: '*Config' object has no attribute 'vocab_size'
if model_cls in [
CLIPModel,
CLIPVisionModel,
# SwinForImageClassification,
# SwinForImageClassification,
# SwinForMaskedImageModeling,
# SwinModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
]:
continue
# TODO: AssertionError: Padding_idx must be within num_embeddings
if model_cls in [MarianForCausalLM, MarianMTModel, MarianModel]:
continue
# TODO: "model is not supported yet" from HFTracer
if model_cls in [HubertForSequenceClassification]:
continue
# TODO: shape mismatch in loss calculation
if model_cls in [LxmertForQuestionAnswering]:
continue
family_name = cls_name.split("For")[0]
if family_name not in family:
family[family_name] = []
if cls_name.endswith(("MaskedLM", "CausalLM")) and family_name not in lm_seen:
family[family_name].append(cls_name)
lm_seen.add(family_name)
elif (
cls_name.endswith(
("SequenceClassification", "ConditionalGeneration", "QuestionAnswering")
)
and family_name not in family_seen
):
family[family_name].append(cls_name)
family_seen.add(family_name)
elif cls_name.endswith("ImageClassification"):
family[family_name].append(cls_name)
chosen_models = set()
for members in family.values():
chosen_models.update(set(members))
# Add the EXTRA_MODELS
chosen_models.update(set(EXTRA_MODELS.keys()))
for model_name in sorted(chosen_models):
try:
subprocess.check_call(
[sys.executable]
+ sys.argv
+ ["--find-batch-sizes"]
+ [f"--only={model_name}"]
+ [f"--output={MODELS_FILENAME}"]
)
except subprocess.SubprocessError:
log.warning(f"Failed to find suitable batch size for {model_name}") # noqa: G004
def huggingface_main():
# Code to refresh model names and batch sizes
# if "--find-batch-sizes" not in sys.argv:
# refresh_model_names_and_batch_sizes()
logging.basicConfig(level=logging.WARNING)
warnings.filterwarnings("ignore")
main(HuggingfaceRunner())
if __name__ == "__main__":
huggingface_main()
| HuggingfaceRunner |
python | encode__django-rest-framework | rest_framework/serializers.py | {
"start": 3396,
"end": 10611
} | class ____(Field):
"""
The BaseSerializer class provides a minimal class which may be used
for writing custom serializer implementations.
Note that we strongly restrict the ordering of operations/properties
that may be used on the serializer in order to enforce correct usage.
In particular, if a `data=` argument is passed then:
.is_valid() - Available.
.initial_data - Available.
.validated_data - Only available after calling `is_valid()`
.errors - Only available after calling `is_valid()`
.data - Only available after calling `is_valid()`
If a `data=` argument is not passed then:
.is_valid() - Not available.
.initial_data - Not available.
.validated_data - Not available.
.errors - Not available.
.data - Available.
"""
def __init__(self, instance=None, data=empty, **kwargs):
self.instance = instance
if data is not empty:
self.initial_data = data
self.partial = kwargs.pop('partial', False)
self._context = kwargs.pop('context', {})
kwargs.pop('many', None)
super().__init__(**kwargs)
def __new__(cls, *args, **kwargs):
# We override this method in order to automatically create
# `ListSerializer` classes instead when `many=True` is set.
if kwargs.pop('many', False):
return cls.many_init(*args, **kwargs)
return super().__new__(cls, *args, **kwargs)
# Allow type checkers to make serializers generic.
def __class_getitem__(cls, *args, **kwargs):
return cls
@classmethod
def many_init(cls, *args, **kwargs):
"""
This method implements the creation of a `ListSerializer` parent
class when `many=True` is used. You can customize it if you need to
control which keyword arguments are passed to the parent, and
which are passed to the child.
Note that we're over-cautious in passing most arguments to both parent
and child classes in order to try to cover the general case. If you're
overriding this method you'll probably want something much simpler, eg:
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls()
return CustomListSerializer(*args, **kwargs)
"""
list_kwargs = {}
for key in LIST_SERIALIZER_KWARGS_REMOVE:
value = kwargs.pop(key, None)
if value is not None:
list_kwargs[key] = value
list_kwargs['child'] = cls(*args, **kwargs)
list_kwargs.update({
key: value for key, value in kwargs.items()
if key in LIST_SERIALIZER_KWARGS
})
meta = getattr(cls, 'Meta', None)
list_serializer_class = getattr(meta, 'list_serializer_class', ListSerializer)
return list_serializer_class(*args, **list_kwargs)
def to_internal_value(self, data):
raise NotImplementedError('`to_internal_value()` must be implemented.')
def to_representation(self, instance):
raise NotImplementedError('`to_representation()` must be implemented.')
def update(self, instance, validated_data):
raise NotImplementedError('`update()` must be implemented.')
def create(self, validated_data):
raise NotImplementedError('`create()` must be implemented.')
def save(self, **kwargs):
assert hasattr(self, '_errors'), (
'You must call `.is_valid()` before calling `.save()`.'
)
assert not self.errors, (
'You cannot call `.save()` on a serializer with invalid data.'
)
# Guard against incorrect use of `serializer.save(commit=False)`
assert 'commit' not in kwargs, (
"'commit' is not a valid keyword argument to the 'save()' method. "
"If you need to access data before committing to the database then "
"inspect 'serializer.validated_data' instead. "
"You can also pass additional keyword arguments to 'save()' if you "
"need to set extra attributes on the saved model instance. "
"For example: 'serializer.save(owner=request.user)'.'"
)
assert not hasattr(self, '_data'), (
"You cannot call `.save()` after accessing `serializer.data`."
"If you need to access data before committing to the database then "
"inspect 'serializer.validated_data' instead. "
)
validated_data = {**self.validated_data, **kwargs}
if self.instance is not None:
self.instance = self.update(self.instance, validated_data)
assert self.instance is not None, (
'`update()` did not return an object instance.'
)
else:
self.instance = self.create(validated_data)
assert self.instance is not None, (
'`create()` did not return an object instance.'
)
return self.instance
def is_valid(self, *, raise_exception=False):
assert hasattr(self, 'initial_data'), (
'Cannot call `.is_valid()` as no `data=` keyword argument was '
'passed when instantiating the serializer instance.'
)
if not hasattr(self, '_validated_data'):
try:
self._validated_data = self.run_validation(self.initial_data)
except ValidationError as exc:
self._validated_data = {}
self._errors = exc.detail
else:
self._errors = {}
if self._errors and raise_exception:
raise ValidationError(self.errors)
return not bool(self._errors)
@property
def data(self):
if hasattr(self, 'initial_data') and not hasattr(self, '_validated_data'):
msg = (
'When a serializer is passed a `data` keyword argument you '
'must call `.is_valid()` before attempting to access the '
'serialized `.data` representation.\n'
'You should either call `.is_valid()` first, '
'or access `.initial_data` instead.'
)
raise AssertionError(msg)
if not hasattr(self, '_data'):
if self.instance is not None and not getattr(self, '_errors', None):
self._data = self.to_representation(self.instance)
elif hasattr(self, '_validated_data') and not getattr(self, '_errors', None):
self._data = self.to_representation(self.validated_data)
else:
self._data = self.get_initial()
return self._data
@property
def errors(self):
if not hasattr(self, '_errors'):
msg = 'You must call `.is_valid()` before accessing `.errors`.'
raise AssertionError(msg)
return self._errors
@property
def validated_data(self):
if not hasattr(self, '_validated_data'):
msg = 'You must call `.is_valid()` before accessing `.validated_data`.'
raise AssertionError(msg)
return self._validated_data
# Serializer & ListSerializer classes
# -----------------------------------
| BaseSerializer |
python | html5lib__html5lib-python | html5lib/tests/support.py | {
"start": 2499,
"end": 4712
} | class ____(object):
def __init__(self, filename, newTestHeading="data", encoding="utf8"):
if encoding is None:
self.f = open(filename, mode="rb")
else:
self.f = codecs.open(filename, encoding=encoding)
self.encoding = encoding
self.newTestHeading = newTestHeading
def __iter__(self):
data = DefaultDict(None)
key = None
for line in self.f:
heading = self.isSectionHeading(line)
if heading:
if data and heading == self.newTestHeading:
# Remove trailing newline
data[key] = data[key][:-1]
yield self.normaliseOutput(data)
data = DefaultDict(None)
key = heading
data[key] = "" if self.encoding else b""
elif key is not None:
data[key] += line
if data:
yield self.normaliseOutput(data)
def isSectionHeading(self, line):
"""If the current heading is a test section heading return the heading,
otherwise return False"""
# print(line)
if line.startswith("#" if self.encoding else b"#"):
return line[1:].strip()
else:
return False
def normaliseOutput(self, data):
# Remove trailing newlines
for key, value in data.items():
if value.endswith("\n" if self.encoding else b"\n"):
data[key] = value[:-1]
return data
def convert(stripChars):
def convertData(data):
"""convert the output of str(document) to the format used in the testcases"""
data = data.split("\n")
rv = []
for line in data:
if line.startswith("|"):
rv.append(line[stripChars:])
else:
rv.append(line)
return "\n".join(rv)
return convertData
convertExpected = convert(2)
def errorMessage(input, expected, actual):
msg = ("Input:\n%s\nExpected:\n%s\nReceived\n%s\n" %
(repr(input), repr(expected), repr(actual)))
if sys.version_info[0] == 2:
msg = msg.encode("ascii", "backslashreplace")
return msg
| TestData |
python | getlogbook__logbook | src/logbook/ticketing.py | {
"start": 10725,
"end": 15881
} | class ____(BackendBase):
"""Implements a backend that writes into a MongoDB database."""
class _FixedTicketClass(Ticket):
@property
def ticket_id(self):
return self._id
class _FixedOccurrenceClass(Occurrence):
def __init__(self, db, row):
self.update_from_dict(json.loads(row["data"]))
self.db = db
self.time = row["time"]
self.ticket_id = row["ticket_id"]
self.occurrence_id = row["_id"]
# TODO: Update connection setup once PYTHON-160 is solved.
def setup_backend(self):
from pymongo import ASCENDING, DESCENDING
from pymongo.connection import Connection
try:
from pymongo.uri_parser import parse_uri
except ImportError:
from pymongo.connection import _parse_uri as parse_uri
from pymongo.errors import AutoReconnect
_connection = None
uri = self.options.pop("uri", "")
_connection_attempts = 0
parsed_uri = parse_uri(uri, Connection.PORT)
if type(parsed_uri) is tuple:
# pymongo < 2.0
database = parsed_uri[1]
else:
# pymongo >= 2.0
database = parsed_uri["database"]
# Handle auto reconnect signals properly
while _connection_attempts < 5:
try:
if _connection is None:
_connection = Connection(uri)
database = _connection[database]
break
except AutoReconnect:
_connection_attempts += 1
time.sleep(0.1)
self.database = database
# setup correct indexes
database.tickets.ensure_index([("record_hash", ASCENDING)], unique=True)
database.tickets.ensure_index([("solved", ASCENDING), ("level", ASCENDING)])
database.occurrences.ensure_index([("time", DESCENDING)])
def _order(self, q, order_by):
from pymongo import ASCENDING, DESCENDING
col = "%s" % (order_by[1:] if order_by[0] == "-" else order_by)
if order_by[0] == "-":
return q.sort(col, DESCENDING)
return q.sort(col, ASCENDING)
def _oid(self, ticket_id):
from pymongo.objectid import ObjectId
return ObjectId(ticket_id)
def record_ticket(self, record, data, hash, app_id):
"""Records a log record as ticket."""
db = self.database
ticket = db.tickets.find_one({"record_hash": hash})
if not ticket:
doc = {
"record_hash": hash,
"level": record.level,
"channel": record.channel or "",
"location": "%s:%d" % (record.filename, record.lineno), # noqa: UP031
"module": record.module or "<unknown>",
"occurrence_count": 0,
"solved": False,
"app_id": app_id,
}
ticket_id = db.tickets.insert(doc)
else:
ticket_id = ticket["_id"]
db.tickets.update(
{"_id": ticket_id},
{
"$inc": {"occurrence_count": 1},
"$set": {"last_occurrence_time": record.time, "solved": False},
},
)
# We store occurrences in a seperate collection so that
# we can make it a capped collection optionally.
db.occurrences.insert(
{
"ticket_id": self._oid(ticket_id),
"app_id": app_id,
"time": record.time,
"data": json.dumps(data),
}
)
def count_tickets(self):
"""Returns the number of tickets."""
return self.database.tickets.count()
def get_tickets(self, order_by="-last_occurrence_time", limit=50, offset=0):
"""Selects tickets from the database."""
query = (
self._order(self.database.tickets.find(), order_by)
.limit(limit)
.skip(offset)
)
return [self._FixedTicketClass(self, obj) for obj in query]
def solve_ticket(self, ticket_id):
"""Marks a ticket as solved."""
self.database.tickets.update({"_id": self._oid(ticket_id)}, {"solved": True})
def delete_ticket(self, ticket_id):
"""Deletes a ticket from the database."""
self.database.occurrences.remove({"ticket_id": self._oid(ticket_id)})
self.database.tickets.remove({"_id": self._oid(ticket_id)})
def get_ticket(self, ticket_id):
"""Return a single ticket with all occurrences."""
if ticket := self.database.tickets.find_one({"_id": self._oid(ticket_id)}):
return Ticket(self, ticket)
def get_occurrences(self, ticket, order_by="-time", limit=50, offset=0):
"""Selects occurrences from the database for a ticket."""
collection = self.database.occurrences
occurrences = (
self._order(collection.find({"ticket_id": self._oid(ticket)}), order_by)
.limit(limit)
.skip(offset)
)
return [self._FixedOccurrenceClass(self, obj) for obj in occurrences]
| MongoDBBackend |
python | pypa__hatch | src/hatch/project/constants.py | {
"start": 144,
"end": 524
} | class ____:
REQUESTED_TARGETS = "HATCH_BUILD_REQUESTED_TARGETS"
LOCATION = "HATCH_BUILD_LOCATION"
HOOKS_ONLY = "HATCH_BUILD_HOOKS_ONLY"
NO_HOOKS = "HATCH_BUILD_NO_HOOKS"
HOOKS_ENABLE = "HATCH_BUILD_HOOKS_ENABLE"
HOOK_ENABLE_PREFIX = "HATCH_BUILD_HOOK_ENABLE_"
CLEAN = "HATCH_BUILD_CLEAN"
CLEAN_HOOKS_AFTER = "HATCH_BUILD_CLEAN_HOOKS_AFTER"
| BuildEnvVars |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-gcs/source_gcs/source.py | {
"start": 1009,
"end": 4350
} | class ____(FileBasedSource):
@classmethod
def read_config(cls, config_path: str) -> Mapping[str, Any]:
"""
Override the default read_config to transform the legacy config format
into the new one before validating it against the new spec.
"""
config = FileBasedSource.read_config(config_path)
if not cls._is_file_based_config(config):
parsed_legacy_config = SourceGCSSpec(**config)
converted_config = LegacyConfigTransformer.convert(parsed_legacy_config)
emit_configuration_as_airbyte_control_message(converted_config)
return converted_config
return config
@staticmethod
def _is_file_based_config(config: Mapping[str, Any]) -> bool:
return "streams" in config
def spec(self, *args: Any, **kwargs: Any) -> ConnectorSpecification:
return ConnectorSpecification(
documentationUrl=self.spec_class.documentation_url(),
connectionSpecification=self.spec_class.schema(),
advanced_auth=AdvancedAuth(
auth_flow_type=AuthFlowType.oauth2_0,
predicate_key=["credentials", "auth_type"],
predicate_value="Client",
oauth_config_specification=OAuthConfigSpecification(
complete_oauth_output_specification={
"type": "object",
"properties": {
"access_token": {"type": "string", "path_in_connector_config": ["credentials", "access_token"]},
"refresh_token": {"type": "string", "path_in_connector_config": ["credentials", "refresh_token"]},
},
},
complete_oauth_server_input_specification={
"type": "object",
"properties": {"client_id": {"type": "string"}, "client_secret": {"type": "string"}},
},
complete_oauth_server_output_specification={
"type": "object",
"properties": {
"client_id": {"type": "string", "path_in_connector_config": ["credentials", "client_id"]},
"client_secret": {"type": "string", "path_in_connector_config": ["credentials", "client_secret"]},
},
},
),
),
)
def _make_default_stream(
self,
stream_config: FileBasedStreamConfig,
cursor: Optional[AbstractFileBasedCursor],
parsed_config: AbstractFileBasedSpec,
) -> AbstractFileBasedStream:
return GCSStream(
config=stream_config,
catalog_schema=self.stream_schemas.get(stream_config.name),
stream_reader=self.stream_reader,
availability_strategy=self.availability_strategy,
discovery_policy=self.discovery_policy,
parsers=self.parsers,
validation_policy=self._validate_and_get_validation_policy(stream_config),
errors_collector=self.errors_collector,
cursor=cursor,
use_file_transfer=use_file_transfer(parsed_config),
preserve_directory_structure=preserve_directory_structure(parsed_config),
)
| SourceGCS |
python | huggingface__transformers | tests/generation/test_utils.py | {
"start": 3059,
"end": 122124
} | class ____:
input_name = "input_ids"
model_tester = None
max_new_tokens = 3
def prepare_config_and_inputs_for_generate(self, batch_size=2):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# We don't want a few model inputs in our model input dictionary for generation tests
input_keys_to_ignore = [
# we don't want encoder-decoder models to start from filled decoder ids
"decoder_input_ids",
"decoder_attention_mask",
# we'll set cache use in each test differently
"use_cache",
# Ignore labels if it is in the input dict
"labels",
# model-specific exceptions should overload/overwrite this function
]
filtered_inputs_dict = {
k: v[:batch_size, ...] if isinstance(v, torch.Tensor) else v
for k, v in inputs_dict.items()
if k not in input_keys_to_ignore
}
# It is important set `eos_token_id` to `None` to avoid early stopping (would break for length-based checks)
text_gen_config = config.get_text_config(decoder=True)
if text_gen_config.eos_token_id is not None and text_gen_config.pad_token_id is None:
text_gen_config.pad_token_id = (
text_gen_config.eos_token_id
if isinstance(text_gen_config.eos_token_id, int)
else text_gen_config.eos_token_id[0]
)
text_gen_config.eos_token_id = None
text_gen_config.forced_eos_token_id = None
return config, filtered_inputs_dict
def _get_logits_processor_kwargs(self, do_sample=False, config=None):
logits_processor_kwargs = {
"bad_words_ids": [[1, 0]],
"repetition_penalty": 1.2,
"remove_invalid_values": True,
}
if do_sample:
logits_processor_kwargs.update(
{
"top_k": 10,
"top_p": 0.7,
"temperature": 0.7,
}
)
# TODO (joao, raushan): see this comment for a long-term fix
# https://github.com/huggingface/transformers/pull/33593#issuecomment-2361824264)
# This is a band-aid for VLM models, to ensure they don't generate image/video tokens which would cause them
# to crash. On pretrained models this isn't a risk, as they are trained to not generate these tokens.
if config is not None:
for key in [
"image_token_id",
"video_token_id",
"audio_token_id",
"vision_start_token_id",
"audio_start_token_id",
"audio_end_token_id",
"vision_end_token_id",
]:
token_index = getattr(config, key, None)
if token_index is None and hasattr(self, "model_tester"):
token_index = getattr(self.model_tester, key, None)
if token_index is not None and token_index < config.get_text_config().vocab_size:
logits_processor_kwargs["bad_words_ids"].append([token_index])
return logits_processor_kwargs
def _get_beam_kwargs(self, num_return_sequences=1):
beam_kwargs = {
"early_stopping": False,
"length_penalty": 2.0,
"num_beams": 2,
"num_return_sequences": num_return_sequences,
}
return beam_kwargs
def _greedy_generate(
self,
model,
inputs_dict,
output_scores=False,
output_logits=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
use_cache=True,
):
logits_processor_kwargs = self._get_logits_processor_kwargs(do_sample=False, config=model.config)
output_generate = model.generate(
do_sample=False,
num_beams=1,
max_new_tokens=self.max_new_tokens,
min_new_tokens=self.max_new_tokens,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_scores=output_scores,
output_logits=output_logits,
return_dict_in_generate=return_dict_in_generate,
use_cache=use_cache,
**logits_processor_kwargs,
**inputs_dict,
)
return output_generate
def _sample_generate(
self,
model,
inputs_dict,
num_return_sequences,
output_scores=False,
output_logits=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
use_cache=True,
):
torch.manual_seed(0)
logits_processor_kwargs = self._get_logits_processor_kwargs(do_sample=True, config=model.config)
output_generate = model.generate(
do_sample=True,
num_beams=1,
max_new_tokens=self.max_new_tokens,
min_new_tokens=self.max_new_tokens,
num_return_sequences=num_return_sequences,
output_scores=output_scores,
output_logits=output_logits,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
use_cache=use_cache,
**logits_processor_kwargs,
**inputs_dict,
)
return output_generate
def _beam_search_generate(
self,
model,
inputs_dict,
beam_kwargs,
output_scores=False,
output_logits=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
use_cache=True,
):
logits_processor_kwargs = self._get_logits_processor_kwargs(do_sample=False, config=model.config)
output_generate = model.generate(
do_sample=False,
max_new_tokens=self.max_new_tokens,
min_new_tokens=self.max_new_tokens,
output_scores=output_scores,
output_logits=output_logits,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
use_cache=use_cache,
**beam_kwargs,
**logits_processor_kwargs,
**inputs_dict,
)
return output_generate
def _beam_sample_generate(
self,
model,
inputs_dict,
beam_kwargs,
output_scores=False,
output_logits=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
use_cache=True,
):
torch.manual_seed(0)
logits_processor_kwargs = self._get_logits_processor_kwargs(do_sample=True, config=model.config)
output_generate = model.generate(
do_sample=True,
max_new_tokens=self.max_new_tokens,
min_new_tokens=self.max_new_tokens,
output_scores=output_scores,
output_logits=output_logits,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
use_cache=use_cache,
**beam_kwargs,
**logits_processor_kwargs,
**inputs_dict,
)
return output_generate
@pytest.mark.generate
def test_greedy_generate(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
model = model_class(config).to(torch_device).eval()
output_generate = self._greedy_generate(model=model, inputs_dict=inputs_dict)
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.shape[1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1])
@pytest.mark.generate
def test_greedy_generate_dict_outputs(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
if self.has_attentions:
config._attn_implementation = "eager" # can't output attentions otherwise
model = model_class(config).to(torch_device).eval()
output_generate = self._greedy_generate(
model=model,
inputs_dict=inputs_dict,
output_scores=True,
output_logits=True,
output_hidden_states=True,
output_attentions=self.has_attentions,
return_dict_in_generate=True,
use_cache=False,
)
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1)
self.assertIsInstance(output_generate, GenerateEncoderDecoderOutput)
else:
self.assertTrue(
output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]
)
self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput)
self._check_generate_outputs(output_generate, model.config)
@pytest.mark.generate
def test_greedy_generate_dict_outputs_use_cache(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
if self.has_attentions:
config._attn_implementation = "eager" # can't output attentions otherwise
if not hasattr(config.get_text_config(), "use_cache"):
self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
if any(model_name in model_class.__name__.lower() for model_name in ["rwkv"]):
self.skipTest(reason="Won't fix: model with non-standard dictionary output shapes")
config.is_decoder = True
model = model_class(config).to(torch_device).eval()
output_generate = self._greedy_generate(
model=model,
inputs_dict=inputs_dict,
output_scores=True,
output_logits=True,
output_hidden_states=True,
output_attentions=self.has_attentions,
return_dict_in_generate=True,
use_cache=True, # Enable cache
)
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1)
else:
self.assertTrue(
output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]
)
self._check_generate_outputs(output_generate, model.config, use_cache=True)
@pytest.mark.generate
def test_sample_generate(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
model = model_class(config).to(torch_device).eval()
output_generate = self._sample_generate(model=model, inputs_dict=inputs_dict, num_return_sequences=1)
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.shape[1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1])
@pytest.mark.generate
def test_sample_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
if self.has_attentions:
config._attn_implementation = "eager" # can't output attentions otherwise
model = model_class(config).to(torch_device).eval()
output_generate = self._sample_generate(
model=model,
inputs_dict=inputs_dict,
num_return_sequences=2,
output_scores=True,
output_logits=True,
output_hidden_states=True,
output_attentions=self.has_attentions,
return_dict_in_generate=True,
use_cache=False,
)
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1)
self.assertIsInstance(output_generate, GenerateEncoderDecoderOutput)
else:
self.assertTrue(
output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]
)
self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput)
self._check_generate_outputs(output_generate, model.config, num_return_sequences=2)
@pytest.mark.generate
def test_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
model = model_class(config).to(torch_device).eval()
beam_kwargs = self._get_beam_kwargs()
output_generate = self._beam_search_generate(model=model, inputs_dict=inputs_dict, beam_kwargs=beam_kwargs)
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.shape[1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1])
    @pytest.mark.generate
    def test_beam_search_generate_dict_output(self):
        """Beam search with `return_dict_in_generate=True`: checks the output type, the sequence length,
        and the per-step tensors via `_check_generate_outputs`."""
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            if self.has_attentions:
                config._attn_implementation = "eager"  # can't output attentions otherwise
            model = model_class(config).to(torch_device).eval()
            beam_kwargs = self._get_beam_kwargs()
            output_generate = self._beam_search_generate(
                model=model,
                inputs_dict=inputs_dict,
                beam_kwargs=beam_kwargs,
                output_scores=True,
                output_logits=True,
                output_hidden_states=True,
                output_attentions=self.has_attentions,
                return_dict_in_generate=True,
                use_cache=False,
            )
            if model.config.is_encoder_decoder:
                # Encoder-decoder: decoding starts from a single BOS token.
                self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1)
                self.assertIsInstance(output_generate, GenerateBeamEncoderDecoderOutput)
            else:
                # Decoder-only: the prompt is part of the returned sequences.
                self.assertTrue(
                    output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]
                )
                self.assertIsInstance(output_generate, GenerateBeamDecoderOnlyOutput)
            self._check_generate_outputs(
                output_generate,
                model.config,
                num_return_sequences=beam_kwargs["num_return_sequences"],
                num_beams=beam_kwargs["num_beams"],
            )
@pytest.mark.generate
def test_beam_search_generate_dict_outputs_use_cache(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
if not hasattr(config.get_text_config(), "use_cache"):
self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
if any(model_name in model_class.__name__.lower() for model_name in ["rwkv"]):
self.skipTest(reason="Won't fix: model with non-standard dictionary output shapes")
if self.has_attentions:
config._attn_implementation = "eager" # can't output attentions otherwise
model = model_class(config).to(torch_device).eval()
beam_kwargs = self._get_beam_kwargs()
config.is_decoder = True
model = model_class(config).to(torch_device).eval()
output_generate = self._beam_search_generate(
model=model,
inputs_dict=inputs_dict,
beam_kwargs=beam_kwargs,
output_scores=True,
output_logits=True,
output_hidden_states=True,
output_attentions=self.has_attentions,
return_dict_in_generate=True,
use_cache=True, # Enable cache
)
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1)
else:
self.assertTrue(
output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]
)
self._check_generate_outputs(
output_generate,
model.config,
use_cache=True,
num_return_sequences=beam_kwargs["num_return_sequences"],
num_beams=beam_kwargs["num_beams"],
)
    @require_accelerate
    @require_torch_multi_accelerator
    @pytest.mark.generate
    def test_model_parallel_beam_search(self):
        """Smoke test: beam search works when the model is sharded across devices via `device_map="auto"`."""
        if "xpu" in torch_device:
            if not (is_ipex_available("2.5") or version.parse(torch.__version__) >= version.parse("2.6")):
                self.skipTest(reason="device_map='auto' does not work with XPU devices")
        for model_class in self.all_generative_model_classes:
            # Models without `_no_split_modules` cannot be dispatched across devices by accelerate.
            if model_class._no_split_modules is None:
                continue
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            model = model_class(config).eval()
            with tempfile.TemporaryDirectory() as tmp_dir:
                # Round-trip through disk so `from_pretrained` can shard the weights across devices.
                model.cpu().save_pretrained(tmp_dir)
                new_model = model_class.from_pretrained(tmp_dir, device_map="auto")
                new_model.generate(
                    max_new_tokens=self.max_new_tokens,
                    num_beams=2,
                    **inputs_dict,
                )
@pytest.mark.generate
def test_beam_sample_generate(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
model = model_class(config).to(torch_device).eval()
beam_kwargs = self._get_beam_kwargs()
output_generate = self._beam_sample_generate(
model=model,
inputs_dict=inputs_dict,
beam_kwargs=beam_kwargs,
)
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.shape[1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1])
    @pytest.mark.generate
    def test_beam_sample_generate_dict_output(self):
        """Beam sampling with `return_dict_in_generate=True`: checks the output type, the sequence
        length, and the per-step tensors via `_check_generate_outputs`."""
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            if self.has_attentions:
                config._attn_implementation = "eager"  # can't output attentions otherwise
            model = model_class(config).to(torch_device).eval()
            beam_kwargs = self._get_beam_kwargs()
            output_generate = self._beam_sample_generate(
                model=model,
                inputs_dict=inputs_dict,
                beam_kwargs=beam_kwargs,
                output_scores=True,
                output_logits=True,
                output_hidden_states=True,
                output_attentions=self.has_attentions,
                return_dict_in_generate=True,
                use_cache=False,
            )
            if model.config.is_encoder_decoder:
                # Encoder-decoder: decoding starts from a single BOS token.
                self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1)
                self.assertIsInstance(output_generate, GenerateBeamEncoderDecoderOutput)
            else:
                # Decoder-only: the prompt is part of the returned sequences.
                self.assertTrue(
                    output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]
                )
                self.assertIsInstance(output_generate, GenerateBeamDecoderOnlyOutput)
            self._check_generate_outputs(
                output_generate,
                model.config,
                num_return_sequences=beam_kwargs["num_return_sequences"],
                num_beams=beam_kwargs["num_beams"],
            )
@pytest.mark.generate
def test_generate_without_input_ids(self):
config, _ = self.prepare_config_and_inputs_for_generate()
# if no bos token id => cannot generate from None
if config.bos_token_id is None:
self.skipTest(reason="bos_token_id is None")
# hack in case they are equal, otherwise the attn mask will be [0]
if config.bos_token_id == config.pad_token_id:
config.pad_token_id = None
for model_class in self.all_generative_model_classes:
model = model_class(config).to(torch_device)
model.eval()
output_ids_generate = model.generate(
do_sample=False, max_new_tokens=self.max_new_tokens, remove_invalid_values=True
)
self.assertIsNotNone(output_ids_generate)
    @parameterized.expand([("random",), ("same",)])
    @pytest.mark.generate
    def test_assisted_decoding_matches_greedy_search(self, assistant_type):
        """Assisted (speculative) decoding must reproduce greedy search outputs exactly, both with a
        randomly initialized assistant (no candidates accepted) and with the model itself as assistant
        (all candidates accepted)."""
        # This test ensures that the assisted generation does not introduce output changes over greedy search.
        # See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535 for more info.
        # NOTE: It breaks the pattern in the tests above, for multiple reasons:
        # - assisted_decoding, contrarily to the other methods, can't be called on its own (e.g. needs to
        # prepare the assistant encoder outputs in the main generate body);
        # - assisted_decoding does not support `use_cache = False`
        # - assisted_decoding does not support `batch_size > 1`
        for model_class in self.all_generative_model_classes:
            if model_class._is_stateful:
                self.skipTest(reason="Stateful models don't support assisted generation")
            if any(model_name in model_class.__name__.lower() for model_name in ["reformer"]):
                self.skipTest(reason="Won't fix: old model with different cache format")
            if any(
                model_name in model_class.__name__.lower()
                for model_name in [
                    "moshi",
                    "git",
                    "prophetnet",
                    "mllama",  # special cache sizes
                    "blip2",  # overridden `generate()` for all BLIP models
                    "instructblip",
                    "instructblipvideo",
                ]
            ):
                self.skipTest(reason="May fix in the future: need model-specific fixes")
            # enable cache
            config, inputs_dict = self.prepare_config_and_inputs_for_generate(batch_size=1)
            set_config_for_less_flaky_test(config)
            # force eager attention to support output attentions
            if self.has_attentions:
                config._attn_implementation = "eager"
            # NOTE: assisted generation only works with cache on at the moment.
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
            config.is_decoder = True
            model = model_class._from_config(config, attn_implementation="eager").to(torch_device).eval()
            set_model_for_less_flaky_test(model)
            config = model.config
            # Sets assisted generation arguments such that:
            # a) no EOS is generated, to ensure generation doesn't break early
            # b) the assistant model always generates two tokens when it is called, to ensure the input preparation of
            # the assistant model is correct
            # c) there are at least two forward passes in the main model, to ensure the input preparation of
            # the main model is correct
            # d) use a cache type compatible with rollbacks (only dynamic cache atm). Otherwise, there may be
            # differences vs model-specific default cache
            generation_kwargs = {
                "eos_token_id": -1,  # see a)
                "max_new_tokens": 4,  # see c)
                "num_beams": 1,
                "do_sample": False,
                "output_scores": True,
                "output_logits": True,
                "output_hidden_states": True,
                "output_attentions": self.has_attentions,
                "return_dict_in_generate": True,
                "use_cache": True,
                "cache_implementation": "dynamic_full",  # see d)
            }
            logits_processor_kwargs = self._get_logits_processor_kwargs(config=model.config)
            output_greedy = model.generate(**generation_kwargs, **inputs_dict, **logits_processor_kwargs)
            # test with the same assistant model or randomly init one
            # in the first case all candidate tokens are accepted, in the second none is accepted
            # case when some are accepted and some not is hard to reproduce, so let's hope this catches most errors :)
            if assistant_type == "random":
                assistant_model = model_class(config).to(torch_device).eval()
            else:
                assistant_model = model
            assistant_model.config._attn_implementation = "eager"
            assistant_model.generation_config.num_assistant_tokens = 2  # see b)
            assistant_model.generation_config.num_assistant_tokens_schedule = "constant"  # see b)
            generation_kwargs.update({"assistant_model": assistant_model})
            output_assisted = model.generate(**generation_kwargs, **inputs_dict, **logits_processor_kwargs)
            # default values of `has_similar_generate_outputs`
            atol, rtol = 1e-5, 1e-5
            # `gpt_oss` seems to have larger differences on CPU every other generated tokens, sth. like
            # 1e-9, 1e-5, 1e-9, 1e-5. While on GPU, they are all very small 1e-9.
            if model.config.model_type == "gpt_oss" and torch_device == "cpu":
                atol, rtol = 1e-4, 1e-4
            # The two outputs must match and their shape must be as expected
            self.assertTrue(has_similar_generate_outputs(output_greedy, output_assisted, atol=atol, rtol=rtol))
            for output in (output_greedy, output_assisted):
                self._check_generate_outputs(output, model.config, use_cache=True)
    @pytest.mark.generate
    def test_prompt_lookup_decoding_matches_greedy_search(self):
        """Prompt-lookup decoding must reproduce greedy search outputs exactly."""
        # This test ensures that the prompt lookup generation does not introduce output changes over greedy search.
        # This test is mostly a copy of test_assisted_decoding_matches_greedy_search
        for model_class in self.all_generative_model_classes:
            if model_class._is_stateful:
                self.skipTest(reason="Stateful models don't support assisted generation")
            old_models = [  # models that we won't commit resources fixing because they are old and have little usage
                # reformer: has a different cache format
                "reformer",
                # imagegpt: the output lm head uses `vocab_size - 1` tokens, so the `NoBadWordsLogitsProcessor` used
                # by prompt lookup may fail
                "imagegpt",
            ]
            if any(model_name in model_class.__name__.lower() for model_name in old_models):
                self.skipTest(reason="Won't fix: old model")
            if any(
                model_name in model_class.__name__.lower()
                for model_name in [
                    "moshi",
                    "git",
                    "prophetnet",
                    "mllama",  # special cache sizes
                    "blip2",  # overridden `generate()` for all BLIP models
                    "instructblip",
                    "instructblipvideo",
                ]
            ):
                self.skipTest(reason="May fix in the future: need model-specific fixes")
            # enable cache
            config, inputs_dict = self.prepare_config_and_inputs_for_generate(batch_size=1)
            # force eager attention to support output attentions
            if self.has_attentions:
                config._attn_implementation = "eager"
            # NOTE: assisted generation only works with cache on at the moment.
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
            config.is_decoder = True
            model = model_class(config).to(torch_device).eval()
            # Sets assisted generation arguments such that:
            # a) no EOS is generated, to ensure generation doesn't break early
            # b) the prompt lookup tries to give the model 2 tokens, to ensure the input preparation of
            # prompt lookup is correct
            # c) there are at least two forward passes in the main model, to ensure the input preparation of
            # the main model is correct
            # d) use a cache type compatible with rollbacks (only dynamic cache atm). Otherwise, there may be
            # differences vs model-specific default cache
            generation_kwargs = {
                "eos_token_id": -1,  # see a)
                "max_new_tokens": 4,  # see c)
                "num_beams": 1,
                "do_sample": False,
                "output_scores": True,
                "output_logits": True,
                "output_hidden_states": True,
                "output_attentions": self.has_attentions,
                "return_dict_in_generate": True,
                "use_cache": True,
                "cache_implementation": "dynamic_full",  # see d)
            }
            logits_processor_kwargs = self._get_logits_processor_kwargs(config=model.config)
            output_greedy = model.generate(**generation_kwargs, **inputs_dict, **logits_processor_kwargs)
            generation_kwargs.update({"prompt_lookup_num_tokens": 2})  # see b)
            output_prompt_lookup = model.generate(**generation_kwargs, **inputs_dict, **logits_processor_kwargs)
            # The two outputs must match and their shape must be as expected
            self.assertTrue(has_similar_generate_outputs(output_greedy, output_prompt_lookup))
            for output in (output_greedy, output_prompt_lookup):
                self._check_generate_outputs(output, model.config, use_cache=True)
    @pytest.mark.generate
    def test_assisted_decoding_sample(self):
        """Smoke test for assisted decoding with sampling: the run must complete and produce
        well-formed dict outputs (exact equality against plain sampling is not checked -- see below)."""
        # In this test we don't check assisted vs non-assisted output -- seeded assisted decoding with sample will not
        # match sample for the same seed, as the forward pass does not return the exact same logits (due to matmul with
        # different shapes, see https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535).
        for model_class in self.all_generative_model_classes:
            if model_class._is_stateful:
                self.skipTest(reason="Stateful models don't support assisted generation")
            if any(model_name in model_class.__name__.lower() for model_name in ["reformer"]):
                self.skipTest(reason="Won't fix: old model with different cache format")
            if any(
                model_name in model_class.__name__.lower()
                for model_name in [
                    "moshi",
                    "git",
                    "prophetnet",
                    "mllama",  # special cache sizes
                    "blip2",  # overridden `generate()` for all BLIP models
                    "instructblip",
                    "instructblipvideo",
                ]
            ):
                self.skipTest(reason="May fix in the future: need model-specific fixes")
            # enable cache
            config, inputs_dict = self.prepare_config_and_inputs_for_generate(batch_size=1)
            # force eager attention to support output attentions
            if self.has_attentions:
                config._attn_implementation = "eager"
            # NOTE: assisted generation only works with cache on at the moment.
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
            config.is_decoder = True
            model = model_class._from_config(config, attn_implementation="eager").to(torch_device).eval()
            config = model.config
            # Sets assisted generation arguments such that:
            # a) no EOS is generated, to ensure generation doesn't break early
            # b) the assistant model always generates two tokens when it is called, to ensure the input preparation of
            # the assistant model is correct
            # c) there are at least two forward passes in the main model, to ensure the input preparation of
            # the main model is correct
            # d) use a cache type compatible with rollbacks (only dynamic cache atm). Otherwise, there may be
            # differences vs model-specific default cache
            assistant_model = model
            assistant_model.generation_config.num_assistant_tokens = 2  # see b)
            assistant_model.generation_config.num_assistant_tokens_schedule = "constant"  # see b)
            generation_kwargs = {
                "eos_token_id": -1,  # see a)
                "max_new_tokens": 4,  # see c)
                "num_beams": 1,
                "do_sample": True,
                "assistant_model": assistant_model,
                "output_scores": True,
                "output_logits": True,
                "output_hidden_states": True,
                "output_attentions": self.has_attentions,
                "return_dict_in_generate": True,
                "use_cache": True,
                "cache_implementation": "dynamic_full",  # see d)
            }
            logits_processor_kwargs = self._get_logits_processor_kwargs(config=model.config)
            output_assisted = model.generate(**generation_kwargs, **inputs_dict, **logits_processor_kwargs)
            self._check_generate_outputs(output_assisted, config, use_cache=True)
@pytest.mark.generate
def test_prompt_lookup_decoding_stops_at_eos(self):
# This test ensures that the prompt lookup generation stops at eos token and does not suggest more tokens
# (see https://github.com/huggingface/transformers/pull/31301)
# The main idea is to have an ngram (unigram in our case) that is repeated twice in the input ids.
# First time at the very end, so input ends with the unigrams, and second any arbitrary location.
# Also, we need an EOS token which will be injected just after the arbitrary located ngram.
# We verify that PLD will not copy and propose candidated that contain an EOS token, even if there are overlapping ngrams
# in input ids. Otherwise a proposed EOS along with the trailing (ngrams-1) tokens might be accepted by the target model.
# That seems as if the model "generated" and EOS but didn't stop from user's perspective
input_ids = torch.randint(1, 50, (1, 10), device=torch_device) # generate inputs in range from 1-50
arbitrary_ngram = 51 # this is the arbitrary ngram, specifically chosen OOV to prevent flaky tests
input_ids[:, 3] = arbitrary_ngram # set pre-eos to arbitrary_ngram which is for sure not present in inputs
input_ids[:, -1] = arbitrary_ngram # put arbitrary_ngram in the end for the necessary match to happen
eos_token_id = torch.tensor([0], device=torch_device)
input_ids[:, 4] = eos_token_id # inject eos-token-id in input ids so that it is located after arbitrary_ngram
# init cand geenerator with max_matching_ngram_size=1 to match per-token
candidate_generator = PromptLookupCandidateGenerator(
eos_token_id=eos_token_id, num_output_tokens=4, max_matching_ngram_size=1
)
output_prompt_lookup = candidate_generator.get_candidates(input_ids)[0]
# PLD shouldn't propose any new tokens based on eos-match
self.assertTrue(output_prompt_lookup.shape[-1] == 10)
    @pytest.mark.generate
    def test_left_padding_compatibility(
        self, unpadded_custom_inputs: dict | None = None, padded_custom_inputs: dict | None = None
    ):
        """
        Tests that adding left-padding yields the same logits as the original input. Exposes arguments for custom
        inputs for overwrites, to prevent full rewrites of the test when all we need is model-specific input handling.
        ! If you overwrite this test, make sure to document why you need to overwrite it !

        NOTE: left-padding results in small numerical differences. This is expected.
        See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535

        Args:
            unpadded_custom_inputs (`dict`, *optional*):
                Used in test overwrites. Custom inputs to add/overwrite over the default test inputs.
            padded_custom_inputs (`dict`, *optional*):
                Used in test overwrites. Custom inputs to add/overwrite over the padded test input handcrafted in this
                test. Commonly used e.g. with multimodal cross attention masks.
        """
        # First, filter out models that don't support left padding
        # 1. The model must support padding
        if not self.has_attentions:
            self.skipTest(reason="This model doesn't support padding.")

        # 2. [encoder-decoder] The model must be a decoder-only architecture. Encoder-based architectures can use
        # right-padding in their (encoder) inputs. Encoder-decoder may use left-padding on their decoder inputs
        # [TODO: lift this restriction? technically, we can test padding the decoder inputs.]
        decoder_only_classes = []
        for model_class in self.all_generative_model_classes:
            config, _ = self.prepare_config_and_inputs_for_generate()
            if config.is_encoder_decoder:
                continue
            else:
                decoder_only_classes.append(model_class)
        if len(decoder_only_classes) == 0:
            self.skipTest(reason="No decoder-only architecture available for this model.")

        # 3. [old models] Decoder-only architectures derived from encoder-decoder models could support it in theory,
        # but we haven't added support for it yet. We skip these models for now.
        has_encoder_attributes = any(
            attr_name
            for attr_name in config.to_dict()
            if attr_name.startswith("encoder") and attr_name != "encoder_no_repeat_ngram_size"
        )
        if has_encoder_attributes:
            self.skipTest(
                reason="The decoder-only derived from encoder-decoder models are not expected to support left-padding."
            )

        # Now we can start testing
        unpadded_custom_inputs = unpadded_custom_inputs or {}
        padded_custom_inputs = padded_custom_inputs or {}

        def _prepare_model_kwargs(model_inputs, signature):
            """Build forward kwargs, deriving `position_ids`/`cache_position` when the model accepts them."""
            model_kwargs = {"input_ids": model_inputs["input_ids"], "attention_mask": model_inputs["attention_mask"]}
            if "position_ids" in signature:
                # Position ids must skip padded positions; padded slots get a dummy value of 1.
                position_ids = torch.cumsum(model_inputs["attention_mask"], dim=-1) - 1
                position_ids.masked_fill_(model_inputs["attention_mask"] == 0, 1)
                model_kwargs["position_ids"] = position_ids
            if "cache_position" in signature:
                cache_position = torch.arange(model_inputs["input_ids"].shape[1], device=torch_device)
                model_kwargs["cache_position"] = cache_position
            # forward all other inputs, if they are in the signature
            model_kwargs.update({k: v for k, v in model_inputs.items() if k not in model_kwargs and k in signature})
            return model_kwargs

        for model_class in decoder_only_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            model = model_class(config).to(torch_device).eval()
            signature = inspect.signature(model.forward).parameters.keys()

            # No cache to simplify the test (some models need careful init)
            model.generation_config.use_cache = False

            inputs_dict.update(unpadded_custom_inputs)
            # special case: an absent `attention_mask` is equivalent to a full (all-ones) mask
            inputs_dict["attention_mask"] = inputs_dict.get("attention_mask", None)
            if inputs_dict["attention_mask"] is None:
                inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["input_ids"])

            # Get output logits from inputs without padding
            model_kwargs_wo_padding = _prepare_model_kwargs(inputs_dict, signature)
            next_logits_wo_padding = model(**model_kwargs_wo_padding).logits[:, -1, :]

            # Prepare padding on common inputs (pad length 32)
            input_ids = inputs_dict["input_ids"]
            attention_mask = inputs_dict["attention_mask"]
            token_type_ids = inputs_dict.get("token_type_ids", None)
            pad_token_id = getattr(config.get_text_config(decoder=True), "pad_token_id", None) or 0
            pad_size = (input_ids.shape[0], 32, *input_ids.shape[2:])
            padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * pad_token_id
            padded_input_ids = torch.cat((padding, input_ids), dim=1)
            padded_attention_mask = torch.cat(
                (torch.zeros(pad_size[:2], dtype=input_ids.dtype, device=torch_device), attention_mask), dim=1
            )
            if token_type_ids is not None:
                padded_token_type_ids = torch.cat(
                    (
                        # Assumption: `0` is a good default value for padding token type ids
                        torch.zeros(pad_size[:2], dtype=input_ids.dtype, device=torch_device),
                        token_type_ids,
                    ),
                    dim=1,
                )
            else:
                padded_token_type_ids = None

            # Get output logits from inputs with left-padding (pad length 32)
            padded_inputs_dict = copy.deepcopy(inputs_dict)
            padded_inputs_dict["input_ids"] = padded_input_ids
            padded_inputs_dict["attention_mask"] = padded_attention_mask
            if padded_token_type_ids is not None:
                padded_inputs_dict["token_type_ids"] = padded_token_type_ids
            padded_inputs_dict.update(padded_custom_inputs)
            model_kwargs_with_padding = _prepare_model_kwargs(padded_inputs_dict, signature)
            next_logits_with_padding = model(**model_kwargs_with_padding).logits[:, -1, :]

            # They should result in very similar logits
            torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=1e-5, atol=1e-5)
@pytest.mark.generate
def test_past_key_values_format(self):
"""
Test that the KV cache is formatted correctly.
Having a standard KV cache format is important for a consistent API (and for advanced generation methods).
"""
for model_class in self.all_generative_model_classes:
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, skip the test
decoder_config = config.get_text_config(decoder=True)
if not hasattr(decoder_config, "use_cache"):
self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
model = model_class(config).to(torch_device)
model = model.eval()
if "use_cache" not in inputs:
inputs["use_cache"] = True
outputs = model(**inputs)
if "past_key_values" not in outputs:
self.skipTest(reason="This model doesn't return `past_key_values`")
cache = outputs["past_key_values"]
if config.is_encoder_decoder:
batch_size, seq_length = inputs["decoder_input_ids"].shape[:2]
else:
batch_size, seq_length = inputs["input_ids"].shape[:2]
# Check the format
self._check_past_key_values_for_generate(batch_size, cache, seq_length, decoder_config)
    @pytest.mark.generate
    def test_generate_from_random_inputs_embeds(self):
        """
        Text-only: Tests that different `inputs_embeds` generate different outputs in models with `main_input=="input_ids"`.
        Some models have 'images' as main input and thus can't generate with random text embeddings.
        See `test_generate_from_inputs_embeds` for more general checks.
        """
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            # Encoder-decoder models handle `inputs_embeds` natively in the decoder; not in scope here.
            if config.is_encoder_decoder:
                continue
            config.is_decoder = True
            model = model_class(config).to(torch_device).eval()
            # Only models that explicitly accept `inputs_embeds` in generation are tested.
            if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters:
                continue
            # No easy fix, let's skip the test for now
            has_complex_embeds_computation = any(
                model_name in model_class.__name__.lower() for model_name in ["moshi"]
            )
            if model_class.main_input_name != "input_ids" or has_complex_embeds_computation:
                self.skipTest(
                    "The model's main input name in not `input_ids` and we need kwargs from input dict as well."
                )
            # Embedding scaling would make `inputs_embeds` differ from the raw embedding lookup.
            if hasattr(config, "scale_embedding"):
                config.scale_embedding = False
            generation_kwargs = {
                "return_dict_in_generate": True,
                "output_scores": True,
                "do_sample": False,
                "max_new_tokens": 5,
                "min_new_tokens": 5,  # generate exactly 5 tokens
            }
            input_ids = inputs_dict.pop("input_ids")
            inputs_embeds = model.get_input_embeddings()(input_ids)
            outputs_from_embeds = model.generate(input_ids, inputs_embeds=inputs_embeds, **generation_kwargs)
            # If we pass different inputs_embeds, we should get different outputs (the output text may be the
            # same, but the logits will almost surely be different)
            random_embeds = torch.rand_like(inputs_embeds)
            outputs_from_rand_embeds = model.generate(
                input_ids=input_ids, inputs_embeds=random_embeds, **generation_kwargs
            )
            for i in range(len(outputs_from_rand_embeds.scores)):
                self.assertFalse(torch.allclose(outputs_from_embeds.scores[i], outputs_from_rand_embeds.scores[i]))
    @pytest.mark.generate
    @parameterized.expand([("greedy", 1), ("beam search", 2)])
    def test_generate_from_inputs_embeds(self, _, num_beams):
        """Tests that we can generate from `inputs_embeds` instead of `input_ids` in LLMs, VLMs, etc.
        Runs once with greedy decoding (num_beams=1) and once with beam search (num_beams=2)."""
        # When supported, tests that the decoder model can generate from `inputs_embeds` instead of `input_ids`
        # if fails, you should probably update the `prepare_inputs_for_generation` function
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            # This test is for decoder-only models (encoder-decoder models have native input embeddings support in the
            # decoder)
            if config.is_encoder_decoder:
                continue
            config.is_decoder = True
            set_config_for_less_flaky_test(config)
            # Skip models without explicit support
            model = model_class(config).to(torch_device).eval()
            set_model_for_less_flaky_test(model)
            if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters:
                continue
            # There are a few exception patterns in this test:
            # 1 - Complex `inputs_embeds` computation, i.e. the correct computation of inputs embeds is more complex
            # than calling the embedding layer with `input_ids`. Subcases of this exception:
            # 1.A - Ignore `scale_embedding`, if the model supports it (it is controlled by a model-dependent flag)
            if hasattr(config, "scale_embedding"):
                config.scale_embedding = False
            # HACK - in the case of granite speech, input_features and inputs_embeds are mutually exclusive;
            # this is similar to VLMs and should likely be standardized for similar audio models in the future,
            # then made generic here.
            if "granitespeech" in model_class.__name__.lower():
                inputs_dict.pop("input_features", None)
            # 1.B - No easy fix, let's skip the check that compares the outputs from `input_ids` and `inputs_embeds`
            has_complex_embeds_computation = any(
                model_name in model_class.__name__.lower() for model_name in ["moshi"]
            )
            # 2 - `inputs_dict` doesn't contain `attention_mask`. When `attention_mask` is not passed to generate,
            # we infer it from `input_ids`. The last test case will fail if there is a pad token in the original input.
            missing_attention_mask = "attention_mask" not in inputs_dict
            # Traditional way of generating text
            input_ids = inputs_dict.pop("input_ids")
            generation_kwargs = {
                "return_dict_in_generate": True,
                "output_scores": True,
                "num_beams": num_beams,
                "do_sample": False,
                "max_new_tokens": 5,
                "min_new_tokens": 5,  # generate exactly 5 tokens
                "use_cache": True,
            }
            outputs_from_ids = model.generate(input_ids=input_ids, **generation_kwargs, **inputs_dict)
            self.assertEqual(outputs_from_ids.sequences.shape[:2], (input_ids.shape[0], input_ids.shape[1] + 5))
            # Same thing, but from input embeddings (`input_ids` is passed so the prompt is present in the output).
            # The output of the two calls should be the same.
            inputs_embeds = model.get_input_embeddings()(input_ids)
            outputs_from_embeds = model.generate(
                input_ids=input_ids, inputs_embeds=inputs_embeds, **generation_kwargs, **inputs_dict
            )
            if not has_complex_embeds_computation:
                self.assertTrue(has_similar_generate_outputs(outputs_from_ids, outputs_from_embeds))
            # input_ids is not a required input on most models -- if we don't pass it, the newly generated tokens will
            # be the same
            if not missing_attention_mask:
                outputs_from_embeds_wo_ids = model.generate(
                    inputs_embeds=inputs_embeds, **generation_kwargs, **inputs_dict
                )
                # Strip the prompt so only the newly generated tokens are compared.
                outputs_from_embeds.sequences = outputs_from_embeds.sequences[:, inputs_embeds.shape[1] :]
                self.assertTrue(has_similar_generate_outputs(outputs_from_embeds_wo_ids, outputs_from_embeds))
    @pytest.mark.generate
    def test_generate_from_inputs_embeds_with_static_cache(self):
        """
        Test that StaticCache can generate from inputs_embeds and calculates max_cache_length
        correctly in `generate()`. We force the model to not stop generation until max-length is reached
        to verify that the cache length is indeed set correctly and we don't run out of index when slicing the cache.
        """
        for model_class in self.all_generative_model_classes:
            # Here, we should ideally not skip any model, and test them all. However, some old models cannot correctly
            # use a static cache because they don't create the causal masks correctly.
            # TODO: cyril -> relax this by adding a `_support_static_cache` attribute
            if not model_class._can_compile_fullgraph:
                self.skipTest(reason="This model does not support the static cache format")
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            if config.is_encoder_decoder:
                self.skipTest(reason="This model is encoder-decoder and has Encoder-Decoder Cache")
            model = model_class(config).to(torch_device).eval()
            if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters:
                self.skipTest(reason="This model does not support `inputs_embeds` in generation")
            input_ids = inputs_dict.pop("input_ids")
            model.config.use_cache = True
            model.config.is_decoder = True
            batch_size = input_ids.shape[0]
            max_new_tokens = 10
            # here we force to not stop at eos and go until max-length
            model.generation_config.eos_token_id = model.config.get_text_config().eos_token_id = -1
            generation_kwargs = {
                "max_new_tokens": max_new_tokens,
                "cache_implementation": "static",
                "return_dict_in_generate": True,  # Required to return `past_key_values`
            }
            # Derive the expected static-cache tensor shape from the text config. `head_dim` falls back to
            # hidden_size // num_attention_heads when unset; missing `num_key_value_heads` implies plain MHA.
            text_config = model.config.get_text_config()
            head_dim = (
                getattr(text_config, "head_dim", None) or text_config.hidden_size // text_config.num_attention_heads
            )
            num_key_value_heads = (
                text_config.num_attention_heads
                if getattr(text_config, "num_key_value_heads", None) is None
                else text_config.num_key_value_heads
            )
            num_hidden_layers = text_config.num_hidden_layers
            inputs_embeds = model.get_input_embeddings()(input_ids)
            outputs = model.generate(inputs_embeds=inputs_embeds, **generation_kwargs, **inputs_dict)
            # we should get `max_length - 1` in shape, not `max_length - embeds_length`.
            # -1 because the last generated token isn't yet in the cache.
            max_length = max_new_tokens + inputs_embeds.shape[1] - 1
            cache_shape = [batch_size, num_key_value_heads, max_length, head_dim]
            self.assertIsInstance(outputs.past_key_values, StaticCache)
            self.assertEqual(len(outputs.past_key_values), num_hidden_layers)
            self.assertListEqual(list(outputs.past_key_values.layers[0].keys.shape), cache_shape)
    @pytest.mark.generate
    def test_generate_continue_from_past_key_values(self):
        # Tests that we can continue generating from past key values, returned from a previous `generate` call
        for model_class in self.all_generative_model_classes:
            if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt", "mllama"]):
                self.skipTest(reason="Won't fix: old model with unique inputs/caches/other")
            if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]):
                self.skipTest(reason="TODO: needs modeling or test input preparation fixes for compatibility")
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
            # Let's make it always:
            # 1. use cache (for obvious reasons)
            # 2. generate to max length (which can be achieved by setting the eos token to an invalid value), which
            #    would make the test flaky (e.g. EOS is generated on iteration 1 on both generations, but the
            #    continuation would force it to generate beyond an EOS token)
            # 3. ignore `token_type_ids` for simplicity
            # 4. ignore `forced_eos_token_id`, which requires further manipulation of the continuation inputs and is
            #    active by default on some models
            # 5. ignore `encoder_no_repeat_ngram_size`, which is set by default in some encoder-decoder models. When
            #    we use their decoder as a stand-alone model, `encoder_no_repeat_ngram_size` actually prevents
            #    repetition exclusively from the prompt. This test relies on comparing one call vs 2 calls
            #    with cache, what is considered a prompt is different in the two cases.
            if "token_type_ids" in inputs:
                del inputs["token_type_ids"]
            model = model_class(config).to(torch_device)
            model.eval()
            # If "past_key_values" is not returned, skip the test (e.g. RWKV uses a different cache name and format)
            outputs = model(**inputs)
            if "past_key_values" not in outputs:
                self.skipTest(reason="This model doesn't return `past_key_values`")
            generate_kwargs = {
                "pad_token_id": -1,
                "eos_token_id": -1,
                "forced_eos_token_id": None,
                "encoder_no_repeat_ngram_size": 0,
                "use_cache": True,
                "do_sample": False,
                "return_dict_in_generate": True,
                "output_scores": True,
            }
            # Traditional way of generating text, with `return_dict_in_generate` to return the past key values
            outputs = model.generate(**inputs, **generate_kwargs, max_new_tokens=4)
            # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the
            # inputs may need to be tweaked across `generate` calls (like the attention mask).
            outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=3)
            # Continue from the tokens generated above, preparing the inputs accordingly
            inputs["past_key_values"] = outputs_cached.past_key_values
            new_attention_len = outputs_cached.sequences.shape[-1]
            if config.is_encoder_decoder:
                inputs["decoder_input_ids"] = outputs_cached.sequences
                if "decoder_attention_mask" in inputs:
                    # extend the mask with 1s so it also covers the 3 tokens generated in the first call
                    inputs["decoder_attention_mask"] = torch.nn.functional.pad(
                        inputs["decoder_attention_mask"],
                        (0, new_attention_len - inputs["decoder_attention_mask"].shape[1]),
                        mode="constant",
                        value=1,
                    )
            else:
                inputs["input_ids"] = outputs_cached.sequences
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = torch.nn.functional.pad(
                        inputs["attention_mask"],
                        (0, new_attention_len - inputs["attention_mask"].shape[1]),
                        mode="constant",
                        value=1,
                    )
            first_caches_scores = outputs_cached.scores
            outputs_cached = model.generate(**inputs, **generate_kwargs, max_new_tokens=1)
            # stitch the scores of the two cached calls together so they compare 1:1 with the single call
            full_cached_scores = first_caches_scores + outputs_cached.scores
            outputs_cached.scores = full_cached_scores
            # The two sets of generated text and past kv should be equal to each other
            self.assertTrue(has_similar_generate_outputs(outputs, outputs_cached))
            self._check_caches_are_equal(outputs.past_key_values, outputs_cached.past_key_values)
    @pytest.mark.generate
    def test_generate_continue_from_inputs_embeds(self):
        """Tests that we can continue generation from `inputs_embeds` and past key values returned from a previous `generate` call."""
        for model_class in self.all_generative_model_classes:
            if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt"]):
                self.skipTest(reason="Won't fix: old model with unique inputs/caches/other")
            if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]):
                self.skipTest(reason="TODO: needs modeling or test input preparation fixes for compatibility")
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            if "token_type_ids" in inputs_dict:
                del inputs_dict["token_type_ids"]
            if config.is_encoder_decoder:
                self.skipTest(reason="This model is encoder-decoder")
            # TODO (joao, raushan): the correct line below is `if not hasattr(config.get_text_config(), "use_cache")`,
            # but it breaks a few models. Fix and then apply `has_similar_generate_outputs` pattern
            if not hasattr(config, "use_cache"):
                self.skipTest(reason=f"{model_class.__name__} doesn't support caching")
            model = model_class(config).to(torch_device).eval()
            if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters:
                self.skipTest(reason="This model does not support `inputs_embeds` in generation")
            # If "past_key_values" is not returned, skip the test (e.g. RWKV uses a different cache name and format)
            outputs = model(**inputs_dict)
            if "past_key_values" not in outputs:
                self.skipTest(reason="This model doesn't return `past_key_values`")
            input_ids = inputs_dict.pop("input_ids")
            # disable early stopping so both runs generate the full requested number of tokens
            model.generation_config.pad_token_id = model.generation_config.eos_token_id = -1
            model.generation_config.forced_eos_token_id = None
            model.config.is_decoder = True
            model.generation_config.use_cache = True
            generation_kwargs = {
                "return_dict_in_generate": True,
                "do_sample": False,
            }
            # Traditional way of generating text, with `return_dict_in_generate` to return the past key values.
            input_embeds = model.get_input_embeddings()(input_ids)
            outputs = model.generate(inputs_embeds=input_embeds, max_new_tokens=4, **generation_kwargs)
            # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens)
            initial_output = model.generate(inputs_embeds=input_embeds, max_new_tokens=3, **generation_kwargs)
            # the continuation prompt = original embeds + embeds of the 3 freshly generated tokens
            continued_embeds = torch.cat([input_embeds, model.get_input_embeddings()(initial_output.sequences)], dim=1)
            cached_output = model.generate(
                inputs_embeds=continued_embeds,
                max_new_tokens=1,
                past_key_values=initial_output.past_key_values,
                **generation_kwargs,
            )
            # Combine the (3 + 1) generated tokens and verify it matches with full generation.
            combined_output_sequences = torch.concat([initial_output.sequences, cached_output.sequences], axis=1)
            self.assertListEqual(outputs.sequences.tolist(), combined_output_sequences.tolist())
            # The two sets of past kv should be equal to each other
            self._check_caches_are_equal(outputs.past_key_values, cached_output.past_key_values)
    @pytest.mark.generate
    def test_generate_with_static_cache(self):
        """
        Tests that generating with static cache give almost same results as with dynamic cache, and the output cache
        has the expected shapes
        """
        for model_class in self.all_generative_model_classes:
            # Here, we should ideally not skip any model, and test them all. However, some old models cannot correctly
            # use a static cache because they don't create the causal masks correctly.
            # TODO: cyril -> relax this by adding a `_support_static_cache` attribute
            if not model_class._can_compile_fullgraph:
                self.skipTest(reason="This model does not support the static cache format")
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            set_config_for_less_flaky_test(config)
            main_input = inputs_dict[model_class.main_input_name]
            if config.is_encoder_decoder:
                self.skipTest(reason="This model is encoder-decoder and has Encoder-Decoder Cache")
            config.is_decoder = True
            batch_size = main_input.shape[0]
            seq_length = self.model_tester.seq_length
            max_new_tokens = 20
            # run the check in both fp32 and fp16 to catch dtype-dependent cache issues
            for dtype in (torch.float32, torch.float16):
                model = model_class(copy.deepcopy(config)).to(torch_device).to(dtype).eval()
                # cast floating-point input tensors to the dtype under test; leave everything else untouched
                inputs_dict = {
                    k: v.to(dtype) if isinstance(v, torch.Tensor) and torch.is_floating_point(v) else v
                    for k, v in inputs_dict.items()
                }
                set_model_for_less_flaky_test(model)
                generation_kwargs = {
                    "max_new_tokens": max_new_tokens,
                    "return_dict_in_generate": True,  # Required to return `past_key_values`
                    "output_scores": True,
                    "use_cache": True,
                }
                static_cache_generation = model.generate(
                    **generation_kwargs, **inputs_dict, cache_implementation="static"
                )
                # Check 1: The cache shapes must match the expected shapes
                max_cache_len = seq_length + max_new_tokens - 1  # cache len = gen len - 1, the last token has no cache
                text_config = config.text_config if hasattr(config, "text_config") else config
                head_dim = (
                    getattr(text_config, "head_dim", None)
                    or text_config.hidden_size // text_config.num_attention_heads
                )
                num_key_value_heads = (
                    text_config.num_attention_heads
                    if getattr(text_config, "num_key_value_heads", None) is None
                    else text_config.num_key_value_heads
                )
                num_hidden_layers = text_config.num_hidden_layers
                cache_shape = (batch_size, num_key_value_heads, max_cache_len, head_dim)
                self.assertTrue(isinstance(static_cache_generation.past_key_values, StaticCache))
                self.assertTrue(len(static_cache_generation.past_key_values) == num_hidden_layers)
                self.assertTrue(static_cache_generation.past_key_values.layers[0].keys.shape == cache_shape)
                # Check 2: The outputs must be similar to the case with dynamic cache
                dynamic_cache_generation = model.generate(**generation_kwargs, **inputs_dict)
                self.assertTrue(has_similar_generate_outputs(dynamic_cache_generation, static_cache_generation))
@require_optimum_quanto
@pytest.mark.generate
def test_generate_with_quant_cache(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
if config.is_encoder_decoder or not model_class._supports_default_dynamic_cache():
self.skipTest(reason="This model does not support the quantized cache format")
config.is_decoder = True
model = model_class(config).to(torch_device).eval()
generation_kwargs = {
"max_new_tokens": 5,
"cache_implementation": "quantized",
# careful with group size, should be divisor of model's hidden size
"cache_config": {"backend": "quanto", "nbits": 2, "q_group_size": 8, "residual_length": 128},
"return_dict_in_generate": True, # Required to return `past_key_values`
"use_cache": True,
}
results = model.generate(**generation_kwargs, **inputs_dict)
self.assertTrue(all(isinstance(layer, QuantoQuantizedLayer) for layer in results.past_key_values.layers))
# passing past key values of different type should raise Error
with self.assertRaises(ValueError):
model.generate(past_key_valyes=DynamicCache(config=model.config), **generation_kwargs, **inputs_dict)
# setting incorrect cache_config args should raise an Error, i.e. nbits=60 does not make sense
generation_kwargs["cache_config"] = {"nbits": 60, "q_group_size": 8, "residual_length": 128}
with self.assertRaises(ValueError):
model.generate(**generation_kwargs, **inputs_dict)
    @pytest.mark.generate
    @pytest.mark.torch_compile_test
    @require_torch_greater_or_equal("2.6")  # Uses torch.compiler.set_stance
    def test_generate_compile_model_forward_fullgraph(self):
        """
        Tests that `.generate` is compatible with torch.compile, keeping the same results. Also confirms that
        `.forward` called from `.generate` sees no graph breaks or recompilations when compiled.
        ⚠️ Runs two sequential generations to ensure the cache doesn't get stuck after the first compiled run! ⚠️
        """
        for model_class in self.all_generative_model_classes:
            # 1. Test exclusion criteria
            if not model_class._can_compile_fullgraph:
                self.skipTest("This model doesn't support compilation without graph breaks")
            # 2. Prepares two sets of inputs
            config, inputs_dict = self.prepare_config_and_inputs_for_generate(batch_size=4)
            set_config_for_less_flaky_test(config)
            model = model_class(config).to(torch_device)
            set_model_for_less_flaky_test(model)
            model.eval()  # otherwise `self.training` is `True` -- this flag is used at attn mask creation time
            # Some composite models have a custom generate and will call an inner model's generate -> that inner model
            # is the one that gets compiled.
            # (Note for the future: if BLIP starts causing problems, let's stop testing it)
            if "blip" in model.__class__.__name__.lower():
                model_to_be_compiled = model.language_model
            else:
                model_to_be_compiled = model
            # creates two sets of *different* inputs with the same shape
            main_input = inputs_dict[model.main_input_name].to(torch_device)
            half_batch_size = main_input.shape[0] // 2
            input_1 = {}
            input_2 = {}
            for key, value in inputs_dict.items():
                if isinstance(value, torch.Tensor):
                    # split the batch in two halves so each set is different data with identical shape
                    input_1[key] = value[:half_batch_size, :].to(torch_device)
                    input_2[key] = value[half_batch_size : half_batch_size * 2, :].to(torch_device)
                else:
                    input_1[key] = value
                    input_2[key] = value
            model_input_sets = [input_1, input_2]
            self.assertTrue(
                model_input_sets[0][model.main_input_name].shape == model_input_sets[1][model.main_input_name].shape
            )
            # 3. compilation-specific setup and generation parameterization
            torch.compiler.reset()  # prevent cached compilation from being used in the test
            has_defined_cache_implementation = model.generation_config.cache_implementation is not None
            compile_config = CompileConfig(fullgraph=True, dynamic=False)  # Error out on dynamic shapes
            compile_config._compile_all_devices = True  # force compilation (e.g. fast CI, CPU)
            generation_kwargs = {
                "use_cache": True,
                "do_sample": False,
                "max_new_tokens": 5,
                "return_dict_in_generate": True,
                "output_scores": True,
                "compile_config": compile_config,
            }
            # 4. get eager + dynamic cache results for future comparison
            dynamic_outputs = []
            # Ignores all `torch.compile` usage, useful to test models that that have non-default compilable caches
            # (who would have used compilation in this section)
            with torch.compiler.set_stance("force_eager"):
                for model_inputs in model_input_sets:
                    gen_out = model.generate(**model_inputs, **generation_kwargs)
                    dynamic_outputs.append(gen_out)
                    # sanity checks for the default cache implementation
                    if not has_defined_cache_implementation:
                        decoder_cache = (
                            gen_out.past_key_values.self_attention_cache
                            if config.is_encoder_decoder
                            else gen_out.past_key_values
                        )
                        self.assertTrue(isinstance(decoder_cache, DynamicCache))
                        self.assertFalse(decoder_cache.is_compileable)
                        # our auto compile should NOT have been called
                        self.assertFalse(hasattr(model_to_be_compiled, "_compiled_call"))
            # 5. get compiled results -- relies on the automatic compilation triggered by specific compilable caches
            if not has_defined_cache_implementation:
                generation_kwargs["cache_implementation"] = "static"
            compiled_outputs = []
            # Uses a context manager to catch recompilation logs. If there is any recompilation, this test fails.
            # Try/Finally is used to ensure that the log options are reset even if an error is raised.
            try:
                torch._logging.set_logs(recompiles_verbose=True)
                logger = logging.get_logger("torch._dynamo.guards")
                with CaptureLogger(logger) as cl:
                    for model_inputs in model_input_sets:
                        # with torch.compiler.set_stance("fail_on_recompile"):
                        gen_out = model.generate(**model_inputs, **generation_kwargs)
                        compiled_outputs.append(gen_out)
                        # sanity checks
                        decoder_cache = (
                            gen_out.past_key_values.self_attention_cache
                            if config.is_encoder_decoder
                            else gen_out.past_key_values
                        )
                        self.assertFalse(isinstance(decoder_cache, DynamicCache))
                        self.assertTrue(decoder_cache.is_compileable)
                        # our auto compile should have been called
                        self.assertTrue(hasattr(model_to_be_compiled, "_compiled_call"))
            finally:
                # restore default torch logging even on failure
                torch._logging.set_logs()
            # Compilation of sliding layers necessarily has recompiles with `dynamic=False` - however this test
            # still checks that `fullgraph=True` is supported in this case, as compilation with `dynamic=None`
            # is the default and does not actually lead to too many recompiles
            has_sliding_layers = any(decoder_cache.is_sliding)
            has_recompilation = "Recompiling" in cl.out or ("guard" in cl.out and "failure" in cl.out)
            if not has_sliding_layers and has_recompilation:
                raise RuntimeError(
                    f"`torch.compile` recompiled part of the forward pass in {model.__class__.__name__}. "
                    "See the test logs for more details."
                )
            for dynamic_result, compiled_result in zip(dynamic_outputs, compiled_outputs):
                self.assertTrue(has_similar_generate_outputs(dynamic_result, compiled_result))
    @pytest.mark.generate
    def test_generate_compilation_all_outputs(self):
        """
        Tests that all optional outputs are behaving as expected when compilation is triggered.
        In essence, it's the same as `test_greedy_generate_dict_outputs`, but with automatic compilation triggered.
        """
        for model_class in self.all_generative_model_classes:
            # Here, we should ideally not skip any model, and test them all. However, some old models cannot correctly
            # use a static cache because they don't create the causal masks correctly.
            # TODO: cyril -> relax this by adding a `_support_static_cache` attribute
            if not model_class._can_compile_fullgraph:
                self.skipTest(reason="This model does not support the static cache format")
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            if self.has_attentions:
                config._attn_implementation = "eager"  # can't output attentions otherwise
            model = model_class(config).to(torch_device).eval()
            # compilation-specific setup
            torch.compiler.reset()  # prevent cached compilation from being used in the test
            has_defined_cache_implementation = model.generation_config.cache_implementation is not None
            # BLIP is the only exception with custom generate which call `self.lm.generate()`
            # We should avoid such calls in all subsequent multimodal models and try to make `generate()`
            # compatible with multimodality
            compile_config = CompileConfig()
            compile_config._compile_all_devices = True
            if "blip" in model.__class__.__name__.lower():
                # for BLIP the inner language model is the one that gets compiled
                model.language_model.generation_config.compile_config = compile_config
                if not has_defined_cache_implementation:
                    model.language_model.generation_config.cache_implementation = "static"
            else:
                # force compilation (e.g. fast CI, CPU)
                model.generation_config.compile_config = compile_config
                if not has_defined_cache_implementation:
                    model.generation_config.cache_implementation = "static"
            logits_processor_kwargs = self._get_logits_processor_kwargs(do_sample=False, config=model.config)
            # greedy generation with every optional output flag enabled
            output_generate = model.generate(
                do_sample=False,
                num_beams=1,
                max_new_tokens=self.max_new_tokens,
                min_new_tokens=self.max_new_tokens,
                output_attentions=True,
                output_hidden_states=True,
                output_scores=True,
                output_logits=True,
                return_dict_in_generate=True,
                use_cache=True,
                **logits_processor_kwargs,
                **inputs_dict,
            )
            if "blip" in model.__class__.__name__.lower():
                self.assertTrue(hasattr(model.language_model, "_compiled_call"))
            else:
                self.assertTrue(hasattr(model, "_compiled_call"))  # our auto compile should have been called
            if model.config.is_encoder_decoder:
                self.assertTrue(output_generate.sequences.shape[1] == self.max_new_tokens + 1)
                self.assertIsInstance(output_generate, GenerateEncoderDecoderOutput)
            else:
                self.assertTrue(
                    output_generate.sequences.shape[1] == self.max_new_tokens + inputs_dict["input_ids"].shape[1]
                )
                self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput)
            self._check_generate_outputs(output_generate, model.config, use_cache=True)
@pytest.mark.generate
def test_generate_methods_with_logits_to_keep(self):
for model_class in self.all_generative_model_classes:
if "logits_to_keep" not in set(inspect.signature(model_class.forward).parameters.keys()):
self.skipTest(reason="This model does not support `logits_to_keep` argument.")
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
config.use_cache = True
config.is_decoder = True
model = model_class(config).to(torch_device).eval()
# All generation methods (except assisted decoding) rely on always extracting the last token logits of the
# full logits matrix, so testing out only greedy search and assisted decoding is enough (if it works,
# other methods will work as well)
generation_kwargs = {
"max_new_tokens": 10,
"do_sample": False,
}
# Setting logits_to_keep at 0 keeps all logits (old behavior)
with_all_logits = model.generate(**generation_kwargs, **inputs_dict, logits_to_keep=0)
# By default, logits_to_keep is automatically set to 1 if not provided (new behavior)
without_all_logits = model.generate(**inputs_dict, **generation_kwargs)
self.assertEqual(with_all_logits.tolist(), without_all_logits.tolist())
@pytest.mark.generate
def test_inherits_generation_mixin(self):
"""
Tests that the model class directly inherits `GenerationMixin`, as opposed to relying on `PreTrainedModel`
to inherit it.
"""
for model_class in self.all_generative_model_classes:
self.assertTrue("GenerationMixin" in str(model_class.__bases__))
@pytest.mark.generate
def test_prepare_inputs_for_generation_kwargs_forwards(self, **extra_kwargs):
"""Tests that prepare_inputs_for_generation forwards arbitrary kwargs."""
for model_class in self.all_generative_model_classes:
config, _ = self.prepare_config_and_inputs_for_generate()
model = model_class(config).to(torch_device).eval()
input_ids = torch.tensor([[1, 2, 3], [4, 5, 6]]).to(torch_device)
input_args = {
"input_ids": input_ids,
"cache_position": torch.tensor([9]).to(torch_device),
"position_ids": torch.tensor([[0, 1, 2], [0, 1, 2]]).to(torch_device),
}
arbitrary_kwargs = {
"output_attentions": True,
"output_hidden_states": True,
"custom_arg": "test_value",
"numeric_arg": 42,
}
model_inputs = model.prepare_inputs_for_generation(**input_args, **arbitrary_kwargs, **extra_kwargs)
# Verify that input_ids has proper name
if config.is_encoder_decoder:
self.assertTrue("decoder_input_ids" in model_inputs)
else:
self.assertTrue("input_ids" in model_inputs)
# Verify that arbitrary kwargs are forwarded
for key, value in arbitrary_kwargs.items():
self.assertTrue(key in model_inputs)
self.assertTrue(model_inputs[key] == value)
    def _test_attention_implementation(self, attn_implementation):
        """
        Compares the output of generate with the eager attention implementation against other implementations.
        NOTE: despite the test logic being the same, different implementations actually need different decorators, hence
        this separate function.
        """
        max_new_tokens = 3
        # maps each non-eager implementation to the model-class flag that advertises support for it
        support_flag = {
            "sdpa": "_supports_sdpa",
            "flash_attention_2": "_supports_flash_attn",
            "flash_attention_3": "_supports_flash_attn",
        }
        for model_class in self.all_generative_model_classes:
            if attn_implementation != "eager" and not getattr(model_class, support_flag[attn_implementation]):
                self.skipTest(f"{model_class.__name__} does not support `attn_implementation={attn_implementation}`")
            config, original_inputs_dict = self.prepare_config_and_inputs_for_generate()
            inputs_dict = {}
            # cast floating inputs to fp16 (flash attention kernels don't run in fp32)
            for input_name, input_data in original_inputs_dict.items():
                if isinstance(input_data, torch.Tensor) and input_data.dtype in [torch.float32, torch.bfloat16]:
                    inputs_dict[input_name] = input_data.to(torch.float16)
                else:
                    inputs_dict[input_name] = input_data
            main_input = inputs_dict[model_class.main_input_name]
            # FA2 doesn't accept masking in the middle of the sequence for now. We usually generate right-padded
            # attention masks at test time and, with generate, the mask will be appended with 1s on the right,
            # resulting in a mask with holes (not supported properly by FA2).
            if attn_implementation == "flash_attention_2":
                for input_name in ("attention_mask", "decoder_attention_mask", "encoder_attention_mask"):
                    if input_name in inputs_dict:
                        inputs_dict[input_name] = torch.ones_like(inputs_dict[input_name])
            # make sure that all models have enough positions for generation
            if hasattr(config, "max_position_embeddings"):
                config.max_position_embeddings = max_new_tokens + main_input.shape[1] + 1
            set_config_for_less_flaky_test(config)
            model = model_class(config)
            # If not all sub-models support flex, skip the test. We could potentially set not supported backbones
            # to "eager" attention, leaving it for future updates on multimodality tests
            sub_models_supporting_attn = [
                getattr(module, support_flag[attn_implementation])
                for name, module in model.named_modules()
                if isinstance(module, PreTrainedModel) and name != ""
            ]
            if not all(sub_models_supporting_attn) and len(sub_models_supporting_attn) > 0:
                self.skipTest(
                    f"One of {model_class.__name__}'s backbones does not support `attn_implementation={attn_implementation}`"
                )
            # round-trip through disk so each implementation is loaded fresh from identical weights
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                del model
                gc.collect()
                generate_kwargs = {
                    "max_new_tokens": max_new_tokens,
                    "do_sample": False,
                    "return_dict_in_generate": True,
                    "output_scores": True,
                    "use_cache": True,
                }
                model_eager = model_class.from_pretrained(
                    tmpdirname,
                    dtype=torch.float16,
                    attn_implementation="eager",
                ).to(torch_device)
                set_model_for_less_flaky_test(model_eager)
                res_eager = model_eager.generate(**inputs_dict, **generate_kwargs)
                # free the eager model before loading the second copy to keep peak memory down
                del model_eager
                gc.collect()
                model_attn = model_class.from_pretrained(
                    tmpdirname,
                    dtype=torch.float16,
                    attn_implementation=attn_implementation,
                ).to(torch_device)
                set_model_for_less_flaky_test(model_attn)
                res_attn = model_attn.generate(**inputs_dict, **generate_kwargs)
                del model_attn
                gc.collect()
                self.assertTrue(has_similar_generate_outputs(res_eager, res_attn, atol=1e-3, rtol=1e-3))
@pytest.mark.generate
@slow
def test_eager_matches_sdpa_generate(self):
"""Tests that generate has equivalent outputs with SDPA and eager attention implementations."""
self._test_attention_implementation("sdpa")
@pytest.mark.flash_attn_test
@require_flash_attn
@require_torch_accelerator
@slow
def test_eager_matches_fa2_generate(self):
"""Tests that generate has equivalent outputs with FA2 and eager attention implementations."""
self._test_attention_implementation("flash_attention_2")
@pytest.mark.flash_attn_3_test
@require_flash_attn_3
@require_torch_gpu
@slow
def test_eager_matches_fa3_generate(self):
"""Tests that generate has equivalent outputs with FA3 and eager attention implementations."""
self._test_attention_implementation("flash_attention_3")
    @require_flash_attn
    @require_torch_accelerator
    @pytest.mark.flash_attn_test
    def test_flash_attention_2_continue_generate_with_position_ids(self):
        """
        Tests whether flash attention can continue its generation from given position ids.
        NOTE: This serves as regression check as we had instances where flash attention entered the varlen
        path here. It should now always enter the base `flash_fn`.
        """
        max_new_tokens = 2
        for model_class in self.all_generative_model_classes:
            if not model_class._supports_flash_attn:
                self.skipTest(f"{model_class.__name__} does not support Flash Attention.")
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            if config.is_encoder_decoder:
                self.skipTest("Model is an encoder-decoder")
            if not hasattr(config.get_text_config(), "use_cache"):
                self.skipTest(f"{model_class.__name__} doesn't support caching")
            if "input_ids" not in inputs_dict or inputs_dict["input_ids"].ndim != 2:
                self.skipTest("Model dummy inputs should contain text input ids")
            # make sure that all models have enough positions for generation
            dummy_input_ids = inputs_dict["input_ids"]
            if hasattr(config, "max_position_embeddings"):
                config.max_position_embeddings = max_new_tokens + dummy_input_ids.shape[1] + 1
            model = model_class(config)
            if "position_ids" not in inspect.signature(model.forward).parameters:
                self.skipTest("Model does not support position_ids")
            # round-trip through disk so the model is loaded with FA2 in bf16 (FA kernels need half precision)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = (
                    model_class.from_pretrained(
                        tmpdirname,
                        dtype=torch.bfloat16,
                        attn_implementation="flash_attention_2",
                    )
                    .to(torch_device)
                    .eval()
                )
                # Drop all keys except for `input_ids`. Hard to manipulate with multimodals etc
                dummy_input_ids = inputs_dict["input_ids"]
                # explicit contiguous position ids for the prompt, one row per batch element
                dummy_position_ids = torch.arange(dummy_input_ids.shape[1], device=torch_device)
                dummy_position_ids = dummy_position_ids.unsqueeze(0).repeat(dummy_input_ids.shape[0], 1)
                # Store cache for the input prompt
                output = model(dummy_input_ids, position_ids=dummy_position_ids, use_cache=True)
                if "past_key_values" not in output:
                    self.skipTest("This model doesn't return `past_key_values`")
                # create new input_ids and position_ids to continue generation re-using the cache
                new_input_ids = output.logits[:, -1, :].float().argmax(-1)[:, None]
                past_length = dummy_input_ids.shape[1]
                # positions continue exactly where the prompt ended
                position_ids = torch.arange(past_length, past_length + new_input_ids.shape[1], device=torch_device)
                position_ids = position_ids.unsqueeze(0).repeat(new_input_ids.shape[0], 1)
                output = model(
                    input_ids=new_input_ids,
                    past_key_values=output.past_key_values,
                    position_ids=position_ids,
                    use_cache=True,
                )
                next_token_logits = output.logits[:, -1, :].float()
                generate_kwargs = {
                    "pad_token_id": -1,
                    "eos_token_id": -1,
                    "forced_eos_token_id": None,
                    "use_cache": True,
                    "do_sample": False,
                    "return_dict_in_generate": True,
                    "output_logits": True,
                    "max_new_tokens": max_new_tokens,
                }
                # `generate` with implicit position ids must match the manual continuation above
                generation_out = model.generate(dummy_input_ids, **generate_kwargs)
                next_token_logits_from_generate = generation_out.logits[-1]
                # acceptable numerical instability
                tol = torch.finfo(torch.bfloat16).eps
                torch.testing.assert_close(next_token_logits_from_generate, next_token_logits, rtol=tol, atol=tol)
def attention_mask_padding_matches_padding_free_with_position_ids(
self, attn_implementation: str, fa_kwargs: bool = False
):
"""
Tests that the given attention implementation can work with packed sequences and infers the mask
from position ids. This test requires the model to use new attention mask API which handles packing.
"""
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
max_new_tokens = 30
support_flag = {
"sdpa": "_supports_sdpa",
"flash_attention_2": "_supports_flash_attn",
"flash_attention_3": "_supports_flash_attn",
}
for model_class in self.all_generative_model_classes:
if attn_implementation != "eager" and not getattr(model_class, support_flag[attn_implementation]):
self.skipTest(f"{model_class.__name__} does not support {attn_implementation}")
# can't infer if new attn mask API is supported by assume that only model with attention backend support it
if not model_class._supports_attention_backend:
self.skipTest(f"{model_class.__name__} does not support new attention mask API")
if model_class._is_stateful: # non-transformer models most probably have no packing support
self.skipTest(f"{model_class.__name__} doesn't support packing!")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if config.is_encoder_decoder:
self.skipTest("Model is an encoder-decoder")
if 0 not in inputs_dict.get("attention_mask", []) or "attention_mask" not in inputs_dict:
self.skipTest("Model dummy inputs should contain padding in their attention mask")
if "input_ids" not in inputs_dict or inputs_dict["input_ids"].ndim != 2:
self.skipTest("Model dummy inputs should contain text input ids")
# make sure that all models have enough positions for generation
dummy_input_ids = inputs_dict["input_ids"]
if hasattr(config, "max_position_embeddings"):
config.max_position_embeddings = max_new_tokens + dummy_input_ids.shape[1] + 1
model = model_class(config)
if "position_ids" not in inspect.signature(model.forward).parameters:
self.skipTest("Model does not support position_ids")
if (not fa_kwargs) and "position_ids" not in inspect.signature(model.forward).parameters:
continue # this model doesn't accept position ids as input
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
# Drop all keys except for the minimal set. Hard to manipulate with multimodals etc
inputs_dict = {k: v for k, v in inputs_dict.items() if k in ["input_ids", "attention_mask"]}
# Ensure left padding, to adapt for some models
if 0 in inputs_dict["attention_mask"][:, -1]:
inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1)
dummy_attention_mask = inputs_dict["attention_mask"]
dummy_input_ids[~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id
model = (
model_class.from_pretrained(
tmpdirname,
dtype=torch.bfloat16,
attn_implementation=attn_implementation,
)
.to(torch_device)
.eval()
)
if fa_kwargs:
# flatten
features = [
{"input_ids": i[a.bool()].tolist()} for i, a in zip(dummy_input_ids, dummy_attention_mask)
]
# add position_ids + fa_kwargs
data_collator = DataCollatorWithFlattening(return_tensors="pt", return_flash_attn_kwargs=True)
batch = data_collator(features)
padfree_inputs_dict = {
k: t.to(torch_device) if torch.is_tensor(t) else t for k, t in batch.items()
}
else:
# create packed position_ids
position_ids = (
torch.cat([torch.arange(length) for length in dummy_attention_mask.sum(1).tolist()])
.long()
.unsqueeze(0)
.to(torch_device)
)
padfree_inputs_dict = {
"input_ids": dummy_input_ids[dummy_attention_mask.bool()].unsqueeze(0),
"position_ids": position_ids,
}
# We need to do simple forward without cache in order to trigger packed SDPA/flex/eager attention path
res_padded = model(**inputs_dict, use_cache=False)
res_padfree = model(**padfree_inputs_dict, use_cache=False)
logits_padded = res_padded.logits[dummy_attention_mask.bool()]
logits_padfree = res_padfree.logits[0]
# acceptable numerical instability
tol = torch.finfo(torch.bfloat16).eps
torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol)
def test_eager_padding_matches_padding_free_with_position_ids(self):
self.attention_mask_padding_matches_padding_free_with_position_ids(attn_implementation="eager")
def test_sdpa_padding_matches_padding_free_with_position_ids(self):
self.attention_mask_padding_matches_padding_free_with_position_ids(attn_implementation="sdpa")
@require_flash_attn
@require_torch_accelerator
@pytest.mark.flash_attn_test
@slow
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
self.attention_mask_padding_matches_padding_free_with_position_ids(attn_implementation="flash_attention_2")
@require_flash_attn
@require_torch_accelerator
@pytest.mark.flash_attn_test
@slow
def test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs(self):
self.attention_mask_padding_matches_padding_free_with_position_ids(
attn_implementation="flash_attention_2", fa_kwargs=True
)
@require_flash_attn_3
@require_torch_gpu
@pytest.mark.flash_attn_3_test
@slow
def test_flash_attention_3_padding_matches_padding_free_with_position_ids(self):
self.attention_mask_padding_matches_padding_free_with_position_ids(attn_implementation="flash_attention_3")
@require_flash_attn_3
@require_torch_gpu
@pytest.mark.flash_attn_3_test
@slow
def test_flash_attention_3_padding_matches_padding_free_with_position_ids_and_fa_kwargs(self):
self.attention_mask_padding_matches_padding_free_with_position_ids(
attn_implementation="flash_attention_3", fa_kwargs=True
)
def _get_custom_4d_mask_test_data(self):
# Sequence in which all but the last token is the same
input_ids = torch.tensor(
[[10, 11, 12, 13], [10, 11, 12, 14], [10, 11, 12, 15]], device=torch_device, dtype=torch.int64
)
position_ids = torch.tensor([[0, 1, 2, 3]] * 3, device=torch_device, dtype=torch.int64)
# Combining common prefix with the unique ending tokens:
input_ids_shared_prefix = torch.cat([input_ids[0][:-1], input_ids[:, -1]]).unsqueeze(0)
# Creating a 4D mask where each of the last 3 tokens do not attend to each other.
mask_shared_prefix = torch.tensor(
[
[
[
[1, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 0],
[1, 1, 1, 0, 1, 0],
[1, 1, 1, 0, 0, 1],
]
]
],
)
# inverting the attention mask
mask_dtype = torch.float32
min_dtype = torch.finfo(mask_dtype).min
mask_shared_prefix = (mask_shared_prefix.eq(0.0)).to(dtype=mask_dtype, device=torch_device) * min_dtype
# Creating a position_ids tensor. note the repeating figures in the end.
position_ids_shared_prefix = torch.tensor([[0, 1, 2, 3, 3, 3]], device=torch_device, dtype=torch.int64)
return input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix
    def test_custom_4d_attention_mask(self):
        """Verify that an explicit 4D attention mask can emulate batched decoding.

        Three sequences sharing a common prefix are run (a) as a regular batch and
        (b) packed into a single row with a custom 4D mask that stops the three
        final tokens from attending to one another. The softmax-normalized logits
        of the last token(s) must agree between both layouts.
        """
        if not self.has_attentions:
            self.skipTest(reason="Model architecture does not support attentions")
        for model_class in self.all_generative_model_classes:
            if not model_class._can_compile_fullgraph:
                self.skipTest(f"{model_class.__name__} is not guaranteed to work with custom 4D attention masks")
            config, _ = self.model_tester.prepare_config_and_inputs_for_common()
            set_config_for_less_flaky_test(config)
            # Sliding-window attention would change which positions are visible, invalidating the custom mask.
            if getattr(config, "sliding_window", 0) is not None and getattr(config, "sliding_window", 0) > 0:
                self.skipTest(f"{model_class.__name__} with sliding window attention is not supported by this test")
            model = model_class(config).to(device=torch_device, dtype=torch.float32).eval()
            set_model_for_less_flaky_test(model)
            if "position_ids" not in inspect.signature(model.forward).parameters:
                continue  # model doesn't accept position ids and probably has special way to model positions
            (
                input_ids,
                position_ids,
                input_ids_shared_prefix,
                mask_shared_prefix,
                position_ids_shared_prefix,
            ) = self._get_custom_4d_mask_test_data()
            # Reference: plain batched forward pass.
            logits = model.forward(input_ids, position_ids=position_ids).logits
            # logits.shape == torch.Size([3, 4, ...])
            # Packed layout: one row plus the custom 4D mask and repeated position ids.
            logits_shared_prefix = model(
                input_ids_shared_prefix,
                attention_mask=mask_shared_prefix,
                position_ids=position_ids_shared_prefix,
            )[0]
            # logits_shared_prefix.shape == torch.Size([1, 6, ...])
            out_last_tokens = logits[:, -1, :]  # last tokens in each batch line
            out_shared_prefix_last_tokens = logits_shared_prefix[0, -3:, :]  # last three tokens
            # comparing softmax-normalized logits:
            normalized_0 = F.softmax(out_last_tokens, dim=-1)
            normalized_1 = F.softmax(out_shared_prefix_last_tokens, dim=-1)
            torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-3)
def test_forward_with_logits_to_keep(self):
for model_class in self.all_generative_model_classes:
if "logits_to_keep" not in set(inspect.signature(model_class.forward).parameters.keys()):
self.skipTest(reason="This model does not support `logits_to_keep` argument.")
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
batch_size, sequence_length = inputs["input_ids"].shape[:2]
vocab_size = config.get_text_config().vocab_size
model = model_class(config).to(device=torch_device).eval()
# some models have labels but `logits_to_keep` should not be used in train mode
_ = inputs.pop("labels", None)
# logits_to_keep=0 is a special case meaning "keep all logits"
all_logits = model(**inputs, logits_to_keep=0).logits
last_token_logits = model(**inputs, logits_to_keep=1).logits
# Assert all shapes are correct
self.assertEqual(tuple(all_logits.shape), (batch_size, sequence_length, vocab_size))
self.assertEqual(tuple(last_token_logits.shape), (batch_size, 1, vocab_size))
# Assert the last tokens are actually the same (except for the natural fluctuation due to order of FP ops)
torch.testing.assert_close(all_logits[:, -1:, :], last_token_logits, rtol=1e-5, atol=1e-5)
    def _check_generate_outputs(self, output, config, use_cache=False, num_return_sequences=1, num_beams=1):
        """Validate the structure of a `generate(..., return_dict_in_generate=True)` output.

        Checks scores, logits, attentions, hidden states, and the cache against the
        batch size, prompt length, and generated length implied by the arguments.

        Args:
            output: the generate output object (`sequences`, `scores`, `logits`, ...).
            config: the model config (its `text_config` is used when present).
            use_cache: whether generation ran with a cache (enables cache-shape checks).
            num_return_sequences: sequences returned per input row.
            num_beams: beam count; beams > 1 widens the internal batch.
        """
        input_batch_size = int(output.sequences.shape[0] / num_return_sequences)
        # Beam methods keep `num_beams` candidates alive internally; otherwise it is
        # `num_return_sequences` per input row.
        internal_batch_size = (
            input_batch_size * num_beams if num_beams > 1 else input_batch_size * num_return_sequences
        )
        # The model tester may expose the prompt length under several names; later
        # lookups override earlier ones when present.
        prompt_length = getattr(self.model_tester, "seq_length", None)
        prompt_length = getattr(self.model_tester, "encoder_seq_length", prompt_length)
        prompt_length = getattr(self.model_tester, "text_seq_length", prompt_length)
        config = config.text_config if hasattr(config, "text_config") else config
        # Encoder-decoder sequences start from a BOS token, hence the `- 1`.
        generated_length = (
            output.sequences.shape[1] - 1 if config.is_encoder_decoder else output.sequences.shape[1] - prompt_length
        )
        cache = getattr(output, "past_key_values", None)
        decoder_past_key_values = cache.self_attention_cache if isinstance(cache, EncoderDecoderCache) else cache
        # in some models we subsample the sequence length in inner layers
        if hasattr(self.model_tester, "get_subsampled_output_lengths"):
            prompt_length = self.model_tester.get_subsampled_output_lengths(prompt_length)
        # scores
        self._check_scores(
            batch_size=internal_batch_size, scores=output.scores, generated_length=generated_length, config=config
        )
        # unprocessed logits
        self._check_logits(batch_size=internal_batch_size, logits=output.logits, config=config)
        # Attentions
        if self.has_attentions:
            if config.is_encoder_decoder:
                # encoder
                self._check_encoder_attention_for_generate(
                    attentions=output.encoder_attentions,
                    batch_size=input_batch_size,
                    config=config,
                    prompt_length=prompt_length,
                )
                # decoder
                self._check_attentions_for_generate(
                    batch_size=internal_batch_size,
                    attentions=output.decoder_attentions,
                    prompt_length=1,  # the BOS token
                    output_length=output.sequences.shape[1],
                    config=config,
                    decoder_past_key_values=decoder_past_key_values,
                )
            else:
                self._check_attentions_for_generate(
                    batch_size=internal_batch_size,
                    attentions=output.attentions,
                    prompt_length=prompt_length,
                    output_length=output.sequences.shape[1],
                    config=config,
                    decoder_past_key_values=decoder_past_key_values,
                )
        # Hidden States
        if config.is_encoder_decoder:
            # encoder
            self._check_encoder_hidden_states_for_generate(
                hidden_states=output.encoder_hidden_states,
                batch_size=input_batch_size,
                config=config,
                prompt_length=prompt_length,
            )
            # decoder
            self._check_hidden_states_for_generate(
                batch_size=internal_batch_size,
                hidden_states=output.decoder_hidden_states,
                prompt_length=1,  # the BOS token
                output_length=output.sequences.shape[1],
                config=config,
                use_cache=use_cache,
            )
        else:
            self._check_hidden_states_for_generate(
                batch_size=internal_batch_size,
                hidden_states=output.hidden_states,
                prompt_length=prompt_length,
                output_length=output.sequences.shape[1],
                config=config,
                use_cache=use_cache,
            )
        # Check the cache shape
        if use_cache:
            cache_length = output.sequences.shape[1] - 1
            self._check_past_key_values_for_generate(
                batch_size=internal_batch_size,
                past_key_values=cache,
                seq_length=cache_length,
                config=config,
            )
        # xlnet has a weird list cache, which is returned even with `use_cache=False`...
        elif "xlnet" not in config.__class__.__name__.lower():
            self.assertTrue(cache is None)
def _check_scores(self, batch_size, scores, generated_length, config):
vocab_size = config.get_text_config(decoder=True).vocab_size
expected_shape = (batch_size, vocab_size)
self.assertIsInstance(scores, tuple)
self.assertEqual(len(scores), generated_length)
self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores))
def _check_logits(self, batch_size, logits, config):
vocab_size = config.get_text_config(decoder=True).vocab_size
self.assertIsInstance(logits, tuple)
self.assertListEqual([iter_logits.shape[0] for iter_logits in logits], [batch_size] * len(logits))
# vocabulary difference equal to one (imagegptmodel?) or zero (all other models)
vocab_diff = vocab_size - logits[0].shape[-1]
self.assertTrue(vocab_diff in [0, 1])
self.assertListEqual([vocab_size - score.shape[-1] for score in logits], [vocab_diff] * len(logits))
    def _check_attentions_for_generate(
        self, batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values
    ):
        """Validate decoder attention shapes for every generation step.

        `attentions` is a tuple (one entry per generated token) of tuples (one entry
        per layer) of `(batch, heads, query_len, key_len)` tensors. The expected
        query/key lengths depend on whether a cache is used and on its type.
        """
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (output_length - prompt_length))
        use_cache = decoder_past_key_values is not None
        has_static_cache = isinstance(decoder_past_key_values, StaticCache)
        # When `output_attentions=True`, each iteration of generate appends the attentions corresponding to the new
        # token(s)
        # NOTE: `StaticCache` may have different lengths on different layers, if this test starts failing add more
        # elaborate checks
        for generated_length, iter_attentions in enumerate(attentions):
            # regardless of using cache, the first forward pass will have the full prompt as input
            if use_cache and generated_length > 0:
                model_input_length = 1
            else:
                model_input_length = prompt_length + generated_length
            # A static cache is pre-allocated to its maximum shape, fixing the key-length dimension.
            query_length = (
                prompt_length + generated_length
                if not has_static_cache
                else decoder_past_key_values.get_max_cache_shape()
            )
            expected_shape = (
                batch_size,
                config.num_attention_heads,
                model_input_length,
                query_length,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )
def _check_encoder_attention_for_generate(self, attentions, batch_size, config, prompt_length):
encoder_expected_shape = (batch_size, config.num_attention_heads, prompt_length, prompt_length)
self.assertIsInstance(attentions, tuple)
self.assertListEqual(
[layer_attentions.shape for layer_attentions in attentions],
[encoder_expected_shape] * len(attentions),
)
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, prompt_length, output_length, config, use_cache=False
    ):
        """Validate decoder hidden-state shapes for every generation step.

        `hidden_states` is a tuple (one entry per generated token) of tuples (one
        entry per layer) of `(batch, input_len, hidden_size)` tensors. With a cache,
        steps after the first process a single token.
        """
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (output_length - prompt_length))
        # When `output_hidden_states=True`, each iteration of generate appends the hidden states corresponding to the
        # new token(s)
        # NOTE: `StaticCache` may have different lengths on different layers, if this test starts failing add more
        # elaborate checks
        for generated_length, iter_hidden_states in enumerate(hidden_states):
            # regardless of using cache, the first forward pass will have the full prompt as input
            if use_cache and generated_length > 0:
                model_input_length = 1
            else:
                model_input_length = prompt_length + generated_length
            expected_shape = (batch_size, model_input_length, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, prompt_length):
encoder_expected_shape = (batch_size, prompt_length, config.hidden_size)
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in hidden_states],
[encoder_expected_shape] * len(hidden_states),
)
    def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
        """Validate cache layer shapes after generation.

        Args:
            batch_size: expected batch dimension of cached keys/values.
            past_key_values: the returned cache; must be a standard `Cache`.
            seq_length: expected cached sequence length, or `None` to skip that
                dimension (used for cross-attention caches, whose length is model-specific).
            config: model config used to derive head count and head dim.
        """
        # Raise a useful error, asking to explicitly override the method
        if not isinstance(past_key_values, Cache):
            raise ValueError("The cache is not standard! Please overwrite `_check_past_key_values_for_generate`")
        # In this case, we simply call recursively the function on both internal caches
        if isinstance(past_key_values, EncoderDecoderCache):
            self._check_past_key_values_for_generate(
                batch_size, past_key_values.self_attention_cache, seq_length, config
            )
            # For cross attention cache, the seq_length depends on the model, so we remove that dim
            self._check_past_key_values_for_generate(batch_size, past_key_values.cross_attention_cache, None, config)
            return
        # Use the correct config
        config = config.get_text_config(decoder=True)
        # (batch, kv heads, seq_length, head_dim)
        num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
        hidden_size = getattr(config, "d_model", config.hidden_size)
        head_dim = getattr(config, "head_dim", hidden_size // config.num_attention_heads)
        # For cross attention cache, the seq_length depends on the model, so we remove that dim
        expected_shape = (
            (batch_size, num_heads, seq_length, head_dim)
            if seq_length is not None
            else (batch_size, num_heads, head_dim)
        )
        # Check the size is coherent
        num_hidden_layers = config.num_hidden_layers
        # Models sharing KV projections across layers cache fewer layers than they run.
        if getattr(config, "num_kv_shared_layers", None) is not None:
            num_hidden_layers -= config.num_kv_shared_layers
        self.assertEqual(num_hidden_layers, len(past_key_values))
        # Check each layer has the correct shape
        for layer in past_key_values.layers:
            # Remove the seq_length dim for cross-attention cache (it changes based on the model)
            keys = layer.keys if seq_length is not None else layer.keys[:, :, 0, :]
            values = layer.values if seq_length is not None else layer.values[:, :, 0, :]
            self.assertEqual(keys.shape, expected_shape)
            self.assertEqual(values.shape, expected_shape)
def _check_sequence_inside_sequence(self, tensor_1, tensor_2):
# check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1.
# set to same device. we don't care what device.
if not isinstance(tensor_1, list):
tensor_1 = tensor_1.tolist()
if not isinstance(tensor_2, list):
tensor_2 = tensor_2.tolist()
in_order = len(tensor_1) <= len(tensor_2)
longer = tensor_2 if in_order else tensor_1
shorter = tensor_1 if in_order else tensor_2
flag = False
chunk_size = len(shorter)
for chunk_idx in range(len(longer) - chunk_size + 1):
subseq = longer[chunk_idx : chunk_idx + chunk_size]
if subseq == shorter:
flag = True
break
self.assertTrue(flag)
def _check_caches_are_equal(self, cache1: Cache, cache2: Cache):
if not isinstance(cache1, Cache) or not isinstance(cache2, Cache):
raise ValueError("The cache is not standard! Please overwrite `_check_caches_are_equal`")
# In this case, we simply call recursively the function on both internal caches
if isinstance(cache1, EncoderDecoderCache):
self._check_caches_are_equal(cache1.self_attention_cache, cache2.self_attention_cache)
self._check_caches_are_equal(cache1.cross_attention_cache, cache2.cross_attention_cache)
return
if not len(cache1) == len(cache2):
raise ValueError("Both caches do not have the same number of layers.")
num_layers = len(cache1)
for idx in range(num_layers):
torch.testing.assert_close(cache1.layers[idx].keys, cache2.layers[idx].keys)
torch.testing.assert_close(cache1.layers[idx].values, cache2.layers[idx].values)
@require_torch
| GenerationTesterMixin |
python | django__django | tests/admin_inlines/models.py | {
"start": 7424,
"end": 7727
} | class ____(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return "/child_model2/"
# Models for #19425
| ChildModel2 |
python | kamyu104__LeetCode-Solutions | Python/number-of-subarrays-with-lcm-equal-to-k.py | {
"start": 770,
"end": 1348
} | class ____(object):
def subarrayLCM(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def gcd(a, b):
while b:
a, b = b, a%b
return a
def lcm(a, b):
return a//gcd(a, b)*b
result = 0
for i in xrange(len(nums)):
l = 1
for j in xrange(i, len(nums)):
if k%nums[j]:
break
l = lcm(l, nums[j])
result += int(l == k)
return result
| Solution2 |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0005_sync_project_model.py | {
"start": 100,
"end": 1898
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0004_add_project_container_image"),
]
operations = [
migrations.AlterField(
model_name="project",
name="documentation_type",
field=models.CharField(
default=b"sphinx",
help_text='Type of documentation you are building. <a href="http://sphinx-doc.org/builders.html#sphinx.builders.html.DirectoryHTMLBuilder">More info</a>.',
max_length=20,
verbose_name="Documentation type",
choices=[
(b"auto", "Automatically Choose"),
(b"sphinx", "Sphinx Html"),
(b"mkdocs", "Mkdocs (Markdown)"),
(b"sphinx_htmldir", "Sphinx HtmlDir"),
(b"sphinx_singlehtml", "Sphinx Single Page HTML"),
],
),
),
migrations.AlterField(
model_name="project",
name="single_version",
field=models.BooleanField(
default=False,
help_text='A single version site has no translations and only your "latest" version, served at the root of the domain. Use this with caution, only turn it on if you will <b>never</b>have multiple versions of your docs.',
verbose_name="Single version",
),
),
migrations.AlterField(
model_name="project",
name="use_virtualenv",
field=models.BooleanField(
default=False,
help_text="Install your project inside a virtualenv using <code>setup.py install</code>",
verbose_name="Install Project",
),
),
]
| Migration |
python | spyder-ide__spyder | external-deps/python-lsp-server/test/fixtures.py | {
"start": 1138,
"end": 1217
} | class ____(FakeEditorMethodsMixin, PythonLSPServer):
pass
| FakePythonLSPServer |
python | vyperlang__vyper | vyper/semantics/environment.py | {
"start": 414,
"end": 771
} | class ____(_EnvType):
_id = "block"
_type_members = {
"coinbase": AddressT(),
"difficulty": UINT256_T,
"prevrandao": BYTES32_T,
"number": UINT256_T,
"gaslimit": UINT256_T,
"basefee": UINT256_T,
"blobbasefee": UINT256_T,
"prevhash": BYTES32_T,
"timestamp": UINT256_T,
}
| _Block |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 935238,
"end": 935810
} | class ____(
sgqlc.types.Type,
Node,
AuditEntry,
RepositoryAuditEntryData,
OrganizationAuditEntryData,
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("fork_parent_name", "fork_source_name", "visibility")
fork_parent_name = sgqlc.types.Field(String, graphql_name="forkParentName")
fork_source_name = sgqlc.types.Field(String, graphql_name="forkSourceName")
visibility = sgqlc.types.Field(
RepoCreateAuditEntryVisibility, graphql_name="visibility"
)
| RepoCreateAuditEntry |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_pii.py | {
"start": 3017,
"end": 4235
} | class ____:
"""Test IP address detection."""
def test_detect_valid_ipv4(self):
content = "Server IP: 192.168.1.1"
matches = detect_ip(content)
assert len(matches) == 1
assert matches[0]["type"] == "ip"
assert matches[0]["value"] == "192.168.1.1"
def test_detect_multiple_ips(self):
content = "Connect to 10.0.0.1 or 8.8.8.8"
matches = detect_ip(content)
assert len(matches) == 2
assert matches[0]["value"] == "10.0.0.1"
assert matches[1]["value"] == "8.8.8.8"
def test_invalid_ip_not_detected(self):
# Out of range octets
content = "Not an IP: 999.999.999.999"
matches = detect_ip(content)
assert len(matches) == 0
def test_version_number_not_detected(self):
# Version numbers should not be detected as IPs
content = "Version 1.2.3.4 released"
matches = detect_ip(content)
# This is a valid IP format, so it will be detected
# This is acceptable behavior
assert len(matches) >= 0
def test_no_ip(self):
content = "No IP addresses here."
matches = detect_ip(content)
assert len(matches) == 0
| TestIPDetection |
python | bokeh__bokeh | tests/unit/bokeh/client/test_util__client.py | {
"start": 2002,
"end": 2956
} | class ____:
def test_with_http(self) -> None:
assert bcu.websocket_url_for_server_url("http://foo.com") == "ws://foo.com/ws"
assert bcu.websocket_url_for_server_url("http://foo.com/") == "ws://foo.com/ws"
def test_with_https(self) -> None:
assert bcu.websocket_url_for_server_url("https://foo.com") == "wss://foo.com/ws"
assert bcu.websocket_url_for_server_url("https://foo.com/") == "wss://foo.com/ws"
def test_bad_proto(self) -> None:
with pytest.raises(ValueError):
bcu.websocket_url_for_server_url("junk://foo.com")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Test_websocket_url_for_server_url |
python | jschneier__django-storages | tests/test_utils.py | {
"start": 276,
"end": 436
} | class ____(TestCase):
def test_get_setting(self):
value = utils.setting("SECRET_KEY")
self.assertEqual(settings.SECRET_KEY, value)
| SettingTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/styles/test_styles02.py | {
"start": 380,
"end": 4449
} | class ____(unittest.TestCase):
"""
Test assembling a complete Styles file.
"""
def test_assemble_xml_file(self):
"""Test for simple font styles."""
self.maxDiff = None
fh = StringIO()
style = Styles()
style._set_filehandle(fh)
workbook = Workbook()
workbook.add_format({"bold": 1})
workbook.add_format({"italic": 1})
workbook.add_format({"bold": 1, "italic": 1})
workbook._set_default_xf_indices()
workbook._prepare_format_properties()
style._set_style_properties(
[
workbook.xf_formats,
workbook.palette,
workbook.font_count,
workbook.num_formats,
workbook.border_count,
workbook.fill_count,
workbook.custom_colors,
workbook.dxf_formats,
workbook.has_comments,
]
)
style._assemble_xml_file()
workbook.fileclosed = 1
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<fonts count="4">
<font>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
<font>
<b/>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
<font>
<i/>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
<font>
<b/>
<i/>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
</fonts>
<fills count="2">
<fill>
<patternFill patternType="none"/>
</fill>
<fill>
<patternFill patternType="gray125"/>
</fill>
</fills>
<borders count="1">
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
</borders>
<cellStyleXfs count="1">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0"/>
</cellStyleXfs>
<cellXfs count="4">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/>
<xf numFmtId="0" fontId="1" fillId="0" borderId="0" xfId="0" applyFont="1"/>
<xf numFmtId="0" fontId="2" fillId="0" borderId="0" xfId="0" applyFont="1"/>
<xf numFmtId="0" fontId="3" fillId="0" borderId="0" xfId="0" applyFont="1"/>
</cellXfs>
<cellStyles count="1">
<cellStyle name="Normal" xfId="0" builtinId="0"/>
</cellStyles>
<dxfs count="0"/>
<tableStyles count="0" defaultTableStyle="TableStyleMedium9" defaultPivotStyle="PivotStyleLight16"/>
</styleSheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleStyles |
python | spack__spack | lib/spack/spack/util/crypto.py | {
"start": 893,
"end": 3541
} | class ____:
def __init__(self, hash_alg, alert_fn, disable_security_check):
self.hash_alg = hash_alg
self.alert_fn = alert_fn
self.disable_security_check = disable_security_check
def __call__(self, disable_alert=False):
if not disable_alert:
self.alert_fn(
"Deprecation warning: {0} checksums will not be"
" supported in future Spack releases.".format(self.hash_alg)
)
if self.disable_security_check:
return hashlib.new(self.hash_alg, usedforsecurity=False) # novermin
else:
return hashlib.new(self.hash_alg)
def hash_fun_for_algo(algo: str) -> HashFactory:
"""Get a function that can perform the specified hash algorithm."""
fun = _hash_functions.get(algo)
if fun:
return fun
elif algo not in _deprecated_hash_algorithms:
_hash_functions[algo] = getattr(hashlib, algo)
else:
try:
deprecated_fun = DeprecatedHash(algo, tty.debug, disable_security_check=False)
# call once to get a ValueError if usedforsecurity is needed
deprecated_fun(disable_alert=True)
except ValueError:
# Some systems may support the 'usedforsecurity' option
# so try with that (but display a warning when it is used)
deprecated_fun = DeprecatedHash(algo, tty.warn, disable_security_check=True)
_hash_functions[algo] = deprecated_fun
return _hash_functions[algo]
def hash_algo_for_digest(hexdigest: str) -> str:
"""Gets name of the hash algorithm for a hex digest."""
algo = _size_to_hash.get(len(hexdigest) // 2)
if algo is None:
raise ValueError(f"Spack knows no hash algorithm for this digest: {hexdigest}")
return algo
def hash_fun_for_digest(hexdigest: str) -> HashFactory:
"""Gets a hash function corresponding to a hex digest."""
return hash_fun_for_algo(hash_algo_for_digest(hexdigest))
def checksum_stream(hashlib_algo: HashFactory, fp: BinaryIO, *, block_size: int = 2**20) -> str:
"""Returns a hex digest of the stream generated using given algorithm from hashlib."""
hasher = hashlib_algo()
while True:
data = fp.read(block_size)
if not data:
break
hasher.update(data)
return hasher.hexdigest()
def checksum(hashlib_algo: HashFactory, filename: str, *, block_size: int = 2**20) -> str:
"""Returns a hex digest of the filename generated using an algorithm from hashlib."""
with open(filename, "rb") as f:
return checksum_stream(hashlib_algo, f, block_size=block_size)
| DeprecatedHash |
python | getsentry__sentry | tests/sentry/uptime/autodetect/test_ranking.py | {
"start": 8386,
"end": 9147
} | class ____(UptimeTestCase):
def test(self) -> None:
assert should_autodetect_for_organization(self.organization)
self.organization.update_option("sentry:uptime_autodetection", False)
assert not should_autodetect_for_organization(self.organization)
self.organization.update_option("sentry:uptime_autodetection", True)
assert should_autodetect_for_organization(self.organization)
def test_quota(self) -> None:
assert should_autodetect_for_organization(self.organization)
detector = self.create_uptime_detector()
assert not should_autodetect_for_organization(self.organization)
detector.delete()
assert should_autodetect_for_organization(self.organization)
| ShouldDetectForOrgTest |
python | bokeh__bokeh | src/bokeh/core/property/dataspec.py | {
"start": 17849,
"end": 18652
} | class ____(UnitsSpec):
""" A |DataSpec| property that accepts numeric fixed values or strings
that refer to columns in a :class:`~bokeh.models.sources.ColumnDataSource`,
and also provides an associated units property to store units information.
Acceptable values for units are ``"screen"`` and ``"data"``.
"""
def __init__(self, default=Undefined, units_default="data", *, help: str | None = None) -> None:
super().__init__(default=default, units_enum=enums.SpatialUnits, units_default=units_default, help=help)
def prepare_value(self, cls, name, value):
try:
if value < 0:
raise ValueError("Distances must be positive!")
except TypeError:
pass
return super().prepare_value(cls, name, value)
| DistanceSpec |
python | walkccc__LeetCode | solutions/2497. Maximum Star Sum of a Graph/2497.py | {
"start": 0,
"end": 560
} | class ____:
def maxStarSum(self, vals: list[int], edges: list[list[int]], k: int) -> int:
n = len(vals)
ans = -math.inf
graph = [[] for _ in range(n)]
for u, v in edges:
graph[u].append((v, vals[v]))
graph[v].append((u, vals[u]))
for i, starSum in enumerate(vals):
maxHeap = []
for _, val in graph[i]:
if val > 0:
heapq.heappush(maxHeap, -val)
j = 0
while j < k and maxHeap:
starSum -= heapq.heappop(maxHeap)
j += 1
ans = max(ans, starSum)
return ans
| Solution |
python | getsentry__sentry | src/sentry/integrations/cursor/integration.py | {
"start": 1991,
"end": 2297
} | class ____(forms.Form):
api_key = forms.CharField(
label=_("Cursor API Key"),
help_text=_("Enter your Cursor API key to call Cursor Agents with."),
widget=forms.PasswordInput(attrs={"placeholder": _("***********************")}),
max_length=255,
)
| CursorAgentConfigForm |
python | huggingface__transformers | src/transformers/models/dac/feature_extraction_dac.py | {
"start": 961,
"end": 7992
} | class ____(SequenceFeatureExtractor):
r"""
Constructs an Dac feature extractor.
This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
most of the main methods. Users should refer to this superclass for more information regarding those methods.
Args:
feature_size (`int`, *optional*, defaults to 1):
The feature dimension of the extracted features. Use 1 for mono, 2 for stereo.
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the audio waveform should be digitalized, expressed in hertz (Hz).
padding_value (`float`, *optional*, defaults to 0.0):
The value that is used for padding.
hop_length (`int`, *optional*, defaults to 512):
Overlap length between successive windows.
"""
model_input_names = ["input_values", "n_quantizers"]
def __init__(
self,
feature_size: int = 1,
sampling_rate: int = 16000,
padding_value: float = 0.0,
hop_length: int = 512,
**kwargs,
):
super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
self.hop_length = hop_length
def __call__(
self,
raw_audio: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]],
padding: Optional[Union[bool, str, PaddingStrategy]] = None,
truncation: Optional[bool] = False,
max_length: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
sampling_rate: Optional[int] = None,
) -> BatchFeature:
"""
Main method to featurize and prepare for the model one or several sequence(s).
Args:
raw_audio (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float
values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape
`(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio
(`feature_size = 2`).
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, *optional*, defaults to `False`):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
return_tensors (`str` or [`~utils.TensorType`], *optional*, default to 'pt'):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors.
"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}."
)
else:
logger.warning(
f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
"Failing to do so can result in silent errors that might be hard to debug."
)
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one.")
elif padding is None:
# by default let's pad the inputs
padding = True
is_batched = bool(
isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
)
if is_batched:
raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
elif not is_batched and not isinstance(raw_audio, np.ndarray):
raw_audio = np.asarray(raw_audio, dtype=np.float32)
elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
raw_audio = raw_audio.astype(np.float32)
# always return batch
if not is_batched:
raw_audio = [np.asarray(raw_audio).T]
# verify inputs are valid
for idx, example in enumerate(raw_audio):
if example.ndim > 2:
raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
if self.feature_size == 2:
raise ValueError("Stereo audio isn't supported for now")
input_values = BatchFeature({"input_values": raw_audio})
# normal padding on batch
padded_inputs = self.pad(
input_values,
max_length=max_length,
truncation=truncation,
padding=padding,
return_attention_mask=padding,
pad_to_multiple_of=self.hop_length,
)
if padding:
padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
if padding:
padded_inputs.input_values = padded_inputs.input_values[:, np.newaxis, :]
input_values = []
for example in padded_inputs.pop("input_values"):
if self.feature_size == 1:
example = example[..., None]
input_values.append(example.T)
padded_inputs["input_values"] = input_values
if return_tensors is not None:
padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
return padded_inputs
__all__ = ["DacFeatureExtractor"]
| DacFeatureExtractor |
python | PyCQA__pylint | tests/functional/c/consider/consider_using_enumerate.py | {
"start": 1807,
"end": 2131
} | class ____:
def __iter__(self):
# Should not suggest enumerate on self
for i in range(len(self)):
yield self[i]
def does_not_crash_on_range_without_args():
for elem in range():
print(elem)
# False negative described in #3657
# https://github.com/pylint-dev/pylint/issues/3657
| Good |
python | pytorch__pytorch | benchmarks/gpt_fast/quantize.py | {
"start": 2008,
"end": 2775
} | class ____:
def __init__(self, mod):
self.mod = mod
@torch.no_grad()
def create_quantized_state_dict(self):
cur_state_dict = self.mod.state_dict()
for fqn, mod in self.mod.named_modules():
if isinstance(mod, torch.nn.Linear):
int8_weight, scales, _ = dynamically_quantize_per_channel(
mod.weight.float(), -128, 127, torch.int8
)
cur_state_dict[f"{fqn}.weight"] = int8_weight.to("cpu")
cur_state_dict[f"{fqn}.scales"] = scales.to(mod.weight.dtype).to("cpu")
return cur_state_dict
def convert_for_runtime(self):
replace_linear_weight_only_int8_per_channel(self.mod)
return self.mod
| WeightOnlyInt8QuantHandler |
python | pytorch__pytorch | torch/_inductor/cpp_builder.py | {
"start": 34077,
"end": 53322
} | class ____(BuildOptionsBase):
"""
This class is inherited from BuildOptionsBase, and as cxx build options.
This option need contains basic cxx build option, which contains:
1. OS related args.
2. Toolchains related args.
3. Cxx standard related args.
Note:
1. This Options is good for assist modules build, such as x86_isa_help.
"""
def __init__(
self,
compile_only: bool = False,
warning_all: bool = True,
extra_flags: Sequence[str] = (),
use_relative_path: bool = False,
compiler: str = "",
min_optimize: bool = False,
precompiling: bool = False,
preprocessing: bool = False,
) -> None:
super().__init__(
compile_only=compile_only,
use_relative_path=use_relative_path,
precompiling=precompiling,
preprocessing=preprocessing,
)
self._compiler = compiler if compiler else get_cpp_compiler()
(
definitions,
include_dirs,
cflags,
ldflags,
libraries_dirs,
libraries,
passthrough_args,
) = get_cpp_options(
cpp_compiler=self._compiler,
do_link=not (compile_only or precompiling or preprocessing),
extra_flags=extra_flags,
warning_all=warning_all,
min_optimize=min_optimize,
)
_append_list(self._definitions, definitions)
_append_list(self._include_dirs, include_dirs)
_append_list(self._cflags, cflags)
_append_list(self._ldflags, ldflags)
_append_list(self._libraries_dirs, libraries_dirs)
_append_list(self._libraries, libraries)
_append_list(self._passthrough_args, passthrough_args)
self._finalize_options()
def _get_torch_cpp_wrapper_definition() -> list[str]:
return ["TORCH_INDUCTOR_CPP_WRAPPER", "STANDALONE_TORCH_HEADER"]
def _use_custom_generated_macros() -> list[str]:
return [" C10_USING_CUSTOM_GENERATED_MACROS"]
def _use_fb_internal_macros() -> list[str]:
if not _IS_WINDOWS:
if config.is_fbcode():
fb_internal_macros = [
"C10_USE_GLOG",
"C10_USE_MINIMAL_GLOG",
"C10_DISABLE_TENSORIMPL_EXTENSIBILITY",
]
return fb_internal_macros
else:
return []
else:
return []
def _setup_standard_sys_libs(
cpp_compiler: str,
aot_mode: bool,
use_relative_path: bool,
) -> tuple[list[str], list[str], list[str]]:
cflags: list[str] = []
include_dirs: list[str] = []
passthrough_args: list[str] = []
if _IS_WINDOWS:
return cflags, include_dirs, passthrough_args
if config.is_fbcode():
# TODO(T203137008) Can we unify these flags with triton_cc_command?
cflags.append("nostdinc")
# Note that the order of include paths do matter, as a result
# we need to have several branches interleaved here
include_dirs.append(build_paths.sleef_include)
include_dirs.append(build_paths.openmp_include)
include_dirs.append(build_paths.python_include)
include_dirs.append(build_paths.cc_include)
include_dirs.append(build_paths.libgcc_include)
include_dirs.append(build_paths.libgcc_arch_include)
include_dirs.append(build_paths.libgcc_backward_include)
include_dirs.append(build_paths.glibc_include)
include_dirs.append(build_paths.linux_kernel_include)
include_dirs.append("include")
if aot_mode and not use_relative_path:
linker_script = _LINKER_SCRIPT
else:
linker_script = os.path.basename(_LINKER_SCRIPT)
if _is_clang(cpp_compiler):
passthrough_args.append(" --rtlib=compiler-rt")
passthrough_args.append(" -fuse-ld=lld")
passthrough_args.append(f" -Wl,--script={linker_script}")
passthrough_args.append(" -B" + build_paths.glibc_lib)
passthrough_args.append(" -L" + build_paths.glibc_lib)
return cflags, include_dirs, passthrough_args
def _get_build_args_of_chosen_isa(vec_isa: VecISA) -> tuple[list[str], list[str]]:
macros: list[str] = []
build_flags: list[str] = []
if vec_isa != invalid_vec_isa:
# Add Windows support later.
macros.extend(copy.deepcopy(x) for x in vec_isa.build_macro())
build_flags = [vec_isa.build_arch_flags()]
if config.is_fbcode():
cap = str(vec_isa).upper()
macros = [
f"CPU_CAPABILITY={cap}",
f"CPU_CAPABILITY_{cap}",
f"HAVE_{cap}_CPU_DEFINITION",
]
return macros, build_flags
def _get_torch_related_args(
include_pytorch: bool, aot_mode: bool
) -> tuple[list[str], list[str], list[str]]:
from torch.utils.cpp_extension import include_paths, TORCH_LIB_PATH
libraries = []
include_dirs = include_paths()
if config.aot_inductor.link_libtorch:
libraries_dirs = [TORCH_LIB_PATH]
if sys.platform != "darwin" and not config.is_fbcode():
libraries.extend(["torch", "torch_cpu"])
if not aot_mode:
libraries.append("torch_python")
else:
libraries_dirs = []
if config.aot_inductor.cross_target_platform == "windows":
aoti_shim_library = config.aot_inductor.aoti_shim_library
assert aoti_shim_library, (
"'config.aot_inductor.aoti_shim_library' must be set when 'cross_target_platform' is 'windows'."
)
if isinstance(aoti_shim_library, str):
libraries.append(aoti_shim_library)
else:
assert isinstance(aoti_shim_library, list)
libraries.extend(aoti_shim_library)
if config.aot_inductor.cross_target_platform == "windows":
assert config.aot_inductor.aoti_shim_library_path, (
"'config.aot_inductor.aoti_shim_library_path' must be set to the path of the AOTI shim library",
" when 'cross_target_platform' is 'windows'.",
)
libraries_dirs.append(config.aot_inductor.aoti_shim_library_path)
if _IS_WINDOWS:
libraries.append("sleef")
return include_dirs, libraries_dirs, libraries
def _get_python_include_dirs() -> list[str]:
include_dir = Path(sysconfig.get_path("include"))
# On Darwin Python executable from a framework can return
# non-existing /Library/Python/... include path, in which case
# one should use Headers folder from the framework
if not include_dir.exists() and platform.system() == "Darwin":
std_lib = Path(sysconfig.get_path("stdlib"))
include_dir = (std_lib.parent.parent / "Headers").absolute()
if not (include_dir / "Python.h").exists():
warnings.warn(f"Can't find Python.h in {str(include_dir)}")
return [str(include_dir)]
def _get_python_related_args() -> tuple[list[str], list[str]]:
python_include_dirs = _get_python_include_dirs()
python_include_path = sysconfig.get_path(
"include", scheme="nt" if _IS_WINDOWS else "posix_prefix"
)
if python_include_path is not None:
python_include_dirs.append(python_include_path)
if _IS_WINDOWS:
python_lib_path = [
str(
(
Path(sysconfig.get_path("include", scheme="nt")).parent / "libs"
).absolute()
)
]
else:
python_lib_path = [sysconfig.get_config_var("LIBDIR")]
if config.is_fbcode():
python_include_dirs.append(build_paths.python_include)
return python_include_dirs, python_lib_path
@functools.cache
def is_conda_llvm_openmp_installed() -> bool:
try:
command = "conda list llvm-openmp --json"
output = subprocess.check_output(command.split()).decode("utf8")
return len(json.loads(output)) > 0
except (subprocess.SubprocessError, FileNotFoundError):
return False
@functools.cache
def homebrew_libomp() -> tuple[bool, str]:
try:
# check if `brew` is installed
if shutil.which("brew") is None:
return False, ""
# get the location of `libomp` if it is installed
# this is the location that `libomp` **would** be installed
# see https://github.com/Homebrew/brew/issues/10261#issuecomment-756563567 for details
libomp_path = (
subprocess.check_output(["brew", "--prefix", "libomp"])
.decode("utf8")
.strip()
)
# check if `libomp` is installed
omp_available = os.path.exists(libomp_path)
return omp_available, libomp_path
except subprocess.SubprocessError:
return False, ""
@functools.cache
def perload_clang_libomp_win(cpp_compiler: str, omp_name: str) -> None:
try:
output = subprocess.check_output([cpp_compiler, "-print-file-name=bin"]).decode(
"utf8"
)
omp_path = os.path.join(output.rstrip(), omp_name)
if os.path.isfile(omp_path):
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
cdll.LoadLibrary(omp_path)
except subprocess.SubprocessError:
pass
@functools.cache
def perload_icx_libomp_win(cpp_compiler: str) -> None:
def _load_icx_built_in_lib_by_name(cpp_compiler: str, lib_name: str) -> bool:
try:
output = subprocess.check_output(
[cpp_compiler, f"-print-file-name={lib_name}"],
stderr=subprocess.DEVNULL,
).decode(*SUBPROCESS_DECODE_ARGS)
omp_path = output.rstrip()
if os.path.isfile(omp_path):
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
cdll.LoadLibrary(omp_path)
return True
except subprocess.SubprocessError:
pass
return False
"""
Intel Compiler implemented more math libraries than clang, for performance proposal.
We need preload them like openmp library.
"""
preload_list = [
"libiomp5md.dll", # openmp
"svml_dispmd.dll", # svml library
"libmmd.dll", # libm
]
for lib_name in preload_list:
_load_icx_built_in_lib_by_name(cpp_compiler, lib_name)
def _get_openmp_args(
cpp_compiler: str,
) -> tuple[list[str], list[str], list[str], list[str], list[str], list[str]]:
cflags: list[str] = []
ldflags: list[str] = []
include_dir_paths: list[str] = []
lib_dir_paths: list[str] = []
libs: list[str] = []
passthrough_args: list[str] = []
if config.aot_inductor.cross_target_platform == "windows":
return cflags, ldflags, include_dir_paths, lib_dir_paths, libs, passthrough_args
if _IS_MACOS:
# Per https://mac.r-project.org/openmp/ right way to pass `openmp` flags to MacOS is via `-Xclang`
cflags.append("Xclang")
cflags.append("fopenmp")
# only Apple builtin compilers (Apple Clang++) require openmp
omp_available = not _is_apple_clang(cpp_compiler)
# check the `OMP_PREFIX` environment first
omp_prefix = os.getenv("OMP_PREFIX")
if omp_prefix is not None:
header_path = os.path.join(omp_prefix, "include", "omp.h")
valid_env = os.path.exists(header_path)
if valid_env:
include_dir_paths.append(os.path.join(omp_prefix, "include"))
lib_dir_paths.append(os.path.join(omp_prefix, "lib"))
else:
warnings.warn("environment variable `OMP_PREFIX` is invalid.")
omp_available = omp_available or valid_env
if not omp_available:
libs.append("omp")
# prefer to use openmp from `conda install llvm-openmp`
conda_prefix = os.getenv("CONDA_PREFIX")
if not omp_available and conda_prefix is not None:
omp_available = is_conda_llvm_openmp_installed()
if omp_available:
conda_lib_path = os.path.join(conda_prefix, "lib")
include_dir_paths.append(os.path.join(conda_prefix, "include"))
lib_dir_paths.append(conda_lib_path)
# Prefer Intel OpenMP on x86 machine
if os.uname().machine == "x86_64" and os.path.exists(
os.path.join(conda_lib_path, "libiomp5.dylib")
):
libs.append("iomp5")
# next, try to use openmp from `brew install libomp`
if not omp_available:
omp_available, libomp_path = homebrew_libomp()
if omp_available:
include_dir_paths.append(os.path.join(libomp_path, "include"))
lib_dir_paths.append(os.path.join(libomp_path, "lib"))
# if openmp is still not available, we let the compiler to have a try,
# and raise error together with instructions at compilation error later
elif _IS_WINDOWS:
"""
On Windows, `clang` and `icx` have their specific openmp implenmention.
And the openmp lib is in compiler's some sub-directory.
For dynamic library(DLL) load, the Windows native APIs are `LoadLibraryA` and `LoadLibraryExA`, and their search
dependencies have some rules:
https://learn.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexa#searching-for-dlls-and-dependencies
In some case, the rules may not include compiler's sub-directories.
So, it can't search and load compiler's openmp library correctly.
And then, the whole application would be broken.
To avoid the openmp load failed, we can automatic locate the openmp binary and preload it.
1. For clang, the function is `perload_clang_libomp_win`.
2. For icx, the function is `perload_icx_libomp_win`.
"""
if _is_clang(cpp_compiler):
cflags.append("openmp")
libs.append("libomp")
perload_clang_libomp_win(cpp_compiler, "libomp.dll")
elif _is_intel_compiler(cpp_compiler):
cflags.append("Qiopenmp")
libs.append("libiomp5md")
perload_icx_libomp_win(cpp_compiler)
else:
# /openmp, /openmp:llvm
# llvm on Windows, new openmp: https://devblogs.microsoft.com/cppblog/msvc-openmp-update/
# msvc openmp: https://learn.microsoft.com/zh-cn/cpp/build/reference/openmp-enable-openmp-2-0-support?view=msvc-170
cflags.append("openmp")
cflags.append("openmp:experimental") # MSVC CL
else:
if config.is_fbcode():
include_dir_paths.append(build_paths.openmp_include)
openmp_lib = build_paths.openmp_lib_so
fb_openmp_extra_flags = f"-Wp,-fopenmp {openmp_lib}"
passthrough_args.append(fb_openmp_extra_flags)
libs.append("omp")
else:
if _is_clang(cpp_compiler):
# TODO: fix issue, can't find omp.h
cflags.append("fopenmp")
libs.append("gomp")
elif _is_intel_compiler(cpp_compiler):
cflags.append("fiopenmp")
else:
cflags.append("fopenmp")
libs.append("gomp")
return cflags, ldflags, include_dir_paths, lib_dir_paths, libs, passthrough_args
def _get_libstdcxx_args() -> tuple[list[str], list[str]]:
"""
For fbcode cpu case, we should link stdc++ instead assuming the binary where dlopen is executed is built with dynamic stdc++.
"""
lib_dir_paths: list[str] = []
libs: list[str] = []
if config.is_fbcode():
lib_dir_paths = [sysconfig.get_config_var("LIBDIR")]
libs.append("stdc++")
return lib_dir_paths, libs
def get_mmap_self_macro(
use_mmap_weights: bool, use_mmap_weights_external: bool
) -> list[str]:
macros = []
if use_mmap_weights and use_mmap_weights_external:
raise RuntimeError(
"Only one of use_mmap_weights and use_mmap_weights_external should be true"
)
if use_mmap_weights:
macros.append(" USE_MMAP_SELF")
elif use_mmap_weights_external:
macros.append(" USE_MMAP_EXTERNAL")
return macros
def get_caching_allocator_macro() -> list[str]:
from torch._inductor import config
macros = []
if config.aot_inductor.weight_use_caching_allocator:
macros.append(" AOT_INDUCTOR_USE_CACHING_ALLOCATOR")
return macros
def get_cpp_torch_options(
cpp_compiler: str,
vec_isa: VecISA,
include_pytorch: bool,
aot_mode: bool,
use_relative_path: bool,
use_mmap_weights: bool,
use_mmap_weights_external: bool,
) -> tuple[list[str], list[str], list[str], list[str], list[str], list[str], list[str]]:
"""
This function is used to get the build args of torch related build options.
1. Torch include_directories, libraries, libraries_directories.
2. Python include_directories, libraries, libraries_directories.
3. OpenMP related.
4. Torch MACROs.
5. MISC
6. Return the build args
"""
definitions: list[str] = []
include_dirs: list[str] = []
cflags: list[str] = []
ldflags: list[str] = []
libraries_dirs: list[str] = []
libraries: list[str] = []
passthrough_args: list[str] = []
torch_cpp_wrapper_definitions = _get_torch_cpp_wrapper_definition()
use_custom_generated_macros_definitions = _use_custom_generated_macros()
(
sys_libs_cflags,
sys_libs_include_dirs,
sys_libs_passthrough_args,
) = _setup_standard_sys_libs(cpp_compiler, aot_mode, use_relative_path)
isa_macros, isa_ps_args_build_flags = _get_build_args_of_chosen_isa(vec_isa)
(
torch_include_dirs,
torch_libraries_dirs,
torch_libraries,
) = _get_torch_related_args(include_pytorch=include_pytorch, aot_mode=aot_mode)
python_include_dirs, python_libraries_dirs = _get_python_related_args()
(
omp_cflags,
omp_ldflags,
omp_include_dir_paths,
omp_lib_dir_paths,
omp_lib,
omp_passthrough_args,
) = _get_openmp_args(cpp_compiler)
fb_macro_passthrough_args = _use_fb_internal_macros()
mmap_self_macros = get_mmap_self_macro(use_mmap_weights, use_mmap_weights_external)
caching_allocator_macros = get_caching_allocator_macro()
definitions = (
torch_cpp_wrapper_definitions
+ use_custom_generated_macros_definitions
+ isa_macros
+ fb_macro_passthrough_args
+ mmap_self_macros
+ caching_allocator_macros
)
include_dirs = (
sys_libs_include_dirs
+ python_include_dirs
+ torch_include_dirs
+ omp_include_dir_paths
)
cflags = sys_libs_cflags + omp_cflags
ldflags = omp_ldflags
libraries_dirs = python_libraries_dirs + torch_libraries_dirs + omp_lib_dir_paths
libraries = torch_libraries + omp_lib
passthrough_args = (
sys_libs_passthrough_args + isa_ps_args_build_flags + omp_passthrough_args
)
return (
definitions,
include_dirs,
cflags,
ldflags,
libraries_dirs,
libraries,
passthrough_args,
)
| CppOptions |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_ltc_address.py | {
"start": 890,
"end": 1891
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_ltc_address"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_ltc_address(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidLtcAddress |
python | sympy__sympy | sympy/physics/secondquant.py | {
"start": 19298,
"end": 23188
} | class ____(FermionicOperator, Creator):
"""
Fermionic creation operator.
"""
op_symbol = 'f+'
def _dagger_(self):
return AnnihilateFermion(self.state)
def apply_operator(self, state):
"""
Apply state to self if self is not symbolic and state is a FockStateKet, else
multiply self by state.
Examples
========
>>> from sympy.physics.secondquant import B, Dagger, BKet
>>> from sympy.abc import x, y, n
>>> Dagger(B(x)).apply_operator(y)
y*CreateBoson(x)
>>> B(0).apply_operator(BKet((n,)))
sqrt(n)*FockStateBosonKet((n - 1,))
"""
if isinstance(state, FockStateFermionKet):
element = self.state
return state.up(element)
elif isinstance(state, Mul):
c_part, nc_part = state.args_cnc()
if isinstance(nc_part[0], FockStateFermionKet):
element = self.state
return Mul(*(c_part + [nc_part[0].up(element)] + nc_part[1:]))
return Mul(self, state)
@property
def is_q_creator(self):
"""
Can we create a quasi-particle? (create hole or create particle)
If so, would that be above or below the fermi surface?
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_q_creator
1
>>> Fd(i).is_q_creator
0
>>> Fd(p).is_q_creator
1
"""
if self.is_above_fermi:
return 1
return 0
@property
def is_q_annihilator(self):
"""
Can we destroy a quasi-particle? (annihilate hole or annihilate particle)
If so, would that be above or below the fermi surface?
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=1)
>>> i = Symbol('i', below_fermi=1)
>>> p = Symbol('p')
>>> Fd(a).is_q_annihilator
0
>>> Fd(i).is_q_annihilator
-1
>>> Fd(p).is_q_annihilator
-1
"""
if self.is_below_fermi:
return -1
return 0
@property
def is_only_q_creator(self):
"""
Always create a quasi-particle? (create hole or create particle)
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_only_q_creator
True
>>> Fd(i).is_only_q_creator
False
>>> Fd(p).is_only_q_creator
False
"""
return self.is_only_above_fermi
@property
def is_only_q_annihilator(self):
"""
Always destroy a quasi-particle? (annihilate hole or annihilate particle)
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_only_q_annihilator
False
>>> Fd(i).is_only_q_annihilator
True
>>> Fd(p).is_only_q_annihilator
False
"""
return self.is_only_below_fermi
def __repr__(self):
return "CreateFermion(%s)" % self.state
def _latex(self, printer):
if self.state is S.Zero:
return "{a^\\dagger_{0}}"
else:
return "{a^\\dagger_{%s}}" % printer._print(self.state)
Fd = CreateFermion
F = AnnihilateFermion
| CreateFermion |
python | pytorch__pytorch | torch/_inductor/loop_body.py | {
"start": 2115,
"end": 2293
} | class ____(NamedTuple):
index_name: str # LoopBody.indexing_exprs[index_name]
buffer_name: Optional[str]
mode: Optional[str] # V.ops.store(..., mode=mode)
| MemoryEntry |
python | huggingface__transformers | src/transformers/models/perception_lm/modular_perception_lm.py | {
"start": 3129,
"end": 4259
} | class ____(LlavaModelOutputWithPast):
r"""
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
Image hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
video_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_videos, sequence_length, hidden_size)`.
Video hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
video_hidden_states: Optional[torch.FloatTensor] = None
| PerceptionLMModelOutputWithPast |
python | python-openxml__python-docx | src/docx/oxml/numbering.py | {
"start": 2659,
"end": 3965
} | class ____(BaseOxmlElement):
"""``<w:numbering>`` element, the root element of a numbering part, i.e.
numbering.xml."""
num = ZeroOrMore("w:num", successors=("w:numIdMacAtCleanup",))
def add_num(self, abstractNum_id):
"""Return a newly added CT_Num (<w:num>) element referencing the abstract
numbering definition identified by `abstractNum_id`."""
next_num_id = self._next_numId
num = CT_Num.new(next_num_id, abstractNum_id)
return self._insert_num(num)
def num_having_numId(self, numId):
"""Return the ``<w:num>`` child element having ``numId`` attribute matching
`numId`."""
xpath = './w:num[@w:numId="%d"]' % numId
try:
return self.xpath(xpath)[0]
except IndexError:
raise KeyError("no <w:num> element with numId %d" % numId)
@property
def _next_numId(self):
"""The first ``numId`` unused by a ``<w:num>`` element, starting at 1 and
filling any gaps in numbering between existing ``<w:num>`` elements."""
numId_strs = self.xpath("./w:num/@w:numId")
num_ids = [int(numId_str) for numId_str in numId_strs]
for num in range(1, len(num_ids) + 2):
if num not in num_ids:
break
return num
| CT_Numbering |
python | huggingface__transformers | src/transformers/models/gemma/modular_gemma.py | {
"start": 10592,
"end": 13491
} | class ____(LlamaModel):
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
causal_mask_mapping = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
# embed positions
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
# normalized
# Gemma downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5
# See https://github.com/huggingface/transformers/pull/29402
normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
hidden_states = hidden_states * normalizer
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
| GemmaModel |
python | openai__openai-python | src/openai/types/chat/chat_completion_audio.py | {
"start": 157,
"end": 655
} | class ____(BaseModel):
id: str
"""Unique identifier for this audio response."""
data: str
"""
Base64 encoded audio bytes generated by the model, in the format specified in
the request.
"""
expires_at: int
"""
The Unix timestamp (in seconds) for when this audio response will no longer be
accessible on the server for use in multi-turn conversations.
"""
transcript: str
"""Transcript of the audio generated by the model."""
| ChatCompletionAudio |
python | kamyu104__LeetCode-Solutions | Python/minimum-fuel-cost-to-report-to-the-capital.py | {
"start": 45,
"end": 1310
} | class ____(object):
def minimumFuelCost(self, roads, seats):
"""
:type roads: List[List[int]]
:type seats: int
:rtype: int
"""
def ceil_divide(a, b):
return (a+b-1)//b
def iter_dfs():
result = 0
stk = [(1, (0, -1, 0, [1]))]
while stk:
step, args = stk.pop()
if step == 1:
u, p, d, ret = args
stk.append((3, (d, ret)))
for v in adj[u]:
if v == p:
continue
new_ret = [1]
stk.append((2, (new_ret, ret)))
stk.append((1, (v, u, d+1, new_ret)))
elif step == 2:
new_ret, ret = args
ret[0] += new_ret[0]
elif step == 3:
d, ret = args
if d:
result += ceil_divide(ret[0], seats)
return result
adj = [[] for _ in xrange(len(roads)+1)]
for u, v in roads:
adj[u].append(v)
adj[v].append(u)
return iter_dfs()
# Time: O(n)
# Space: O(h)
# dfs
| Solution |
python | keon__algorithms | tests/test_array.py | {
"start": 13144,
"end": 13495
} | class ____(unittest.TestCase):
def test_limit(self):
self.assertListEqual(limit([1, 2, 3, 4, 5]), [1, 2, 3, 4, 5])
self.assertListEqual(limit([1, 2, 3, 4, 5], 2, 4), [2, 3, 4])
self.assertListEqual(limit([1, 2, 3, 4, 5], 2), [2, 3, 4, 5])
self.assertListEqual(limit([1, 2, 3, 4, 5], None, 4), [1, 2, 3, 4])
| TestLimit |
python | walkccc__LeetCode | solutions/3353. Minimum Total Operations/3353.py | {
"start": 0,
"end": 127
} | class ____:
def minOperations(self, nums: list[int]) -> int:
return sum(a != b for a, b in itertools.pairwise(nums))
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 27581,
"end": 28311
} | class ____(AssetSelection):
selected_asset_keys: Sequence[AssetKey]
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
return set()
def resolve_checks_inner( # pyright: ignore[reportIncompatibleMethodOverride]
self, asset_graph: AssetGraph, allow_missing: bool
) -> AbstractSet[AssetCheckKey]:
return {
handle
for handle in asset_graph.asset_check_keys
if handle.asset_key in self.selected_asset_keys
}
def to_serializable_asset_selection(self, asset_graph: BaseAssetGraph) -> "AssetSelection":
return self
@whitelist_for_serdes
@record
| AssetChecksForAssetKeysSelection |
python | ray-project__ray | python/ray/dag/dag_operation_future.py | {
"start": 641,
"end": 1174
} | class ____(DAGOperationFuture):
"""
A future that is already resolved. Calling `wait()` on this will
immediately return the result without blocking.
"""
def __init__(self, result):
"""
Initialize a resolved future.
Args:
result: The result of the future.
"""
self._result = result
def wait(self):
"""
Wait and immediately return the result. This operation will not block.
"""
return self._result
@DeveloperAPI
| ResolvedFuture |
python | scipy__scipy | benchmarks/benchmarks/stats.py | {
"start": 1177,
"end": 1530
} | class ____(Benchmark):
def setup(self):
rng = np.random.default_rng(12345678)
self.a = rng.random((6,3)) * 10
self.b = rng.random((6,3)) * 10
self.c = rng.random((6,3)) * 10
def time_f_oneway(self):
stats.f_oneway(self.a, self.b, self.c)
stats.f_oneway(self.a, self.b, self.c, axis=1)
| ANOVAFunction |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.