language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/granitemoeshared/modular_granitemoeshared.py | {
"start": 4898,
"end": 5058
} | class ____(GraniteMoePreTrainedModel):
config: GraniteMoeSharedConfig
_no_split_modules = ["GraniteMoeSharedDecoderLayer"]
| GraniteMoeSharedPreTrainedModel |
python | getsentry__sentry | tests/sentry/relocation/api/endpoints/artifacts/test_index.py | {
"start": 575,
"end": 1510
} | class ____(APITestCase):
endpoint = "sentry-api-0-relocations-artifacts-index"
method = "GET"
def setUp(self) -> None:
super().setUp()
self.owner = self.create_user(email="owner@example.com", is_superuser=False, is_staff=False)
self.superuser = self.create_user(is_superuser=True)
self.staff_user = self.create_user(is_staff=True)
self.relocation: Relocation = Relocation.objects.create(
date_added=TEST_DATE_ADDED,
creator_id=self.owner.id,
owner_id=self.owner.id,
status=Relocation.Status.PAUSE.value,
step=Relocation.Step.PREPROCESSING.value,
want_org_slugs=["foo"],
want_usernames=["alice", "bob"],
latest_notified=Relocation.EmailKind.STARTED.value,
latest_task=OrderedTask.PREPROCESSING_SCAN.name,
latest_task_attempts=1,
)
| GetRelocationArtifactsTest |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_metaclass_py3.py | {
"start": 245,
"end": 306
} | class ____(metaclass=valid_metaclass_1):
pass
| GoodMetaclass |
python | django__django | tests/modeladmin/test_checks.py | {
"start": 48275,
"end": 51459
} | class ____(CheckTestCase):
def test_list_display_links_is_none(self):
"""
list_display and list_editable can contain the same values
when list_display_links is None
"""
class ProductAdmin(ModelAdmin):
list_display = ["name", "slug", "pub_date"]
list_editable = list_display
list_display_links = None
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_same_as_list_editable_first_item(self):
"""
The first item in list_display can be the same as the first in
list_editable.
"""
class ProductAdmin(ModelAdmin):
list_display = ["name", "slug", "pub_date"]
list_editable = ["name", "slug"]
list_display_links = ["pub_date"]
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_in_list_editable(self):
"""
The first item in list_display can be in list_editable as long as
list_display_links is defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ["name", "slug", "pub_date"]
list_editable = ["slug", "name"]
list_display_links = ["pub_date"]
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_same_as_list_editable_no_list_display_links(self):
"""
The first item in list_display cannot be the same as the first item
in list_editable if list_display_links is not defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ["name"]
list_editable = ["name"]
self.assertIsInvalid(
ProductAdmin,
ValidationTestModel,
"The value of 'list_editable[0]' refers to the first field "
"in 'list_display' ('name'), which cannot be used unless "
"'list_display_links' is set.",
id="admin.E124",
)
def test_list_display_first_item_in_list_editable_no_list_display_links(self):
"""
The first item in list_display cannot be in list_editable if
list_display_links isn't defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ["name", "slug", "pub_date"]
list_editable = ["slug", "name"]
self.assertIsInvalid(
ProductAdmin,
ValidationTestModel,
"The value of 'list_editable[1]' refers to the first field "
"in 'list_display' ('name'), which cannot be used unless "
"'list_display_links' is set.",
id="admin.E124",
)
def test_both_list_editable_and_list_display_links(self):
class ProductAdmin(ModelAdmin):
list_editable = ("name",)
list_display = ("name",)
list_display_links = ("name",)
self.assertIsInvalid(
ProductAdmin,
ValidationTestModel,
"The value of 'name' cannot be in both 'list_editable' and "
"'list_display_links'.",
id="admin.E123",
)
| ListDisplayEditableTests |
python | pyqtgraph__pyqtgraph | pyqtgraph/GraphicsScene/exportDialogTemplate_generic.py | {
"start": 347,
"end": 2922
} | class ____(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(241, 367)
self.gridLayout = QtWidgets.QGridLayout(Form)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(Form)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 3)
self.itemTree = QtWidgets.QTreeWidget(Form)
self.itemTree.setObjectName("itemTree")
self.itemTree.headerItem().setText(0, "1")
self.itemTree.header().setVisible(False)
self.gridLayout.addWidget(self.itemTree, 1, 0, 1, 3)
self.label_2 = QtWidgets.QLabel(Form)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 2, 0, 1, 3)
self.formatList = QtWidgets.QListWidget(Form)
self.formatList.setObjectName("formatList")
self.gridLayout.addWidget(self.formatList, 3, 0, 1, 3)
self.exportBtn = QtWidgets.QPushButton(Form)
self.exportBtn.setObjectName("exportBtn")
self.gridLayout.addWidget(self.exportBtn, 6, 1, 1, 1)
self.closeBtn = QtWidgets.QPushButton(Form)
self.closeBtn.setObjectName("closeBtn")
self.gridLayout.addWidget(self.closeBtn, 6, 2, 1, 1)
self.paramTree = ParameterTree(Form)
self.paramTree.setColumnCount(2)
self.paramTree.setObjectName("paramTree")
self.paramTree.headerItem().setText(0, "1")
self.paramTree.header().setVisible(False)
self.gridLayout.addWidget(self.paramTree, 5, 0, 1, 3)
self.label_3 = QtWidgets.QLabel(Form)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 4, 0, 1, 3)
self.copyBtn = QtWidgets.QPushButton(Form)
self.copyBtn.setObjectName("copyBtn")
self.gridLayout.addWidget(self.copyBtn, 6, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Export"))
self.label.setText(_translate("Form", "Item to export:"))
self.label_2.setText(_translate("Form", "Export format"))
self.exportBtn.setText(_translate("Form", "Export"))
self.closeBtn.setText(_translate("Form", "Close"))
self.label_3.setText(_translate("Form", "Export options"))
self.copyBtn.setText(_translate("Form", "Copy"))
from ..parametertree import ParameterTree
| Ui_Form |
python | numpy__numpy | numpy/f2py/tests/test_callback.py | {
"start": 6729,
"end": 7099
} | class ____(util.F2PyTest):
sources = [util.getpath("tests", "src", "callback", "gh26681.f90")]
options = ['--lower']
def test_callstatement_fortran(self):
with pytest.raises(ValueError, match='helpme') as exc:
self.module.mypy_abort = self.module.utils.my_abort
self.module.utils.do_something('helpme')
| TestCBFortranCallstatement |
python | sqlalchemy__sqlalchemy | test/ext/asyncio/test_engine.py | {
"start": 33941,
"end": 37475
} | class ____(EngineFixture):
"""The engine events all run in their normal synchronous context.
we do not provide an asyncio event interface at this time.
"""
__backend__ = True
@async_test
async def test_no_async_listeners(self, async_engine):
with testing.expect_raises_message(
NotImplementedError,
"asynchronous events are not implemented "
"at this time. Apply synchronous listeners to the "
"AsyncEngine.sync_engine or "
"AsyncConnection.sync_connection attributes.",
):
event.listen(async_engine, "before_cursor_execute", mock.Mock())
async with async_engine.connect() as conn:
with testing.expect_raises_message(
NotImplementedError,
"asynchronous events are not implemented "
"at this time. Apply synchronous listeners to the "
"AsyncEngine.sync_engine or "
"AsyncConnection.sync_connection attributes.",
):
event.listen(conn, "before_cursor_execute", mock.Mock())
@async_test
async def test_no_async_listeners_dialect_event(self, async_engine):
with testing.expect_raises_message(
NotImplementedError,
"asynchronous events are not implemented "
"at this time. Apply synchronous listeners to the "
"AsyncEngine.sync_engine or "
"AsyncConnection.sync_connection attributes.",
):
event.listen(async_engine, "do_execute", mock.Mock())
@async_test
async def test_no_async_listeners_pool_event(self, async_engine):
with testing.expect_raises_message(
NotImplementedError,
"asynchronous events are not implemented "
"at this time. Apply synchronous listeners to the "
"AsyncEngine.sync_engine or "
"AsyncConnection.sync_connection attributes.",
):
event.listen(async_engine, "checkout", mock.Mock())
@async_test
async def test_sync_before_cursor_execute_engine(self, async_engine):
canary = mock.Mock()
event.listen(async_engine.sync_engine, "before_cursor_execute", canary)
async with async_engine.connect() as conn:
sync_conn = conn.sync_connection
await conn.execute(select(1))
s1 = str(select(1).compile(async_engine))
eq_(
canary.mock_calls,
[mock.call(sync_conn, mock.ANY, s1, mock.ANY, mock.ANY, False)],
)
@async_test
async def test_sync_before_cursor_execute_connection(self, async_engine):
canary = mock.Mock()
async with async_engine.connect() as conn:
sync_conn = conn.sync_connection
event.listen(
async_engine.sync_engine, "before_cursor_execute", canary
)
await conn.execute(select(1))
s1 = str(select(1).compile(async_engine))
eq_(
canary.mock_calls,
[mock.call(sync_conn, mock.ANY, s1, mock.ANY, mock.ANY, False)],
)
@async_test
async def test_event_on_sync_connection(self, async_engine):
canary = mock.Mock()
async with async_engine.connect() as conn:
event.listen(conn.sync_connection, "begin", canary)
async with conn.begin():
eq_(
canary.mock_calls,
[mock.call(conn.sync_connection)],
)
| AsyncEventTest |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 27170,
"end": 27279
} | class ____(BaseModel, extra="forbid"):
datetime_key: str = Field(..., description="")
| DatetimeKeyExpression |
python | etianen__django-reversion | reversion/middleware.py | {
"start": 73,
"end": 679
} | class ____:
"""Wraps the entire request in a revision."""
manage_manually = False
using = None
atomic = True
def __init__(self, get_response):
self.get_response = create_revision(
manage_manually=self.manage_manually,
using=self.using,
atomic=self.atomic,
request_creates_revision=self.request_creates_revision
)(get_response)
def request_creates_revision(self, request):
return _request_creates_revision(request)
def __call__(self, request):
return self.get_response(request)
| RevisionMiddleware |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_bash_code_execution_result_block.py | {
"start": 317,
"end": 525
} | class ____(BaseModel):
content: List[BetaBashCodeExecutionOutputBlock]
return_code: int
stderr: str
stdout: str
type: Literal["bash_code_execution_result"]
| BetaBashCodeExecutionResultBlock |
python | getsentry__sentry | src/sentry/snuba/metrics/query.py | {
"start": 3958,
"end": 4072
} | class ____(MetricActionByField):
direction: Direction = Direction.ASC
@dataclass(frozen=True)
| MetricOrderByField |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-pdb/llama_index/readers/pdb/base.py | {
"start": 265,
"end": 1275
} | class ____(BaseReader):
"""Protein Data Bank entries' primary citation abstract reader."""
def __init__(self) -> None:
super().__init__()
def load_data(self, pdb_ids: List[str]) -> List[Document]:
"""
Load data from RCSB or EBI REST API.
Args:
pdb_ids (List[str]): List of PDB ids \
for which primary citation abstract are to be read.
"""
results = []
for pdb_id in pdb_ids:
title, abstracts = get_pdb_abstract(pdb_id)
primary_citation = abstracts[title]
abstract = primary_citation["abstract"]
abstract_text = "\n".join(
["\n".join([str(k), str(v)]) for k, v in abstract.items()]
)
results.append(
Document(
text=abstract_text,
extra_info={"pdb_id": pdb_id, "primary_citation": primary_citation},
)
)
return results
| PdbAbstractReader |
python | openai__openai-python | src/openai/types/eval_create_params.py | {
"start": 3088,
"end": 3488
} | class ____(TypedDict, total=False):
type: Required[Literal["stored_completions"]]
"""The type of data source. Always `stored_completions`."""
metadata: Dict[str, object]
"""Metadata filters for the stored completions data source."""
DataSourceConfig: TypeAlias = Union[DataSourceConfigCustom, DataSourceConfigLogs, DataSourceConfigStoredCompletions]
| DataSourceConfigStoredCompletions |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 105850,
"end": 106257
} | class ____(Structure):
_fields_ = [('version', c_uint),
('environment', c_uint),
('ccFeature', c_uint),
('devToolsMode', c_uint),
('multiGpuMode', c_uint),
]
def __init__(self):
super(c_nvmlSystemConfComputeSettings_v1_t, self).__init__(version=nvmlSystemConfComputeSettings_v1)
| c_nvmlSystemConfComputeSettings_v1_t |
python | huggingface__transformers | src/transformers/models/sam3/configuration_sam3.py | {
"start": 11866,
"end": 13952
} | class ____(PreTrainedConfig):
r"""
Configuration class for SAM3 DETR Decoder (object query decoder).
Args:
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the decoder layers.
num_layers (`int`, *optional*, defaults to 6):
Number of decoder layers.
num_queries (`int`, *optional*, defaults to 200):
Number of object queries.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimensionality of the feedforward layers.
dropout (`float`, *optional*, defaults to 0.1):
Dropout probability.
hidden_act (`str`, *optional*, defaults to `"relu"`):
Activation function in FFN.
hidden_dropout (`float`, *optional*, defaults to 0.0):
Dropout probability for hidden states.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
Epsilon for layer normalization.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing weight matrices.
"""
model_type = "sam3_detr_decoder"
def __init__(
self,
hidden_size=256,
num_layers=6,
num_queries=200,
num_attention_heads=8,
intermediate_size=2048,
dropout=0.1,
hidden_act="relu",
hidden_dropout=0.0,
layer_norm_eps=1e-6,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_queries = num_queries
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.hidden_act = hidden_act
self.hidden_dropout = hidden_dropout
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
| Sam3DETRDecoderConfig |
python | zarr-developers__zarr-python | src/zarr/core/config.py | {
"start": 1462,
"end": 1526
} | class ____(ValueError):
_msg = "bad Config: %r"
| BadConfigError |
python | numpy__numpy | numpy/f2py/tests/test_return_complex.py | {
"start": 79,
"end": 1818
} | class ____(util.F2PyTest):
def check_function(self, t, tname):
if tname in ["t0", "t8", "s0", "s8"]:
err = 1e-5
else:
err = 0.0
assert abs(t(234j) - 234.0j) <= err
assert abs(t(234.6) - 234.6) <= err
assert abs(t(234) - 234.0) <= err
assert abs(t(234.6 + 3j) - (234.6 + 3j)) <= err
# assert abs(t('234')-234.)<=err
# assert abs(t('234.6')-234.6)<=err
assert abs(t(-234) + 234.0) <= err
assert abs(t([234]) - 234.0) <= err
assert abs(t((234, )) - 234.0) <= err
assert abs(t(array(234)) - 234.0) <= err
assert abs(t(array(23 + 4j, "F")) - (23 + 4j)) <= err
assert abs(t(array([234])) - 234.0) <= err
assert abs(t(array([[234]])) - 234.0) <= err
assert abs(t(array([234]).astype("b")) + 22.0) <= err
assert abs(t(array([234], "h")) - 234.0) <= err
assert abs(t(array([234], "i")) - 234.0) <= err
assert abs(t(array([234], "l")) - 234.0) <= err
assert abs(t(array([234], "q")) - 234.0) <= err
assert abs(t(array([234], "f")) - 234.0) <= err
assert abs(t(array([234], "d")) - 234.0) <= err
assert abs(t(array([234 + 3j], "F")) - (234 + 3j)) <= err
assert abs(t(array([234], "D")) - 234.0) <= err
# pytest.raises(TypeError, t, array([234], 'S1'))
pytest.raises(TypeError, t, "abc")
pytest.raises(IndexError, t, [])
pytest.raises(IndexError, t, ())
pytest.raises(TypeError, t, t)
pytest.raises(TypeError, t, {})
try:
r = t(10**400)
assert repr(r) in ["(inf+0j)", "(Infinity+0j)"]
except OverflowError:
pass
| TestReturnComplex |
python | doocs__leetcode | solution/0300-0399/0315.Count of Smaller Numbers After Self/Solution2.py | {
"start": 95,
"end": 1180
} | class ____:
def __init__(self, n):
self.tr = [Node() for _ in range(n << 2)]
self.build(1, 1, n)
def build(self, u, l, r):
self.tr[u].l = l
self.tr[u].r = r
if l == r:
return
mid = (l + r) >> 1
self.build(u << 1, l, mid)
self.build(u << 1 | 1, mid + 1, r)
def modify(self, u, x, v):
if self.tr[u].l == x and self.tr[u].r == x:
self.tr[u].v += v
return
mid = (self.tr[u].l + self.tr[u].r) >> 1
if x <= mid:
self.modify(u << 1, x, v)
else:
self.modify(u << 1 | 1, x, v)
self.pushup(u)
def query(self, u, l, r):
if self.tr[u].l >= l and self.tr[u].r <= r:
return self.tr[u].v
mid = (self.tr[u].l + self.tr[u].r) >> 1
v = 0
if l <= mid:
v += self.query(u << 1, l, r)
if r > mid:
v += self.query(u << 1 | 1, l, r)
return v
def pushup(self, u):
self.tr[u].v = self.tr[u << 1].v + self.tr[u << 1 | 1].v
| SegmentTree |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/executors/ecs/test_ecs_executor.py | {
"start": 52951,
"end": 81940
} | class ____:
@pytest.fixture
def assign_subnets(self):
with conf_vars({(CONFIG_GROUP_NAME, AllEcsConfigKeys.SUBNETS): "sub1,sub2"}):
yield
@pytest.fixture
def assign_container_name(self):
with conf_vars({(CONFIG_GROUP_NAME, AllEcsConfigKeys.CONTAINER_NAME): "foobar"}):
yield
def test_flatten_dict(self):
nested_dict = {"a": "a", "b": "b", "c": {"d": "d"}}
assert _recursive_flatten_dict(nested_dict) == {"a": "a", "b": "b", "d": "d"}
@pytest.mark.skipif(
RUNNING_TESTS_AGAINST_AIRFLOW_PACKAGES,
reason="Config defaults are validated against provider.yaml so this test "
"should only run when tests are run from sources",
)
def test_validate_config_defaults(self):
"""Assert that the defaults stated in the config.yml file match those in utils.CONFIG_DEFAULTS.
This test should only be run to verify configuration defaults are the same when it is run from
airflow sources, not when airflow is installed from packages, because airflow installed from packages
will not have the provider.yml file.
"""
from airflow.providers.amazon import __file__ as provider_path
config_filename = os.path.join(os.path.dirname(provider_path), "provider.yaml")
with open(config_filename) as config:
options = yaml.safe_load(config)["config"][CONFIG_GROUP_NAME]["options"]
file_defaults = {
option: default for (option, value) in options.items() if (default := value.get("default"))
}
assert len(file_defaults) == len(CONFIG_DEFAULTS)
for key in file_defaults.keys():
assert file_defaults[key] == CONFIG_DEFAULTS[key]
def test_subnets_required(self):
conf_overrides = {
(CONFIG_GROUP_NAME, AllEcsConfigKeys.SUBNETS): None,
(CONFIG_GROUP_NAME, AllEcsConfigKeys.REGION_NAME): "us-west-1",
(CONFIG_GROUP_NAME, AllEcsConfigKeys.CLUSTER): "some-cluster",
(CONFIG_GROUP_NAME, AllEcsConfigKeys.CONTAINER_NAME): "container-name",
(CONFIG_GROUP_NAME, AllEcsConfigKeys.TASK_DEFINITION): "some-task-def",
(CONFIG_GROUP_NAME, AllEcsConfigKeys.LAUNCH_TYPE): "FARGATE",
(CONFIG_GROUP_NAME, AllEcsConfigKeys.PLATFORM_VERSION): "LATEST",
(CONFIG_GROUP_NAME, AllEcsConfigKeys.ASSIGN_PUBLIC_IP): "False",
(CONFIG_GROUP_NAME, AllEcsConfigKeys.SECURITY_GROUPS): "sg1,sg2",
}
with conf_vars(conf_overrides):
with pytest.raises(ValueError, match="At least one subnet is required to run a task"):
ecs_executor_config.build_task_kwargs(conf)
# TODO: When merged this needs updating to the actually supported version
@pytest.mark.skip(
reason="Test requires a version of airflow which includes updates to support multi team"
)
def test_team_config(self):
# Team name to be used throughout
team_name = "team_a"
# Patch environment to include two sets of configs for the ECS executor. One that is related to a
# team and one that is not. The we will create two executors (one with a team and one without) and
# ensure the correct configs are used.
config_overrides = [
(f"AIRFLOW__{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.CLUSTER}", "some_cluster"),
(f"AIRFLOW__{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.CONTAINER_NAME}", "container_name"),
(f"AIRFLOW__{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.TASK_DEFINITION}", "some_task_def"),
(f"AIRFLOW__{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.LAUNCH_TYPE}", "FARGATE"),
(f"AIRFLOW__{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.PLATFORM_VERSION}", "LATEST"),
(f"AIRFLOW__{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.ASSIGN_PUBLIC_IP}", "False"),
(f"AIRFLOW__{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.SECURITY_GROUPS}", "sg1,sg2"),
(f"AIRFLOW__{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.SUBNETS}", "sub1,sub2"),
(f"AIRFLOW__{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.REGION_NAME}", "us-west-1"),
# team Config
(f"AIRFLOW__{team_name}___{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.CLUSTER}", "team_a_cluster"),
(
f"AIRFLOW__{team_name}___{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.CONTAINER_NAME}",
"team_a_container",
),
(
f"AIRFLOW__{team_name}___{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.TASK_DEFINITION}",
"team_a_task_def",
),
(f"AIRFLOW__{team_name}___{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.LAUNCH_TYPE}", "EC2"),
(
f"AIRFLOW__{team_name}___{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.SECURITY_GROUPS}",
"team_a_sg1,team_a_sg2",
),
(
f"AIRFLOW__{team_name}___{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.SUBNETS}",
"team_a_sub1,team_a_sub2",
),
(f"AIRFLOW__{team_name}___{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.REGION_NAME}", "us-west-2"),
]
with patch("os.environ", {key.upper(): value for key, value in config_overrides}):
team_executor = AwsEcsExecutor(team_name=team_name)
task_kwargs = ecs_executor_config.build_task_kwargs(team_executor.conf)
assert task_kwargs["cluster"] == "team_a_cluster"
assert task_kwargs["overrides"]["containerOverrides"][0]["name"] == "team_a_container"
assert task_kwargs["networkConfiguration"]["awsvpcConfiguration"] == {
"subnets": ["team_a_sub1", "team_a_sub2"],
"securityGroups": ["team_a_sg1", "team_a_sg2"],
}
assert task_kwargs["launchType"] == "EC2"
assert task_kwargs["taskDefinition"] == "team_a_task_def"
# Now create an executor without a team and ensure the non-team configs are used.
non_team_executor = AwsEcsExecutor()
task_kwargs = ecs_executor_config.build_task_kwargs(non_team_executor.conf)
assert task_kwargs["cluster"] == "some_cluster"
assert task_kwargs["overrides"]["containerOverrides"][0]["name"] == "container_name"
assert task_kwargs["networkConfiguration"]["awsvpcConfiguration"] == {
"subnets": ["sub1", "sub2"],
"securityGroups": ["sg1", "sg2"],
"assignPublicIp": "DISABLED",
}
assert task_kwargs["launchType"] == "FARGATE"
assert task_kwargs["taskDefinition"] == "some_task_def"
@conf_vars({(CONFIG_GROUP_NAME, AllEcsConfigKeys.CONTAINER_NAME): "container-name"})
def test_config_defaults_are_applied(self, assign_subnets):
from airflow.providers.amazon.aws.executors.ecs import ecs_executor_config
task_kwargs = _recursive_flatten_dict(ecs_executor_config.build_task_kwargs(conf))
found_keys = {convert_camel_to_snake(key): key for key in task_kwargs.keys()}
for expected_key, expected_value_raw in CONFIG_DEFAULTS.items():
# conn_id, max_run_task_attempts, and check_health_on_startup are used by the executor,
# but are not expected to appear in the task_kwargs.
if expected_key in [
AllEcsConfigKeys.AWS_CONN_ID,
AllEcsConfigKeys.MAX_RUN_TASK_ATTEMPTS,
AllEcsConfigKeys.CHECK_HEALTH_ON_STARTUP,
]:
assert expected_key not in found_keys.keys()
else:
assert expected_key in found_keys.keys()
# Make sure to convert "assign_public_ip" from True/False to ENABLE/DISABLE.
expected_value = (
parse_assign_public_ip(expected_value_raw)
if expected_key is AllEcsConfigKeys.ASSIGN_PUBLIC_IP
else expected_value_raw
)
assert expected_value == task_kwargs[found_keys[expected_key]]
def test_provided_values_override_defaults(self, assign_subnets, assign_container_name, monkeypatch):
"""
Expected precedence is default values are overwritten by values provided explicitly,
and those values are overwritten by those provided in run_task_kwargs.
"""
run_task_kwargs_env_key = f"AIRFLOW__{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.RUN_TASK_KWARGS}".upper()
platform_version_env_key = (
f"AIRFLOW__{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.PLATFORM_VERSION}".upper()
)
default_version = CONFIG_DEFAULTS[AllEcsConfigKeys.PLATFORM_VERSION]
templated_version = "1"
first_explicit_version = "2"
second_explicit_version = "3"
# Confirm the default value is applied when no value is provided.
monkeypatch.delenv(platform_version_env_key, raising=False)
monkeypatch.delenv(run_task_kwargs_env_key, raising=False)
from airflow.providers.amazon.aws.executors.ecs import ecs_executor_config
task_kwargs = ecs_executor_config.build_task_kwargs(conf)
assert task_kwargs["platformVersion"] == default_version
# Provide a new value explicitly and assert that it is applied over the default.
monkeypatch.setenv(platform_version_env_key, first_explicit_version)
task_kwargs = ecs_executor_config.build_task_kwargs(conf)
assert task_kwargs["platformVersion"] == first_explicit_version
# Provide a value via template and assert that it is applied over the explicit value.
monkeypatch.setenv(
run_task_kwargs_env_key,
json.dumps({AllEcsConfigKeys.PLATFORM_VERSION: templated_version}),
)
task_kwargs = ecs_executor_config.build_task_kwargs(conf)
assert task_kwargs["platformVersion"] == templated_version
# Provide a new value explicitly and assert it is not applied over the templated values.
monkeypatch.setenv(platform_version_env_key, second_explicit_version)
task_kwargs = ecs_executor_config.build_task_kwargs(conf)
assert task_kwargs["platformVersion"] == templated_version
@mock.patch.object(EcsHook, "conn")
def test_count_can_not_be_modified_by_the_user(
self, _, assign_subnets, assign_container_name, monkeypatch
):
"""The ``count`` parameter must always be 1; verify that the user can not override this value."""
templated_version = "1"
templated_cluster = "templated_cluster_name"
provided_run_task_kwargs = {
AllEcsConfigKeys.PLATFORM_VERSION: templated_version,
AllEcsConfigKeys.CLUSTER: templated_cluster,
"count": 2, # The user should not be allowed to overwrite count, it must be value of 1
}
# Provide values via task run kwargs template and assert that they are applied,
# which verifies that the OTHER values were changed.
monkeypatch.setenv(
f"AIRFLOW__{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.RUN_TASK_KWARGS}".upper(),
json.dumps(provided_run_task_kwargs),
)
task_kwargs = ecs_executor_config.build_task_kwargs(conf)
assert task_kwargs["platformVersion"] == templated_version
assert task_kwargs["cluster"] == templated_cluster
# Assert that count was NOT overridden when the others were applied.
assert task_kwargs["count"] == 1
def test_verify_tags_are_used_as_provided(self, assign_subnets, assign_container_name, monkeypatch):
"""Confirm that the ``tags`` provided are not converted to camelCase."""
templated_tags = {"Apache": "Airflow"}
provided_run_task_kwargs = {
"tags": templated_tags, # The user should be allowed to pass arbitrary run task args
}
run_task_kwargs_env_key = f"AIRFLOW__{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.RUN_TASK_KWARGS}".upper()
monkeypatch.setenv(run_task_kwargs_env_key, json.dumps(provided_run_task_kwargs))
task_kwargs = ecs_executor_config.build_task_kwargs(conf)
# Verify that tag names are exempt from the camel-case conversion.
assert task_kwargs["tags"] == templated_tags
def test_that_provided_kwargs_are_moved_to_correct_nesting(self, monkeypatch):
"""
kwargs such as subnets, security groups, public ip, and container name are valid run task kwargs,
but they are not placed at the root of the kwargs dict, they should be nested in various sub dicts.
Ensure we don't leave any behind in the wrong location.
"""
kwargs_to_test = {
AllEcsConfigKeys.CONTAINER_NAME: "foobar",
AllEcsConfigKeys.ASSIGN_PUBLIC_IP: "True",
AllEcsConfigKeys.SECURITY_GROUPS: "sg1,sg2",
AllEcsConfigKeys.SUBNETS: "sub1,sub2",
}
for key, value in kwargs_to_test.items():
monkeypatch.setenv(f"AIRFLOW__{CONFIG_GROUP_NAME}__{key}".upper(), value)
run_task_kwargs = ecs_executor_config.build_task_kwargs(conf)
run_task_kwargs_network_config = run_task_kwargs["networkConfiguration"]["awsvpcConfiguration"]
for key, value in kwargs_to_test.items():
# Assert that the values are not at the root of the kwargs
camelized_key = camelize(key, uppercase_first_letter=False)
assert key not in run_task_kwargs
assert camelized_key not in run_task_kwargs
if key == AllEcsConfigKeys.CONTAINER_NAME:
# The actual ECS run_task_kwarg is "name" not "containerName"
assert run_task_kwargs["overrides"]["containerOverrides"][0]["name"] == value
elif key == AllEcsConfigKeys.ASSIGN_PUBLIC_IP:
# The value for this kwarg is cast from bool to enabled/disabled
assert run_task_kwargs_network_config[camelized_key] == "ENABLED"
else:
assert run_task_kwargs_network_config[camelized_key] == value.split(",")
def test_start_failure_with_invalid_permissions(self, set_env_vars):
executor = AwsEcsExecutor()
# Replace boto3 ECS client with mock.
ecs_mock = mock.Mock(spec=executor.ecs)
mock_resp = {
"Error": {
"Code": "AccessDeniedException",
"Message": "no identity-based policy allows the ecs:StopTask action",
}
}
ecs_mock.stop_task.side_effect = ClientError(mock_resp, "StopTask")
executor.ecs = ecs_mock
with pytest.raises(AirflowException, match=mock_resp["Error"]["Message"]):
executor.start()
def test_start_failure_with_invalid_cluster_name(self, set_env_vars):
executor = AwsEcsExecutor()
# Replace boto3 ECS client with mock.
ecs_mock = mock.Mock(spec=executor.ecs)
mock_resp = {"Error": {"Code": "ClusterNotFoundException", "Message": "Cluster not found."}}
ecs_mock.stop_task.side_effect = ClientError(mock_resp, "StopTask")
executor.ecs = ecs_mock
with pytest.raises(AirflowException, match=mock_resp["Error"]["Message"]):
executor.start()
def test_start_success(self, set_env_vars, caplog):
executor = AwsEcsExecutor()
# Replace boto3 ECS client with mock.
ecs_mock = mock.Mock(spec=executor.ecs)
mock_resp = {
"Error": {"Code": "InvalidParameterException", "Message": "The referenced task was not found."}
}
ecs_mock.stop_task.side_effect = ClientError(mock_resp, "StopTask")
executor.ecs = ecs_mock
caplog.set_level(logging.DEBUG)
executor.start()
assert "succeeded" in caplog.text
def test_start_health_check_config(self, set_env_vars):
executor = AwsEcsExecutor()
# Replace boto3 ECS client with mock.
ecs_mock = mock.Mock(spec=executor.ecs)
mock_resp = {
"Error": {"Code": "InvalidParameterException", "Message": "The referenced task was not found."}
}
ecs_mock.stop_task.side_effect = ClientError(mock_resp, "StopTask")
executor.ecs = ecs_mock
with conf_vars({(CONFIG_GROUP_NAME, AllEcsConfigKeys.CHECK_HEALTH_ON_STARTUP): "False"}):
executor.start()
ecs_mock.stop_task.assert_not_called()
def test_providing_both_capacity_provider_and_launch_type_fails(self, set_env_vars, monkeypatch):
cps = "[{'capacityProvider': 'cp1', 'weight': 5}, {'capacityProvider': 'cp2', 'weight': 1}]"
expected_error = re.escape(
"capacity_provider_strategy and launch_type are mutually exclusive, you can not provide both."
)
with conf_vars({(CONFIG_GROUP_NAME, AllEcsConfigKeys.CAPACITY_PROVIDER_STRATEGY): cps}):
with pytest.raises(ValueError, match=expected_error):
AwsEcsExecutor()
def test_providing_capacity_provider(self, set_env_vars):
# If a capacity provider strategy is supplied without a launch type, use the strategy.
valid_capacity_provider = (
"[{'capacityProvider': 'cp1', 'weight': 5}, {'capacityProvider': 'cp2', 'weight': 1}]"
)
conf_overrides = {
(CONFIG_GROUP_NAME, AllEcsConfigKeys.CAPACITY_PROVIDER_STRATEGY): valid_capacity_provider,
(CONFIG_GROUP_NAME, AllEcsConfigKeys.LAUNCH_TYPE): None,
}
with conf_vars(conf_overrides):
from airflow.providers.amazon.aws.executors.ecs import ecs_executor_config
task_kwargs = ecs_executor_config.build_task_kwargs(conf)
assert "launchType" not in task_kwargs
assert task_kwargs["capacityProviderStrategy"] == valid_capacity_provider
@mock.patch.object(EcsHook, "conn")
def test_providing_no_capacity_provider_no_lunch_type_with_cluster_default(self, mock_conn, set_env_vars):
# If no capacity provider strategy is supplied and no launch type, but the
# cluster has a default capacity provider strategy, use the cluster's default.
mock_conn.describe_clusters.return_value = {
"clusters": [{"defaultCapacityProviderStrategy": ["some_strategy"]}]
}
with conf_vars({(CONFIG_GROUP_NAME, AllEcsConfigKeys.LAUNCH_TYPE): None}):
from airflow.providers.amazon.aws.executors.ecs import ecs_executor_config
task_kwargs = ecs_executor_config.build_task_kwargs(conf)
assert "launchType" not in task_kwargs
assert "capacityProviderStrategy" not in task_kwargs
mock_conn.describe_clusters.assert_called_once()
@mock.patch.object(EcsHook, "conn")
def test_providing_no_capacity_provider_no_lunch_type_no_cluster_default(self, mock_conn, set_env_vars):
# If no capacity provider strategy is supplied and no launch type, and the cluster
# does not have a default capacity provider strategy, use the FARGATE launch type.
mock_conn.describe_clusters.return_value = {"clusters": [{"status": "ACTIVE"}]}
with conf_vars({(CONFIG_GROUP_NAME, AllEcsConfigKeys.LAUNCH_TYPE): None}):
from airflow.providers.amazon.aws.executors.ecs import ecs_executor_config
task_kwargs = ecs_executor_config.build_task_kwargs(conf)
assert task_kwargs["launchType"] == "FARGATE"
@pytest.mark.parametrize(
("run_task_kwargs", "exec_config", "expected_result"),
[
# No input run_task_kwargs or executor overrides
(
{},
{},
{
"taskDefinition": "some-task-def",
"launchType": "FARGATE",
"cluster": "some-cluster",
"platformVersion": "LATEST",
"count": 1,
"overrides": {
"containerOverrides": [
{
"command": ["command"],
"name": "container-name",
"environment": [{"name": "AIRFLOW_IS_EXECUTOR_CONTAINER", "value": "true"}],
}
]
},
"networkConfiguration": {
"awsvpcConfiguration": {
"subnets": ["sub1", "sub2"],
"securityGroups": ["sg1", "sg2"],
"assignPublicIp": "DISABLED",
}
},
},
),
# run_task_kwargs provided, not exec_config
(
{
"startedBy": "Banana",
"tags": [{"key": "FOO", "value": "BAR"}],
"overrides": {
"containerOverrides": [
{
"name": "container-name",
"memory": 500,
"cpu": 10,
"environment": [{"name": "X", "value": "Y"}],
}
]
},
},
{},
{
"startedBy": "Banana",
"tags": [{"key": "FOO", "value": "BAR"}],
"taskDefinition": "some-task-def",
"launchType": "FARGATE",
"cluster": "some-cluster",
"platformVersion": "LATEST",
"count": 1,
"overrides": {
"containerOverrides": [
{
"memory": 500,
"cpu": 10,
"command": ["command"],
"name": "container-name",
"environment": [
{"name": "X", "value": "Y"},
# Added by the ecs executor
{"name": "AIRFLOW_IS_EXECUTOR_CONTAINER", "value": "true"},
],
}
]
},
# Added by the ecs executor
"networkConfiguration": {
"awsvpcConfiguration": {
"subnets": ["sub1", "sub2"],
"securityGroups": ["sg1", "sg2"],
"assignPublicIp": "DISABLED",
}
},
},
),
# exec_config provided, no run_task_kwargs
(
{},
{
"startedBy": "Banana",
"tags": [{"key": "FOO", "value": "BAR"}],
"overrides": {
"containerOverrides": [
{
"name": "container-name",
"memory": 500,
"cpu": 10,
"environment": [{"name": "X", "value": "Y"}],
}
]
},
},
{
"startedBy": "Banana",
"tags": [{"key": "FOO", "value": "BAR"}],
"taskDefinition": "some-task-def",
"launchType": "FARGATE",
"cluster": "some-cluster",
"platformVersion": "LATEST",
"count": 1,
"overrides": {
"containerOverrides": [
{
"memory": 500,
"cpu": 10,
"command": ["command"],
"name": "container-name",
"environment": [
{"name": "X", "value": "Y"},
# Added by the ecs executor
{"name": "AIRFLOW_IS_EXECUTOR_CONTAINER", "value": "true"},
],
}
]
},
# Added by the ecs executor
"networkConfiguration": {
"awsvpcConfiguration": {
"subnets": ["sub1", "sub2"],
"securityGroups": ["sg1", "sg2"],
"assignPublicIp": "DISABLED",
}
},
},
),
# Both run_task_kwargs and executor_config provided. The latter should override the former,
# following a recursive python dict update strategy
(
{
"startedBy": "Banana",
"tags": [{"key": "FOO", "value": "BAR"}],
"taskDefinition": "foobar",
"overrides": {
"containerOverrides": [
{
"name": "container-name",
"memory": 500,
"cpu": 10,
"environment": [{"name": "X", "value": "Y"}],
}
]
},
},
{
"startedBy": "Fish",
"tags": [{"key": "X", "value": "Y"}, {"key": "W", "value": "Z"}],
"overrides": {
"containerOverrides": [
{
"name": "container-name",
"memory": 300,
"environment": [{"name": "W", "value": "Z"}],
}
]
},
},
{
# tags and startedBy are overridden by exec_config
"startedBy": "Fish",
# List types overwrite entirely, as python dict update would do
"tags": [{"key": "X", "value": "Y"}, {"key": "W", "value": "Z"}],
# taskDefinition remains since it is not a list type and not overridden by exec config
"taskDefinition": "foobar",
"launchType": "FARGATE",
"cluster": "some-cluster",
"platformVersion": "LATEST",
"count": 1,
"overrides": {
"containerOverrides": [
{
"memory": 300,
# cpu is not present because it was missing from the container overrides in
# the exec_config
"command": ["command"],
"name": "container-name",
"environment": [
# Overridden list type
{"name": "W", "value": "Z"}, # Only new env vars present, overwritten
# Added by the ecs executor
{"name": "AIRFLOW_IS_EXECUTOR_CONTAINER", "value": "true"},
],
}
]
},
# Added by the ecs executor
"networkConfiguration": {
"awsvpcConfiguration": {
"subnets": ["sub1", "sub2"],
"securityGroups": ["sg1", "sg2"],
"assignPublicIp": "DISABLED",
}
},
},
),
],
)
def test_run_task_kwargs_exec_config_overrides(
self, set_env_vars, run_task_kwargs, exec_config, expected_result, monkeypatch
):
run_task_kwargs_env_key = f"AIRFLOW__{CONFIG_GROUP_NAME}__{AllEcsConfigKeys.RUN_TASK_KWARGS}".upper()
monkeypatch.setenv(run_task_kwargs_env_key, json.dumps(run_task_kwargs))
mock_ti_key = mock.Mock(spec=TaskInstanceKey)
command = ["command"]
executor = AwsEcsExecutor()
final_run_task_kwargs = executor._run_task_kwargs(mock_ti_key, command, "queue", exec_config)
assert final_run_task_kwargs == expected_result
def test_short_import_path(self):
from airflow.providers.amazon.aws.executors.ecs import AwsEcsExecutor as AwsEcsExecutorShortPath
assert AwsEcsExecutor is AwsEcsExecutorShortPath
| TestEcsExecutorConfig |
python | getsentry__sentry | src/sentry/integrations/github/webhook.py | {
"start": 31323,
"end": 35417
} | class ____(Endpoint):
"""
GitHub Webhook API reference:
https://docs.github.com/en/webhooks-and-events/webhooks/about-webhooks
"""
authentication_classes = ()
permission_classes = ()
owner = ApiOwner.ECOSYSTEM
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
_handlers: dict[str, type[GitHubWebhook]] = {
GithubWebhookType.PUSH: PushEventWebhook,
GithubWebhookType.PULL_REQUEST: PullRequestEventWebhook,
GithubWebhookType.INSTALLATION: InstallationEventWebhook,
GithubWebhookType.ISSUE: IssuesEventWebhook,
}
def get_handler(self, event_type: str) -> type[GitHubWebhook] | None:
return self._handlers.get(event_type)
def is_valid_signature(self, method: str, body: bytes, secret: str, signature: str) -> bool:
if method == "sha256":
mod = hashlib.sha256
elif method == "sha1":
mod = hashlib.sha1
else:
raise NotImplementedError(f"signature method {method} is not supported")
expected = hmac.new(key=secret.encode("utf-8"), msg=body, digestmod=mod).hexdigest()
return constant_time_compare(expected, signature)
@method_decorator(csrf_exempt)
def dispatch(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
if request.method != "POST":
return HttpResponse(status=405)
return super().dispatch(request, *args, **kwargs)
def get_logging_data(self) -> dict[str, Any] | None:
return {
"request_method": self.request.method,
"request_path": self.request.path,
}
def get_secret(self) -> str | None:
return options.get("github-app.webhook-secret")
def post(self, request: HttpRequest) -> HttpResponse:
return self.handle(request)
def handle(self, request: HttpRequest) -> HttpResponse:
clear_tags_and_context()
secret = self.get_secret()
if secret is None:
logger.error("github.webhook.missing-secret", extra=self.get_logging_data())
return HttpResponse(status=401)
body = bytes(request.body)
if not body:
logger.error("github.webhook.missing-body", extra=self.get_logging_data())
return HttpResponse(status=400)
try:
handler = self.get_handler(request.META["HTTP_X_GITHUB_EVENT"])
except KeyError:
logger.exception("github.webhook.missing-event", extra=self.get_logging_data())
logger.exception("Missing Github event in webhook.")
return HttpResponse(status=400)
if not handler:
logger.info(
"github.webhook.missing-handler",
extra={"event_type": request.META["HTTP_X_GITHUB_EVENT"]},
)
return HttpResponse(status=204)
try:
header = (
request.META.get("HTTP_X_HUB_SIGNATURE_256") or request.META["HTTP_X_HUB_SIGNATURE"]
)
method, signature = header.split("=", 1)
except (KeyError, ValueError):
logger.exception("github.webhook.missing-signature", extra=self.get_logging_data())
return HttpResponse(status=400)
if not self.is_valid_signature(method, body, secret, signature):
logger.error("github.webhook.invalid-signature", extra=self.get_logging_data())
return HttpResponse(status=401)
try:
event = orjson.loads(body)
except orjson.JSONDecodeError:
logger.exception("github.webhook.invalid-json", extra=self.get_logging_data())
logger.exception("Invalid JSON.")
return HttpResponse(status=400)
event_handler = handler()
with IntegrationWebhookEvent(
interaction_type=event_handler.event_type,
domain=IntegrationDomain.SOURCE_CODE_MANAGEMENT,
provider_key=event_handler.provider,
).capture():
event_handler(event)
return HttpResponse(status=204)
| GitHubIntegrationsWebhookEndpoint |
python | huggingface__transformers | src/transformers/models/apertus/modular_apertus.py | {
"start": 8907,
"end": 9175
} | class ____(NemotronMLP):
def __init__(self, config):
super().__init__()
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
| ApertusMLP |
python | walkccc__LeetCode | solutions/1487. Making File Names Unique/1487.py | {
"start": 0,
"end": 638
} | class ____:
def getFolderNames(self, names: list[str]) -> list[str]:
ans = []
nameToSuffix = {}
for name in names:
if name in nameToSuffix:
suffix = nameToSuffix[name] + 1
newName = self._getName(name, suffix)
while newName in nameToSuffix:
suffix += 1
newName = self._getName(name, suffix)
nameToSuffix[name] = suffix
nameToSuffix[newName] = 0
ans.append(newName)
else:
nameToSuffix[name] = 0
ans.append(name)
return ans
def _getName(self, name: str, suffix: int) -> str:
return name + '(' + str(suffix) + ')'
| Solution |
python | aimacode__aima-python | agents.py | {
"start": 21278,
"end": 21394
} | class ____(Obstacle):
pass
# ______________________________________________________________________________
| Wall |
python | Textualize__textual | src/textual/events.py | {
"start": 16996,
"end": 17158
} | class ____(MouseEvent, bubble=True, verbose=True):
"""Sent when the mouse wheel is scrolled *left*.
- [X] Bubbles
- [X] Verbose
"""
| MouseScrollLeft |
python | walkccc__LeetCode | solutions/3286. Find a Safe Walk Through a Grid/3286.py | {
"start": 0,
"end": 796
} | class ____:
def findSafeWalk(self, grid: list[list[int]], health: int) -> bool:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(grid)
n = len(grid[0])
initialHealth = health - grid[0][0]
q = collections.deque([(0, 0, initialHealth)])
seen = {(0, 0, initialHealth)}
while q:
for _ in range(len(q)):
i, j, h = q.popleft()
if i == m - 1 and j == n - 1 and h > 0:
return True
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
continue
nextHealth = h - grid[x][y]
if nextHealth <= 0 or (x, y, nextHealth) in seen:
continue
q.append((x, y, nextHealth))
seen.add((x, y, nextHealth))
return False
| Solution |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/cast_test.py | {
"start": 1108,
"end": 2376
} | class ____(trt_test.TfTrtIntegrationTestBase):
"""Tests cast to FP32 are split in FP16 mode."""
def _ConstOp(self, shape, dtype):
return constant_op.constant(np.random.randn(*shape), dtype=dtype)
def GraphFn(self, x):
b_f = self._ConstOp((1, 10), dtypes.float16)
x_f = math_ops.cast(x, dtypes.float16)
x_f = math_ops.mul(x_f, b_f) # FP16 Multiply
x_f = math_ops.cast(x_f, dtypes.float32)
b_f = self._ConstOp((1, 10), dtypes.float32)
x_f = math_ops.add(x_f, b_f) # FP32 Add
return array_ops.identity(x_f, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[1, 10]], [[1, 10]])
def ExpectedEnginesToBuild(self, run_params):
"""Returns the expected engines to build."""
return {"TRTEngineOp_000": ["AddV2", "Cast", "Const", "Mul"]}
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-03 if run_params.precision_mode == "FP32" else 1.e-02
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 1.e-03 if run_params.precision_mode == "FP32" else 1.e-02
if __name__ == "__main__":
test.main()
| CastInt32ToFp32Test |
python | pypa__setuptools | setuptools/_distutils/tests/test_dist.py | {
"start": 711,
"end": 1165
} | class ____(Distribution):
"""Distribution subclasses that avoids the default search for
configuration files.
The ._config_files attribute must be set before
.parse_config_files() is called.
"""
def find_config_files(self):
return self._config_files
@pytest.fixture
def clear_argv():
del sys.argv[1:]
@support.combine_markers
@pytest.mark.usefixtures('save_env')
@pytest.mark.usefixtures('save_argv')
| TestDistribution |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 33871,
"end": 34675
} | class ____(BaseModel):
type: Literal["RemoveFields"]
condition: Optional[str] = Field(
"",
description="The predicate to filter a property by a property value. Property will be removed if it is empty OR expression is evaluated to True.,",
examples=[
"{{ property|string == '' }}",
"{{ property is integer }}",
"{{ property|length > 5 }}",
"{{ property == 'some_string_to_match' }}",
],
)
field_pointers: List[List[str]] = Field(
...,
description="Array of paths defining the field to remove. Each item is an array whose field describe the path of a field to remove.",
examples=[["tags"], [["content", "html"], ["content", "plain_text"]]],
title="Field Paths",
)
| RemoveFields |
python | langchain-ai__langchain | libs/partners/anthropic/tests/integration_tests/test_chat_models.py | {
"start": 31498,
"end": 31608
} | class ____(BaseModel):
"""Person data."""
name: str
age: int
nicknames: list[str] | None
| Person |
python | facebook__pyre-check | client/commands/check.py | {
"start": 1217,
"end": 5770
} | class ____:
"""
Data structure for configuration options the backend check command can recognize.
Need to keep in sync with `source/command/checkCommand.ml`
"""
base_arguments: backend_arguments.BaseArguments
additional_logging_sections: Sequence[str] = dataclasses.field(default_factory=list)
show_error_traces: bool = False
strict: bool = False
def serialize(self) -> Dict[str, Any]:
return {
**self.base_arguments.serialize(),
"additional_logging_sections": self.additional_logging_sections,
"show_error_traces": self.show_error_traces,
"strict": self.strict,
}
def create_check_arguments(
configuration: frontend_configuration.Base,
check_arguments: command_arguments.CheckArguments,
) -> Arguments:
"""
Translate client configurations to backend check configurations.
This API is not pure since it needs to access filesystem to filter out
nonexistent directories. It is idempotent though, since it does not alter
any filesystem state.
"""
source_paths = backend_arguments.get_source_path_for_check(
configuration,
kill_buck_after_build=False,
number_of_buck_threads=None,
)
logging_sections = check_arguments.logging_sections
additional_logging_sections = (
[] if logging_sections is None else logging_sections.split(",")
)
if check_arguments.noninteractive:
additional_logging_sections.append("-progress")
log_directory = configuration.get_log_directory()
profiling_output = (
backend_arguments.get_profiling_log_path(log_directory)
if check_arguments.enable_profiling
else None
)
memory_profiling_output = (
backend_arguments.get_profiling_log_path(log_directory)
if check_arguments.enable_memory_profiling
else None
)
logger = configuration.get_remote_logger()
remote_logging = (
backend_arguments.RemoteLogging(
logger=logger, identifier=check_arguments.log_identifier or ""
)
if logger is not None
else None
)
return Arguments(
base_arguments=backend_arguments.BaseArguments(
log_path=str(log_directory),
global_root=str(configuration.get_global_root()),
checked_directory_allowlist=backend_arguments.get_checked_directory_allowlist(
configuration, source_paths
),
checked_directory_blocklist=(configuration.get_ignore_all_errors()),
debug=check_arguments.debug,
enable_readonly_analysis=configuration.get_enable_readonly_analysis(),
enable_strict_override_check=configuration.get_enable_strict_override_check(),
enable_strict_any_check=configuration.get_enable_strict_any_check(),
enable_unawaited_awaitable_analysis=(
configuration.get_enable_unawaited_awaitable_analysis()
),
excludes=configuration.get_excludes(),
extensions=configuration.get_valid_extension_suffixes(),
include_suppressed_errors=configuration.get_include_suppressed_errors(),
relative_local_root=configuration.get_relative_local_root(),
memory_profiling_output=memory_profiling_output,
number_of_workers=configuration.get_number_of_workers(),
parallel=not check_arguments.sequential,
profiling_output=profiling_output,
python_version=configuration.get_python_version(),
system_platform=configuration.get_system_platform(),
shared_memory=configuration.get_shared_memory(),
remote_logging=remote_logging,
search_paths=configuration.get_existent_search_paths(),
source_paths=source_paths,
),
additional_logging_sections=additional_logging_sections,
show_error_traces=check_arguments.show_error_traces,
strict=configuration.is_strict(),
)
@contextlib.contextmanager
def create_check_arguments_and_cleanup(
configuration: frontend_configuration.Base,
check_arguments: command_arguments.CheckArguments,
) -> Iterator[Arguments]:
arguments = create_check_arguments(configuration, check_arguments)
try:
yield arguments
finally:
# It is safe to clean up source paths after check command since
# any created artifact directory won't be reused by other commands.
arguments.base_arguments.source_paths.cleanup()
| Arguments |
python | h5py__h5py | h5py/tests/test_dataset.py | {
"start": 45375,
"end": 48187
} | class ____(BaseDataset):
"""
Feature: Datasets created with "maxshape" may be resized
"""
def test_create(self):
""" Create dataset with "maxshape" """
dset = self.f.create_dataset(make_name(), (20, 30), maxshape=(20, 60))
self.assertIsNot(dset.chunks, None)
self.assertEqual(dset.maxshape, (20, 60))
def test_create_1D_integer_maxshape_tuple(self):
""" Create dataset with "maxshape" using tuple shape and integer maxshape"""
dset = self.f.create_dataset(make_name(), (20,), maxshape=20)
self.assertIsNot(dset.chunks, None)
self.assertEqual(dset.maxshape, (20,))
def test_create_1D_integer_maxshape_integer(self):
""" Create dataset with "maxshape" using integer shape and integer maxshape"""
dset = self.f.create_dataset(make_name(), 20, maxshape=20)
self.assertEqual(dset.maxshape, (20,))
def test_resize(self):
""" Datasets may be resized up to maxshape """
dset = self.f.create_dataset(make_name(), (20, 30), maxshape=(20, 60))
self.assertEqual(dset.shape, (20, 30))
dset.resize((20, 50))
self.assertEqual(dset.shape, (20, 50))
dset.resize((20, 60))
self.assertEqual(dset.shape, (20, 60))
def test_resize_1D(self):
""" Datasets may be resized up to maxshape using integer maxshape"""
dset = self.f.create_dataset(make_name(), 20, maxshape=40)
self.assertEqual(dset.shape, (20,))
dset.resize((30,))
self.assertEqual(dset.shape, (30,))
def test_resize_over(self):
""" Resizing past maxshape triggers an exception """
dset = self.f.create_dataset(make_name(), (20, 30), maxshape=(20, 60))
with self.assertRaises(Exception):
dset.resize((20, 70))
def test_resize_nonchunked(self):
""" Resizing non-chunked dataset raises TypeError """
dset = self.f.create_dataset(make_name(), (20, 30))
with self.assertRaises(TypeError):
dset.resize((20, 60))
def test_resize_axis(self):
""" Resize specified axis """
dset = self.f.create_dataset(make_name(), (20, 30), maxshape=(20, 60))
dset.resize(50, axis=1)
self.assertEqual(dset.shape, (20, 50))
def test_axis_exc(self):
""" Illegal axis raises ValueError """
dset = self.f.create_dataset(make_name(), (20, 30), maxshape=(20, 60))
with self.assertRaises(ValueError):
dset.resize(50, axis=2)
def test_zero_dim(self):
""" Allow zero-length initial dims for unlimited axes (issue 111) """
dset = self.f.create_dataset(make_name(), (15, 0), maxshape=(15, None))
self.assertEqual(dset.shape, (15, 0))
self.assertEqual(dset.maxshape, (15, None))
| TestResize |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/roles.py | {
"start": 7325,
"end": 7464
} | class ____(SQLRole):
__slots__ = ()
_role_name = "String column name or column expression for DDL constraint"
| DDLConstraintColumnRole |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 554651,
"end": 555174
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of DeleteIpAllowListEntry"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "ip_allow_list_entry")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
ip_allow_list_entry = sgqlc.types.Field("IpAllowListEntry", graphql_name="ipAllowListEntry")
"""The IP allow list entry that was deleted."""
| DeleteIpAllowListEntryPayload |
python | getsentry__sentry | src/sentry/testutils/cases.py | {
"start": 16828,
"end": 18811
} | class ____(BaseTestCase, DjangoTestCase):
# We need Django to flush all databases.
databases: set[str] | str = "__all__"
@contextmanager
def auto_select_silo_mode_on_redirects(self):
"""
Tests that utilize follow=True may follow redirects between silo modes. This isn't ideal but convenient for
testing certain work flows. Using this context manager, the silo mode in the test will swap automatically
for each view's decorator in order to prevent otherwise unavoidable SiloAvailability errors.
"""
old_request = self.client.request
def request(**request: Any) -> Any:
resolved = resolve(request["PATH_INFO"])
view_class = getattr(resolved.func, "view_class", None)
if view_class is not None:
endpoint_silo_limit = getattr(view_class, "silo_limit", None)
if endpoint_silo_limit:
for mode in endpoint_silo_limit.modes:
if mode is SiloMode.MONOLITH or mode is SiloMode.get_current_mode():
continue
region = None
if mode is SiloMode.REGION:
# TODO: Can we infer the correct region here? would need to package up the
# the request dictionary into a higher level object, which also involves invoking
# _base_environ and maybe other logic buried in Client.....
region = get_region_by_name(settings.SENTRY_MONOLITH_REGION)
with (
SingleProcessSiloModeState.exit(),
SingleProcessSiloModeState.enter(mode, region),
):
return old_request(**request)
return old_request(**request)
with mock.patch.object(self.client, "request", new=request):
yield
| TestCase |
python | pyodide__pyodide | src/py/pyodide/console.py | {
"start": 9047,
"end": 20665
} | class ____:
"""Interactive Pyodide console
An interactive console based on the Python standard library
:py:class:`~code.InteractiveConsole` that manages stream redirections and
asynchronous execution of the code.
The stream callbacks can be modified directly by assigning to
:py:attr:`~Console.stdin_callback` (for example) as long as
``persistent_stream_redirection`` is ``False``.
Parameters
----------
globals :
The global namespace in which to evaluate the code. Defaults to a new
empty dictionary.
stdin_callback :
Function to call at each read from :py:data:`sys.stdin`. Defaults to :py:data:`None`.
stdout_callback :
Function to call at each write to :py:data:`sys.stdout`. Defaults to :py:data:`None`.
stderr_callback :
Function to call at each write to :py:data:`sys.stderr`. Defaults to :py:data:`None`.
persistent_stream_redirection :
Should redirection of standard streams be kept between calls to
:py:meth:`~Console.runcode`? Defaults to :py:data:`False`.
filename :
The file name to report in error messages. Defaults to ``"<console>"``.
dont_inherit :
Whether to inherit ``__future__`` imports from the outer code.
See the documentation for the built-in :external:py:func:`compile` function.
optimize :
Specifies the optimization level of the compiler. See the documentation
for the built-in :external:py:func:`compile` function.
"""
globals: dict[str, Any]
"""The namespace used as the globals"""
stdin_callback: Callable[[int], str] | None
"""The function to call at each read from :py:data:`sys.stdin`"""
stdout_callback: Callable[[str], int | None] | None
"""Function to call at each write to :py:data:`sys.stdout`."""
stderr_callback: Callable[[str], int | None] | None
"""Function to call at each write to :py:data:`sys.stderr`."""
buffer: list[str]
"""The list of lines of code that have been the argument to
:py:meth:`~Console.push`.
This is emptied whenever the code is executed.
"""
completer_word_break_characters: str
"""The set of characters considered by :py:meth:`~Console.complete` to be word breaks."""
def __init__(
self,
globals: dict[str, Any] | None = None,
*,
stdin_callback: Callable[[int], str] | None = None,
stdout_callback: Callable[[str], None] | None = None,
stderr_callback: Callable[[str], None] | None = None,
persistent_stream_redirection: bool = False,
filename: str = "<console>",
dont_inherit: bool = False,
optimize: int = -1,
) -> None:
if globals is None:
globals = {"__name__": "__console__", "__doc__": None}
self.globals = globals
self._stdout = None
self._stderr = None
self.stdin_callback = stdin_callback
self.stdout_callback = stdout_callback
self.stderr_callback = stderr_callback
self.filename = filename
self.buffer = []
self._lock = asyncio.Lock()
self._streams_redirected = False
self._stream_generator: Generator[None] | None = (
None # track persistent stream redirection
)
if persistent_stream_redirection:
self.persistent_redirect_streams()
self._completer = rlcompleter.Completer(self.globals)
# all nonalphanums except '.'
# see https://github.com/python/cpython/blob/a4258e8cd776ba655cc54ba54eaeffeddb0a267c/Modules/readline.c#L1211
self.completer_word_break_characters = (
""" \t\n`~!@#$%^&*()-=+[{]}\\|;:'\",<>/?"""
)
self._compile = _CommandCompiler(
flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT,
dont_inherit=dont_inherit,
optimize=optimize,
)
def persistent_redirect_streams(self) -> None:
"""Redirect :py:data:`~sys.stdin`/:py:data:`~sys.stdout`/:py:data:`~sys.stdout` persistently"""
if self._stream_generator:
return
self._stream_generator = self._stdstreams_redirections_inner()
assert self._stream_generator is not None
next(self._stream_generator) # trigger stream redirection
# streams will be reverted to normal when self._stream_generator is destroyed.
def persistent_restore_streams(self) -> None:
"""Restore :py:data:`~sys.stdin`/:py:data:`~sys.stdout`/:py:data:`~sys.stdout` if they have been persistently redirected"""
# allowing _stream_generator to be garbage collected restores the streams
self._stream_generator = None
@contextmanager
def redirect_streams(self) -> Generator[None]:
"""A context manager to redirect standard streams.
This supports nesting."""
yield from self._stdstreams_redirections_inner()
def _stdstreams_redirections_inner(self) -> Generator[None]:
"""This is the generator which implements redirect_streams and the stdstreams_redirections"""
# already redirected?
if self._streams_redirected:
yield
return
redirects: list[Any] = []
if self.stdin_callback:
stdin_name = getattr(sys.stdin, "name", "<stdin>")
stdin_stream = _ReadStream(self.stdin_callback, name=stdin_name)
redirects.append(redirect_stdin(stdin_stream))
if self.stdout_callback:
stdout_name = getattr(sys.stdout, "name", "<stdout>")
stdout_stream = _WriteStream(self.stdout_callback, name=stdout_name)
redirects.append(redirect_stdout(stdout_stream))
if self.stderr_callback:
stderr_name = getattr(sys.stderr, "name", "<stderr>")
stderr_stream = _WriteStream(self.stderr_callback, name=stderr_name)
redirects.append(redirect_stderr(stderr_stream))
try:
self._streams_redirected = True
with ExitStack() as stack:
for redirect in redirects:
stack.enter_context(redirect)
yield
finally:
self._streams_redirected = False
def runsource(self, source: str, filename: str = "<console>") -> ConsoleFuture:
"""Compile and run source code in the interpreter."""
res: ConsoleFuture | None
try:
code = self._compile(source, filename, "single")
except (OverflowError, SyntaxError, ValueError) as e:
# Case 1
if e.__traceback__:
traceback.clear_frames(e.__traceback__)
res = ConsoleFuture(SYNTAX_ERROR)
res.set_exception(e)
res.formatted_error = self.formatsyntaxerror(e)
return res
if code is None:
res = ConsoleFuture(INCOMPLETE)
res.set_result(None)
return res
res = ConsoleFuture(COMPLETE)
def done_cb(fut: asyncio.Task[Any]) -> None:
nonlocal res
assert res is not None
exc = fut.exception()
if exc:
res.formatted_error = self.formattraceback(exc)
res.set_exception(exc)
exc = None
else:
res.set_result(fut.result())
res = None
ensure_future(self._runcode_with_lock(source, code)).add_done_callback(done_cb)
return res
async def _runcode_with_lock(self, source: str, code: CodeRunner) -> Any:
async with self._lock:
return await self.runcode(source, code)
async def runcode(self, source: str, code: CodeRunner) -> Any:
"""Execute a code object and return the result."""
with self.redirect_streams():
try:
return await code.run_async(self.globals)
finally:
sys.stdout.flush()
sys.stderr.flush()
def formatsyntaxerror(self, e: Exception) -> str:
"""Format the syntax error that just occurred.
This doesn't include a stack trace because there isn't one. The actual
error object is stored into :py:data:`sys.last_value`.
"""
sys.last_exc = e
sys.last_type = type(e)
sys.last_value = e
sys.last_traceback = None
return "".join(traceback.format_exception_only(type(e), e))
def num_frames_to_keep(self, tb: TracebackType | None) -> int:
keep_frames = False
kept_frames = 0
# Try to trim out stack frames inside our code
for frame, _ in traceback.walk_tb(tb):
keep_frames = keep_frames or frame.f_code.co_filename == self.filename
keep_frames = keep_frames or frame.f_code.co_filename == "<exec>"
if keep_frames:
kept_frames += 1
return kept_frames
def formattraceback(self, e: BaseException) -> str:
"""Format the exception that just occurred.
The actual error object is stored into :py:data:`sys.last_value`.
"""
sys.last_exc = e
sys.last_type = type(e)
sys.last_value = e
sys.last_traceback = e.__traceback__
nframes = self.num_frames_to_keep(e.__traceback__)
return "".join(
traceback.format_exception(type(e), e, e.__traceback__, -nframes)
)
def push(self, line: str) -> ConsoleFuture:
"""Push a line to the interpreter.
The line should not have a trailing newline; it may have internal
newlines. The line is appended to a buffer and the interpreter's
:py:meth:`~Console.runsource` method is called with the concatenated contents of the
buffer as source. If this indicates that the command was executed or
invalid, the buffer is reset; otherwise, the command is incomplete, and
the buffer is left as it was after the line was appended.
The return value is the result of calling :py:meth:`~Console.runsource` on the current buffer
contents.
"""
self.buffer.append(line)
source = "\n".join(self.buffer)
result = self.runsource(source, self.filename)
if result.syntax_check != INCOMPLETE:
self.buffer = []
return result
def complete(self, source: str) -> tuple[list[str], int]:
r"""Use Python's :py:mod:`rlcompleter` to complete the source string
using the :py:attr:`Console.globals` namespace.
Finds the last "word" in the source string and completes it with
rlcompleter. Word breaks are determined by the set of characters in
:py:attr:`~Console.completer_word_break_characters`.
Parameters
----------
source :
The source string to complete at the end.
Returns
-------
completions : :py:class:`list`\[:py:class:`str`]
A list of completion strings.
start : :py:class:`int`
The index where completion starts.
Examples
--------
>>> shell = Console()
>>> shell.complete("str.isa")
(['str.isalnum(', 'str.isalpha(', 'str.isascii('], 0)
>>> shell.complete("a = 5 ; str.isa")
(['str.isalnum(', 'str.isalpha(', 'str.isascii('], 8)
"""
start = max(map(source.rfind, self.completer_word_break_characters)) + 1
source = source[start:]
if "." in source:
completions = self._completer.attr_matches(source)
else:
completions = self._completer.global_matches(source)
return completions, start
| Console |
python | sqlalchemy__sqlalchemy | test/orm/test_sync.py | {
"start": 968,
"end": 9186
} | class ____(
fixtures.MappedTest, testing.AssertsExecutionResults, AssertsUOW
):
@classmethod
def define_tables(cls, metadata):
Table(
"t1",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
)
Table(
"t2",
metadata,
Column("id", Integer, ForeignKey("t1.id"), primary_key=True),
Column("t1id", Integer, ForeignKey("t1.id")),
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
class B(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
cls.mapper_registry.map_imperatively(cls.classes.A, cls.tables.t1)
cls.mapper_registry.map_imperatively(cls.classes.B, cls.tables.t2)
def _fixture(self):
A, B = self.classes.A, self.classes.B
session = fixture_session()
uowcommit = self._get_test_uow(session)
a_mapper = class_mapper(A)
b_mapper = class_mapper(B)
self.a1 = a1 = A()
self.b1 = b1 = B()
uowcommit = self._get_test_uow(session)
return (
uowcommit,
attributes.instance_state(a1),
attributes.instance_state(b1),
a_mapper,
b_mapper,
)
def test_populate(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
pairs = [(a_mapper.c.id, b_mapper.c.id)]
a1.obj().id = 7
assert "id" not in b1.obj().__dict__
sync._populate(a1, a_mapper, b1, b_mapper, pairs, uowcommit, False)
eq_(b1.obj().id, 7)
eq_(b1.obj().__dict__["id"], 7)
assert ("pk_cascaded", b1, b_mapper.c.id) not in uowcommit.attributes
def test_populate_flag_cascaded(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
pairs = [(a_mapper.c.id, b_mapper.c.id)]
a1.obj().id = 7
assert "id" not in b1.obj().__dict__
sync._populate(a1, a_mapper, b1, b_mapper, pairs, uowcommit, True)
eq_(b1.obj().id, 7)
eq_(b1.obj().__dict__["id"], 7)
eq_(uowcommit.attributes[("pk_cascaded", b1, b_mapper.c.id)], True)
def test_populate_unmapped_source(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
pairs = [(b_mapper.c.id, b_mapper.c.id)]
assert_raises_message(
orm_exc.UnmappedColumnError,
"Can't execute sync rule for source column 't2.id'; "
r"mapper 'Mapper\[A\(t1\)\]' does not map this column.",
sync._populate,
a1,
a_mapper,
b1,
b_mapper,
pairs,
uowcommit,
False,
)
def test_populate_unmapped_dest(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
pairs = [(a_mapper.c.id, a_mapper.c.id)]
assert_raises_message(
orm_exc.UnmappedColumnError,
r"Can't execute sync rule for destination "
r"column 't1.id'; "
r"mapper 'Mapper\[B\(t2\)\]' does not map this column.",
sync._populate,
a1,
a_mapper,
b1,
b_mapper,
pairs,
uowcommit,
False,
)
def test_clear(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
pairs = [(a_mapper.c.id, b_mapper.c.t1id)]
b1.obj().t1id = 8
eq_(b1.obj().__dict__["t1id"], 8)
sync._clear(b1, b_mapper, pairs)
eq_(b1.obj().__dict__["t1id"], None)
def test_clear_pk(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
pairs = [(a_mapper.c.id, b_mapper.c.id)]
b1.obj().id = 8
eq_(b1.obj().__dict__["id"], 8)
assert_raises_message(
AssertionError,
"Dependency rule on column 't1.id' tried to blank-out primary key "
"column 't2.id' on instance '<B",
sync._clear,
b1,
b_mapper,
pairs,
)
def test_clear_unmapped(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
pairs = [(a_mapper.c.id, a_mapper.c.foo)]
assert_raises_message(
orm_exc.UnmappedColumnError,
"Can't execute sync rule for destination "
r"column 't1.foo'; mapper 'Mapper\[B\(t2\)\]' does not "
"map this column.",
sync._clear,
b1,
b_mapper,
pairs,
)
def test_update(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
a1.obj().id = 10
a1._commit_all(a1.dict)
a1.obj().id = 12
pairs = [(a_mapper.c.id, b_mapper.c.id)]
dest = {}
sync._update(a1, a_mapper, dest, "old_", pairs)
eq_(dest, {"id": 12, "old_id": 10})
def test_update_unmapped(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
pairs = [(b_mapper.c.id, b_mapper.c.id)]
dest = {}
assert_raises_message(
orm_exc.UnmappedColumnError,
"Can't execute sync rule for source column 't2.id'; "
r"mapper 'Mapper\[A\(t1\)\]' does not map this column.",
sync._update,
a1,
a_mapper,
dest,
"old_",
pairs,
)
def test_populate_dict(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
a1.obj().id = 10
pairs = [(a_mapper.c.id, b_mapper.c.id)]
dest = {}
sync._populate_dict(a1, a_mapper, dest, pairs)
eq_(dest, {"id": 10})
def test_populate_dict_unmapped(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
a1.obj().id = 10
pairs = [(b_mapper.c.id, b_mapper.c.id)]
dest = {}
assert_raises_message(
orm_exc.UnmappedColumnError,
"Can't execute sync rule for source column 't2.id'; "
r"mapper 'Mapper\[A\(t1\)\]' does not map this column.",
sync._populate_dict,
a1,
a_mapper,
dest,
pairs,
)
def test_source_modified_unmodified(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
a1.obj().id = 10
pairs = [(a_mapper.c.id, b_mapper.c.id)]
eq_(sync._source_modified(uowcommit, a1, a_mapper, pairs), False)
def test_source_modified_no_pairs(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
eq_(sync._source_modified(uowcommit, a1, a_mapper, []), False)
def test_source_modified_modified(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
a1.obj().id = 10
a1._commit_all(a1.dict)
a1.obj().id = 12
pairs = [(a_mapper.c.id, b_mapper.c.id)]
eq_(sync._source_modified(uowcommit, a1, a_mapper, pairs), True)
def test_source_modified_composite(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
a1.obj().foo = 10
a1._commit_all(a1.dict)
a1.obj().foo = 12
pairs = [
(a_mapper.c.id, b_mapper.c.id),
(a_mapper.c.foo, b_mapper.c.id),
]
eq_(sync._source_modified(uowcommit, a1, a_mapper, pairs), True)
def test_source_modified_composite_unmodified(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
a1.obj().foo = 10
a1._commit_all(a1.dict)
pairs = [
(a_mapper.c.id, b_mapper.c.id),
(a_mapper.c.foo, b_mapper.c.id),
]
eq_(sync._source_modified(uowcommit, a1, a_mapper, pairs), False)
def test_source_modified_no_unmapped(self):
uowcommit, a1, b1, a_mapper, b_mapper = self._fixture()
pairs = [(b_mapper.c.id, b_mapper.c.id)]
assert_raises_message(
orm_exc.UnmappedColumnError,
"Can't execute sync rule for source column 't2.id'; "
r"mapper 'Mapper\[A\(t1\)\]' does not map this column.",
sync._source_modified,
uowcommit,
a1,
a_mapper,
pairs,
)
| SyncTest |
python | pytorch__pytorch | torch/_subclasses/fake_tensor.py | {
"start": 20931,
"end": 21669
} | class ____:
debug = os.environ.get("TORCH_FAKE_TENSOR_DEBUG", "0") == "1"
# This memorizes unbacked SymInt or SymFloats representing quantities like the
# number of nonzero elements in this tensor or learning rate. There is one
# instance of the descriptor per particular quantity to memoize.
#
# Memoization is helpful if you do something like x[mask] and y[mask];
# mask.nonzero() gets repeatedly called and should give a consistent unbacked
# SymInt. It needs to be invalidated in the same way constant is.
#
# Making this a descriptor may seem overly fancy, but actually it's the most
# convenient way to ensure access to FakeTensor during access, which is
# required for testing version counter and epoch validity.
| FakeTensorConfig |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bigcommerce/components.py | {
"start": 345,
"end": 1000
} | class ____(AddFields):
def transform(
self,
record: Record,
config: Optional[Config] = None,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
) -> Record:
kwargs = {"record": record, "stream_state": stream_state, "stream_slice": stream_slice}
for parsed_field in self._parsed_fields:
date_time = parsed_field.value.eval(config, **kwargs)
new_date_time = str(pendulum.from_format(date_time, "ddd, D MMM YYYY HH:mm:ss ZZ"))
dpath.util.new(record, parsed_field.path, new_date_time)
return record
| DateTimeTransformer |
python | sympy__sympy | sympy/plotting/series.py | {
"start": 47137,
"end": 49706
} | class ____(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y, label="", **kwargs):
super().__init__(**kwargs)
np = import_module('numpy')
if len(list_x) != len(list_y):
raise ValueError(
"The two lists of coordinates must have the same "
"number of elements.\n"
"Received: len(list_x) = {} ".format(len(list_x)) +
"and len(list_y) = {}".format(len(list_y))
)
self._block_lambda_functions(list_x, list_y)
check = lambda l: [isinstance(t, Expr) and (not t.is_number) for t in l]
if any(check(list_x) + check(list_y)) or self.params:
if not self.params:
raise ValueError("Some or all elements of the provided lists "
"are symbolic expressions, but the ``params`` dictionary "
"was not provided: those elements can't be evaluated.")
self.list_x = Tuple(*list_x)
self.list_y = Tuple(*list_y)
else:
self.list_x = np.array(list_x, dtype=np.float64)
self.list_y = np.array(list_y, dtype=np.float64)
self._expr = (self.list_x, self.list_y)
if not any(isinstance(t, np.ndarray) for t in [self.list_x, self.list_y]):
self._check_fs()
self.is_polar = kwargs.get("is_polar", kwargs.get("polar", False))
self.label = label
self.rendering_kw = kwargs.get("rendering_kw", {})
if self.use_cm and self.color_func:
self.is_parametric = True
if isinstance(self.color_func, Expr):
raise TypeError(
"%s don't support symbolic " % self.__class__.__name__ +
"expression for `color_func`.")
def __str__(self):
return "2D list plot"
def _get_data_helper(self):
"""Returns coordinates that needs to be postprocessed."""
lx, ly = self.list_x, self.list_y
if not self.is_interactive:
return self._eval_color_func_and_return(lx, ly)
np = import_module('numpy')
lx = np.array([t.evalf(subs=self.params) for t in lx], dtype=float)
ly = np.array([t.evalf(subs=self.params) for t in ly], dtype=float)
return self._eval_color_func_and_return(lx, ly)
def _eval_color_func_and_return(self, *data):
if self.use_cm and callable(self.color_func):
return [*data, self.eval_color_func(*data)]
return data
| List2DSeries |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-instagram/unit_tests/integration/test_user_lifetime_insights.py | {
"start": 1524,
"end": 2511
} | class ____(TestCase):
@staticmethod
def _read(config_: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput:
return read_output(
config_builder=config_,
stream_name=_STREAM_NAME,
sync_mode=SyncMode.full_refresh,
expecting_exception=expecting_exception,
)
@HttpMocker()
def test_read_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
get_account_request().build(),
get_account_response(),
)
for breakdown in ["city", "country", "age,gender"]:
lifetime_request = _get_request().with_custom_param("breakdown", breakdown)
http_mocker.get(
lifetime_request.build(),
_get_response().with_record(_record()).build(),
)
output = self._read(config_=config())
# each breakdown should produce a record
assert len(output.records) == 3
| TestFullRefresh |
python | matplotlib__matplotlib | lib/matplotlib/patheffects.py | {
"start": 2161,
"end": 5754
} | class ____(RendererBase):
"""
Implements a Renderer which contains another renderer.
This proxy then intercepts draw calls, calling the appropriate
:class:`AbstractPathEffect` draw method.
.. note::
Not all methods have been overridden on this RendererBase subclass.
It may be necessary to add further methods to extend the PathEffects
capabilities further.
"""
def __init__(self, path_effects, renderer):
"""
Parameters
----------
path_effects : iterable of :class:`AbstractPathEffect`
The path effects which this renderer represents.
renderer : `~matplotlib.backend_bases.RendererBase` subclass
"""
self._path_effects = path_effects
self._renderer = renderer
def copy_with_path_effect(self, path_effects):
return self.__class__(path_effects, self._renderer)
def __getattribute__(self, name):
if name in ['flipy', 'get_canvas_width_height', 'new_gc',
'points_to_pixels', '_text2path', 'height', 'width']:
return getattr(self._renderer, name)
else:
return object.__getattribute__(self, name)
def draw_path(self, gc, tpath, affine, rgbFace=None):
for path_effect in self._path_effects:
path_effect.draw_path(self._renderer, gc, tpath, affine,
rgbFace)
def draw_markers(
self, gc, marker_path, marker_trans, path, *args, **kwargs):
# We do a little shimmy so that all markers are drawn for each path
# effect in turn. Essentially, we induce recursion (depth 1) which is
# terminated once we have just a single path effect to work with.
if len(self._path_effects) == 1:
# Call the base path effect function - this uses the unoptimised
# approach of calling "draw_path" multiple times.
return super().draw_markers(gc, marker_path, marker_trans, path,
*args, **kwargs)
for path_effect in self._path_effects:
renderer = self.copy_with_path_effect([path_effect])
# Recursively call this method, only next time we will only have
# one path effect.
renderer.draw_markers(gc, marker_path, marker_trans, path,
*args, **kwargs)
def draw_path_collection(self, gc, master_transform, paths, *args,
**kwargs):
# We do a little shimmy so that all paths are drawn for each path
# effect in turn. Essentially, we induce recursion (depth 1) which is
# terminated once we have just a single path effect to work with.
if len(self._path_effects) == 1:
# Call the base path effect function - this uses the unoptimised
# approach of calling "draw_path" multiple times.
return super().draw_path_collection(gc, master_transform, paths,
*args, **kwargs)
for path_effect in self._path_effects:
renderer = self.copy_with_path_effect([path_effect])
# Recursively call this method, only next time we will only have
# one path effect.
renderer.draw_path_collection(gc, master_transform, paths,
*args, **kwargs)
def open_group(self, s, gid=None):
return self._renderer.open_group(s, gid)
def close_group(self, s):
return self._renderer.close_group(s)
| PathEffectRenderer |
python | huggingface__transformers | src/transformers/models/xlstm/modeling_xlstm.py | {
"start": 61754,
"end": 66234
} | class ____(xLSTMPreTrainedModel, GenerationMixin):
def __init__(self, config):
super().__init__(config)
self.backbone = xLSTMModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def get_input_embeddings(self):
return self.backbone.get_input_embeddings()
def set_input_embeddings(self, new_embeddings):
return self.backbone.set_input_embeddings(new_embeddings)
def prepare_inputs_for_generation(
self,
input_ids,
attention_mask=None, # not used but needed, otherwise generate complains when passing tokenizer inputs
inputs_embeds=None,
use_cache=None,
cache_params: Optional[xLSTMCache] = None,
**kwargs,
):
if use_cache and cache_params is not None:
# If the first cache position is non-zero, we assume we are in generation mode.
# Thus, the cache_params state is assumed to be the state before the last token
# (lastly generated token), and all previous tokens are already ingested.
# This should as well support generation from scratch with the [BOS] token inserted first.
input_ids = input_ids[:, -1:]
if inputs_embeds is not None:
inputs_embeds = inputs_embeds[:, -1:]
if inputs_embeds is not None and cache_params is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs.update({"cache_params": cache_params, "use_cache": use_cache})
# Forward ALL kwargs that are uninitialized (e.g. `use_cache`).
for key, value in kwargs.items():
if key not in model_inputs:
model_inputs[key] = value
return model_inputs
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_params: Optional[xLSTMCache] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
**kwargs,
) -> Union[tuple, xLSTMCausalLMOutput]:
r"""
cache_params (`xLSTMCache`, *optional*):
The xLSTMCache that carries the RNN states.
"""
xlstm_outputs = self.backbone(
input_ids,
cache_params=cache_params,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_hidden_states=output_hidden_states,
**kwargs,
)
hidden_states = xlstm_outputs[0]
logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float()
if not self.training and self.config.max_inference_chunksize < logits.shape[1]:
offset = 0
with torch.no_grad():
while offset < logits.shape[1]:
logits[:, offset : min(offset + self.config.max_inference_chunksize, logits.shape[1])] = soft_cap(
logits[:, offset : min(offset + self.config.max_inference_chunksize, logits.shape[1])],
self.config.output_logit_soft_cap,
)
offset += self.config.max_inference_chunksize
else:
logits = soft_cap(logits, self.config.output_logit_soft_cap)
loss = None
if labels is not None:
# move labels to correct device
labels = labels.to(logits.device)
# Shift so that tokens < nstate predict nstate
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
return xLSTMCausalLMOutput(
loss=loss,
logits=logits,
cache_params=xlstm_outputs.cache_params,
hidden_states=xlstm_outputs.hidden_states,
)
__all__ = [
"xLSTMForCausalLM",
"xLSTMModel",
"xLSTMPreTrainedModel",
]
| xLSTMForCausalLM |
python | apache__airflow | task-sdk/src/airflow/sdk/exceptions.py | {
"start": 5027,
"end": 5134
} | class ____(AirflowException):
"""Raise when there is a timeout on sensor polling."""
| AirflowSensorTimeout |
python | keon__algorithms | tests/test_strings.py | {
"start": 19377,
"end": 19893
} | class ____(unittest.TestCase):
"""[summary]
Test for the file knuth_morris_pratt.py
Arguments:
unittest {[type]} -- [description]
"""
def test_knuth_morris_pratt(self):
self.assertEqual([0, 1, 2, 3, 4], knuth_morris_pratt("aaaaaaa", "aaa"))
self.assertEqual([0, 4], knuth_morris_pratt("abcdabc", "abc"))
self.assertEqual([], knuth_morris_pratt("aabcdaab", "aba"))
self.assertEqual([0, 4], knuth_morris_pratt([0,0,1,1,0,0,1,0], [0,0]))
| TestKnuthMorrisPratt |
python | ipython__ipython | IPython/core/oinspect.py | {
"start": 3212,
"end": 4299
} | class ____(TypedDict):
type_name: Optional[str]
base_class: Optional[str]
string_form: Optional[str]
namespace: Optional[str]
length: Optional[str]
file: Optional[str]
definition: Optional[str]
docstring: Optional[str]
source: Optional[str]
init_definition: Optional[str]
class_docstring: Optional[str]
init_docstring: Optional[str]
call_def: Optional[str]
call_docstring: Optional[str]
subclasses: Optional[str]
# These won't be printed but will be used to determine how to
# format the object
ismagic: bool
isalias: bool
isclass: bool
found: bool
name: str
_info_fields = list(InfoDict.__annotations__.keys())
def __getattr__(name):
if name == "info_fields":
warnings.warn(
"IPython.core.oinspect's `info_fields` is considered for deprecation and may be removed in the Future. ",
DeprecationWarning,
stacklevel=2,
)
return _info_fields
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
@dataclass
| InfoDict |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/black/cases/fmtonoff5.py | {
"start": 1548,
"end": 1806
} | class ____(t.Protocol):
def this_will_be_formatted ( self, **kwargs ) -> Named: ...
# fmt: on
# Regression test for https://github.com/psf/black/issues/3436.
if x:
return x
# fmt: off
elif unformatted:
# fmt: on
will_be_formatted ()
| Factory |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 22134,
"end": 22703
} | class ____:
def __init__(self, username: str, entities: List[milvus_types.RoleEntity]) -> None:
self._username = username
roles = []
for entity in entities:
if isinstance(entity, milvus_types.RoleEntity):
roles.append(entity.name)
self._roles = tuple(roles)
def __repr__(self) -> str:
return f"UserItem: <username:{self.username}>, <roles:{self.roles}>"
@property
def username(self):
return self._username
@property
def roles(self):
return self._roles
| UserItem |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/managed_kafka.py | {
"start": 3507,
"end": 30705
} | class ____(GoogleBaseHook):
"""Hook for Managed Service for Apache Kafka APIs."""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(gcp_conn_id, impersonation_chain, **kwargs)
def get_managed_kafka_client(self) -> ManagedKafkaClient:
"""Return ManagedKafkaClient object."""
return ManagedKafkaClient(
credentials=self.get_credentials(),
client_info=CLIENT_INFO,
)
def wait_for_operation(self, operation: Operation, timeout: float | None = None):
"""Wait for long-lasting operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
def get_confluent_token(self, config_str: str):
"""Get the authentication token for confluent client."""
token_provider = ManagedKafkaTokenProvider(credentials=self.get_credentials())
token = token_provider.confluent_token()
return token
@GoogleBaseHook.fallback_to_default_project_id
def create_cluster(
self,
project_id: str,
location: str,
cluster: types.Cluster | dict,
cluster_id: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Create a new Apache Kafka cluster.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster: Required. Configuration of the cluster to create. Its ``name`` field is ignored.
:param cluster_id: Required. The ID to use for the cluster, which will become the final component of
the cluster's name. The ID must be 1-63 characters long, and match the regular expression
``[a-z]([-a-z0-9]*[a-z0-9])?`` to comply with RFC 1035. This value is structured like: ``my-cluster-id``.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
to avoid duplication of requests. If a request times out or fails, retrying with the same ID
allows the server to recognize the previous attempt. For at least 60 minutes, the server ignores
duplicate requests bearing the same ID. For example, consider a situation where you make an
initial request and the request times out. If you make the request again with the same request ID
within 60 minutes of the last request, the server checks if an original operation with the same
request ID was received. If so, the server ignores the second request. The request ID must be a
valid UUID. A zero UUID is not supported (00000000-0000-0000-0000-000000000000).
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_managed_kafka_client()
parent = client.common_location_path(project_id, location)
operation = client.create_cluster(
request={
"parent": parent,
"cluster_id": cluster_id,
"cluster": cluster,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
def list_clusters(
self,
project_id: str,
location: str,
page_size: int | None = None,
page_token: str | None = None,
filter: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListClustersPager:
"""
List the clusters in a given project and location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param page_size: Optional. The maximum number of clusters to return. The service may return fewer
than this value. If unspecified, server will pick an appropriate default.
:param page_token: Optional. A page token, received from a previous ``ListClusters`` call. Provide
this to retrieve the subsequent page.
When paginating, all other parameters provided to ``ListClusters`` must match the call that
provided the page token.
:param filter: Optional. Filter expression for the result.
:param order_by: Optional. Order by fields for the result.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_managed_kafka_client()
parent = client.common_location_path(project_id, location)
result = client.list_clusters(
request={
"parent": parent,
"page_size": page_size,
"page_token": page_token,
"filter": filter,
"order_by": order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_cluster(
self,
project_id: str,
location: str,
cluster_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> types.Cluster:
"""
Return the properties of a single cluster.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster_id: Required. The ID of the cluster whose configuration to return.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_managed_kafka_client()
name = client.cluster_path(project_id, location, cluster_id)
result = client.get_cluster(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_cluster(
self,
project_id: str,
location: str,
cluster_id: str,
cluster: types.Cluster | dict,
update_mask: FieldMask | dict,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Update the properties of a single cluster.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster_id: Required. The ID of the cluster whose configuration to update.
:param cluster: Required. The cluster to update.
:param update_mask: Required. Field mask is used to specify the fields to be overwritten in the
cluster resource by the update. The fields specified in the update_mask are relative to the
resource, not the full request. A field will be overwritten if it is in the mask.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
to avoid duplication of requests. If a request times out or fails, retrying with the same ID
allows the server to recognize the previous attempt. For at least 60 minutes, the server ignores
duplicate requests bearing the same ID.
For example, consider a situation where you make an initial request and the request times out. If
you make the request again with the same request ID within 60 minutes of the last request, the
server checks if an original operation with the same request ID was received. If so, the server
ignores the second request.
The request ID must be a valid UUID. A zero UUID is not supported (00000000-0000-0000-0000-000000000000).
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_managed_kafka_client()
_cluster = deepcopy(cluster) if isinstance(cluster, dict) else Cluster.to_dict(cluster)
_cluster["name"] = client.cluster_path(project_id, location, cluster_id)
operation = client.update_cluster(
request={
"update_mask": update_mask,
"cluster": _cluster,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
def delete_cluster(
self,
project_id: str,
location: str,
cluster_id: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Delete a single cluster.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster_id: Required. The ID of the cluster to delete.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
to avoid duplication of requests. If a request times out or fails, retrying with the same ID
allows the server to recognize the previous attempt. For at least 60 minutes, the server ignores
duplicate requests bearing the same ID.
For example, consider a situation where you make an initial request and the request times out. If
you make the request again with the same request ID within 60 minutes of the last request, the
server checks if an original operation with the same request ID was received. If so, the server
ignores the second request.
The request ID must be a valid UUID. A zero UUID is not supported (00000000-0000-0000-0000-000000000000).
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_managed_kafka_client()
name = client.cluster_path(project_id, location, cluster_id)
operation = client.delete_cluster(
request={
"name": name,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
def create_topic(
self,
project_id: str,
location: str,
cluster_id: str,
topic_id: str,
topic: types.Topic | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> types.Topic:
"""
Create a new topic in a given project and location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster_id: Required. The ID of the cluster in which to create the topic.
:param topic_id: Required. The ID to use for the topic, which will become the final component of the
topic's name.
:param topic: Required. Configuration of the topic to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_managed_kafka_client()
parent = client.cluster_path(project_id, location, cluster_id)
result = client.create_topic(
request={
"parent": parent,
"topic_id": topic_id,
"topic": topic,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_topics(
self,
project_id: str,
location: str,
cluster_id: str,
page_size: int | None = None,
page_token: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListTopicsPager:
"""
List the topics in a given cluster.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster_id: Required. The ID of the cluster whose topics are to be listed.
:param page_size: Optional. The maximum number of topics to return. The service may return fewer than
this value. If unset or zero, all topics for the parent is returned.
:param page_token: Optional. A page token, received from a previous ``ListTopics`` call. Provide this
to retrieve the subsequent page. When paginating, all other parameters provided to ``ListTopics``
must match the call that provided the page token.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_managed_kafka_client()
parent = client.cluster_path(project_id, location, cluster_id)
result = client.list_topics(
request={
"parent": parent,
"page_size": page_size,
"page_token": page_token,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_topic(
self,
project_id: str,
location: str,
cluster_id: str,
topic_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> types.Topic:
"""
Return the properties of a single topic.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster_id: Required. The ID of the cluster whose topic is to be returned.
:param topic_id: Required. The ID of the topic whose configuration to return.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_managed_kafka_client()
name = client.topic_path(project_id, location, cluster_id, topic_id)
result = client.get_topic(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_topic(
self,
project_id: str,
location: str,
cluster_id: str,
topic_id: str,
topic: types.Topic | dict,
update_mask: FieldMask | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> types.Topic:
"""
Update the properties of a single topic.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster_id: Required. The ID of the cluster whose topic is to be updated.
:param topic_id: Required. The ID of the topic whose configuration to update.
:param topic: Required. The topic to update. Its ``name`` field must be populated.
:param update_mask: Required. Field mask is used to specify the fields to be overwritten in the Topic
resource by the update. The fields specified in the update_mask are relative to the resource, not
the full request. A field will be overwritten if it is in the mask.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_managed_kafka_client()
_topic = deepcopy(topic) if isinstance(topic, dict) else Topic.to_dict(topic)
_topic["name"] = client.topic_path(project_id, location, cluster_id, topic_id)
result = client.update_topic(
request={
"update_mask": update_mask,
"topic": _topic,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_topic(
self,
project_id: str,
location: str,
cluster_id: str,
topic_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete a single topic.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster_id: Required. The ID of the cluster whose topic is to be deleted.
:param topic_id: Required. The ID of the topic to delete.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_managed_kafka_client()
name = client.topic_path(project_id, location, cluster_id, topic_id)
client.delete_topic(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def list_consumer_groups(
self,
project_id: str,
location: str,
cluster_id: str,
page_size: int | None = None,
page_token: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListConsumerGroupsPager:
"""
List the consumer groups in a given cluster.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster_id: Required. The ID of the cluster whose consumer groups are to be listed.
:param page_size: Optional. The maximum number of consumer groups to return. The service may return
fewer than this value. If unset or zero, all consumer groups for the parent is returned.
:param page_token: Optional. A page token, received from a previous ``ListConsumerGroups`` call.
Provide this to retrieve the subsequent page. When paginating, all other parameters provided to
``ListConsumerGroups`` must match the call that provided the page token.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_managed_kafka_client()
parent = client.cluster_path(project_id, location, cluster_id)
result = client.list_consumer_groups(
request={
"parent": parent,
"page_size": page_size,
"page_token": page_token,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_consumer_group(
self,
project_id: str,
location: str,
cluster_id: str,
consumer_group_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> types.ConsumerGroup:
"""
Return the properties of a single consumer group.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster_id: Required. The ID of the cluster whose consumer group is to be returned.
:param consumer_group_id: Required. The ID of the consumer group whose configuration to return.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_managed_kafka_client()
name = client.consumer_group_path(project_id, location, cluster_id, consumer_group_id)
result = client.get_consumer_group(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_consumer_group(
self,
project_id: str,
location: str,
cluster_id: str,
consumer_group_id: str,
consumer_group: types.ConsumerGroup | dict,
update_mask: FieldMask | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> types.ConsumerGroup:
"""
Update the properties of a single consumer group.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster_id: Required. The ID of the cluster whose topic is to be updated.
:param consumer_group_id: Required. The ID of the consumer group whose configuration to update.
:param consumer_group: Required. The consumer_group to update. Its ``name`` field must be populated.
:param update_mask: Required. Field mask is used to specify the fields to be overwritten in the
ConsumerGroup resource by the update. The fields specified in the update_mask are relative to the
resource, not the full request. A field will be overwritten if it is in the mask.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_managed_kafka_client()
_consumer_group = (
deepcopy(consumer_group)
if isinstance(consumer_group, dict)
else ConsumerGroup.to_dict(consumer_group)
)
_consumer_group["name"] = client.consumer_group_path(
project_id, location, cluster_id, consumer_group_id
)
result = client.update_consumer_group(
request={
"update_mask": update_mask,
"consumer_group": _consumer_group,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_consumer_group(
self,
project_id: str,
location: str,
cluster_id: str,
consumer_group_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete a single consumer group.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster_id: Required. The ID of the cluster whose consumer group is to be deleted.
:param consumer_group_id: Required. The ID of the consumer group to delete.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_managed_kafka_client()
name = client.consumer_group_path(project_id, location, cluster_id, consumer_group_id)
client.delete_consumer_group(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
| ManagedKafkaHook |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/roles.py | {
"start": 1496,
"end": 1608
} | class ____(SQLRole):
__slots__ = ()
_role_name = "ExecutionOption Core or ORM object"
| ExecutableOptionRole |
python | getsentry__sentry | src/sentry/web/frontend/base.py | {
"start": 2186,
"end": 5651
} | class ____(SiloLimit):
def __init__(self, modes: SiloMode | Iterable[SiloMode], internal: bool = False) -> None:
if isinstance(modes, SiloMode):
modes = [modes]
self.modes = frozenset(modes)
self.internal = internal
def modify_endpoint_class(self, decorated_class: type[View]) -> type:
dispatch_override = self.create_override(decorated_class.dispatch)
new_class = type(
decorated_class.__name__,
(decorated_class,),
{
"dispatch": dispatch_override,
"silo_limit": self,
},
)
new_class.__module__ = decorated_class.__module__
return new_class
def modify_endpoint_method(self, decorated_method: Callable[..., Any]) -> Callable[..., Any]:
decorated = self.create_override(decorated_method)
setattr(decorated, "silo_limit", self)
return decorated
def handle_when_unavailable(
self,
original_method: Callable[..., Any],
current_mode: SiloMode,
available_modes: Iterable[SiloMode],
) -> Callable[..., Any]:
def handle(*args: Any, **kwargs: Any) -> HttpResponse:
method, path = self._request_attrs(args, kwargs)
mode_str = ", ".join(str(m) for m in available_modes)
message = (
f"Received {method} request at {path!r} to server in "
f"{current_mode} mode. This view is available only in: {mode_str}"
)
if settings.FAIL_ON_UNAVAILABLE_API_CALL:
raise self.AvailabilityError(message)
else:
logger.warning(message)
return HttpResponseNotFound()
return handle
def _request_attrs(self, args: Iterable[Any], kwargs: Mapping[str, Any]) -> tuple[str, str]:
for arg in args:
if isinstance(arg, HttpRequest):
return (arg.method or "unknown", arg.path)
for value in kwargs.values():
if isinstance(value, HttpRequest):
return (value.method or "unknown", value.path)
return ("unknown", "unknown")
def __call__(self, decorated_obj: Any) -> Any:
if isinstance(decorated_obj, type):
if not issubclass(decorated_obj, View):
raise ValueError("`@ViewSiloLimit` can decorate only View subclasses")
return self.modify_endpoint_class(decorated_obj)
if callable(decorated_obj):
return self.modify_endpoint_method(decorated_obj)
raise TypeError("`@ViewSiloLimit` must decorate a class or method")
control_silo_view = ViewSiloLimit([SiloMode.CONTROL])
"""
Apply to frontend views that exist in CONTROL Silo
If a request is received and the application is not in CONTROL/MONOLITH
mode a 404 will be returned.
"""
region_silo_view = ViewSiloLimit([SiloMode.REGION])
"""
Apply to frontend views that exist in REGION Silo
If a request is received and the application is not in REGION/MONOLITH
mode a 404 will be returned.
"""
all_silo_view = ViewSiloLimit([SiloMode.REGION, SiloMode.CONTROL, SiloMode.MONOLITH])
"""
Apply to frontend views that respond in both CONTROL and REGION mode.
"""
internal_region_silo_view = ViewSiloLimit([SiloMode.REGION], internal=True)
"""
Apply to frontend views that exist in REGION Silo
and are not accessible via cell routing.
This is generally for debug/development views.
"""
| ViewSiloLimit |
python | getsentry__sentry | src/sentry/tsdb/redis.py | {
"start": 1070,
"end": 1823
} | class ____(Generic[T]):
"""\
Wraps a context manager and prevents any exceptions raised either during
the managed block or the exiting of the wrapped manager from propagating.
You probably shouldn't use this.
"""
def __init__(self, wrapped: ContextManager[T]):
self.wrapped = wrapped
def __enter__(self) -> T:
return self.wrapped.__enter__()
def __exit__(self, *args) -> bool:
try:
# allow the wrapped manager to perform any cleanup tasks regardless
# of whether or not we are suppressing an exception raised within
# the managed block
self.wrapped.__exit__(*args)
except Exception:
pass
return True
| SuppressionWrapper |
python | jazzband__django-simple-history | simple_history/registry_tests/migration_test_app/migrations/0001_initial.py | {
"start": 158,
"end": 3723
} | class ____(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [
migrations.CreateModel(
name="DoYouKnow",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
)
],
),
migrations.CreateModel(
name="HistoricalYar",
fields=[
(
"id",
models.IntegerField(
auto_created=True, blank=True, db_index=True, verbose_name="ID"
),
),
("history_id", models.AutoField(primary_key=True, serialize=False)),
("history_date", models.DateTimeField()),
(
"history_type",
models.CharField(
choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")],
max_length=1,
),
),
("history_change_reason", models.CharField(max_length=100, null=True)),
(
"history_user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "historical yar",
"ordering": ("-history_date", "-history_id"),
"get_latest_by": "history_date",
},
),
migrations.CreateModel(
name="Yar",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
)
],
),
migrations.CreateModel(
name="WhatIMean",
fields=[
(
"doyouknow_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="migration_test_app.DoYouKnow",
),
)
],
bases=("migration_test_app.doyouknow",),
),
migrations.AddField(
model_name="yar",
name="what",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="migration_test_app.WhatIMean",
),
),
migrations.AddField(
model_name="historicalyar",
name="what",
field=models.ForeignKey(
blank=True,
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
to="migration_test_app.WhatIMean",
),
),
]
| Migration |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-greenhouse/components.py | {
"start": 505,
"end": 1803
} | class ____(LegacyToPerPartitionStateMigration):
declarative_stream: DeclarativeStreamModel
config: Config
def __init__(self, declarative_stream: DeclarativeStreamModel, config: Config):
self._partition_router = declarative_stream.retriever.partition_router
self._cursor = declarative_stream.incremental_sync
self._config = config
self._parameters = declarative_stream.parameters
self._partition_key_field = InterpolatedString.create(
self._get_partition_field(self._partition_router), parameters=self._parameters
).eval(self._config)
self._cursor_field = InterpolatedString.create(self._cursor.cursor_field, parameters=self._parameters).eval(self._config)
def migrate(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
"""
LegacyToPerPartitionStateMigration migrates partition keys as string, while real type of id in greenhouse is integer,
which leads to partition mismatch.
To prevent this type casting for partition key was added.
"""
states = [
{"partition": {self._partition_key_field: int(key), "parent_slice": {}}, "cursor": value} for key, value in stream_state.items()
]
return {"states": states}
| GreenhouseStateMigration |
python | getsentry__sentry | tests/sentry/integrations/slack/webhooks/events/test_discover_link_shared.py | {
"start": 1080,
"end": 8251
} | class ____(BaseEventTest):
@pytest.fixture(autouse=True)
def mock_chat_postEphemeral(self):
with patch(
"slack_sdk.web.client.WebClient.chat_postEphemeral",
return_value=SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/chat.postEphemeral",
req_args={},
data={"ok": True},
headers={},
status_code=200,
),
) as self.mock_post:
yield
@pytest.fixture(autouse=True)
def mock_chat_unfurl(self):
with patch(
"slack_sdk.web.client.WebClient.chat_unfurl",
return_value=SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/chat.unfurl",
req_args={},
data={"ok": True},
headers={},
status_code=200,
),
) as self.mock_unfurl:
yield
@responses.activate
@patch(
"sentry.integrations.slack.webhooks.event.match_link",
# match_link will be called twice, for each our links. Resolve into
# two unique links and one duplicate.
side_effect=[
(LinkType.DISCOVER, {"arg1": "value1"}),
(LinkType.DISCOVER, {"arg1", "value2"}),
(LinkType.DISCOVER, {"arg1": "value1"}),
],
)
@patch("sentry.integrations.slack.requests.event.has_discover_links", return_value=True)
@patch(
"sentry.integrations.slack.webhooks.event.link_handlers",
{
LinkType.DISCOVER: Handler(
matcher=[re.compile(r"test")],
arg_mapper=make_type_coercer({}),
fn=Mock(return_value={"link1": "unfurl", "link2": "unfurl"}),
)
},
)
def share_discover_links(self, mock_match_link, mock_):
responses.add(responses.POST, "https://slack.com/api/chat.postEphemeral", json={"ok": True})
responses.add(responses.POST, "https://slack.com/api/chat.unfurl", json={"ok": True})
resp = self.post_webhook(event_data=orjson.loads(LINK_SHARED_EVENT))
assert resp.status_code == 200, resp.content
data = responses.calls[0].request.body
return dict(parse_qsl(data))
@patch(
"sentry.integrations.slack.webhooks.event.match_link",
# match_link will be called twice, for each our links. Resolve into
# two unique links and one duplicate.
side_effect=[
(LinkType.DISCOVER, {"arg1": "value1"}),
(LinkType.DISCOVER, {"arg1", "value2"}),
(LinkType.DISCOVER, {"arg1": "value1"}),
],
)
@patch("sentry.integrations.slack.requests.event.has_discover_links", return_value=True)
@patch(
"sentry.integrations.slack.webhooks.event.link_handlers",
{
LinkType.DISCOVER: Handler(
matcher=[re.compile(r"test")],
arg_mapper=make_type_coercer({}),
fn=Mock(return_value={"link1": "unfurl", "link2": "unfurl"}),
)
},
)
def share_discover_links_sdk(self, mock_match_link, mock_):
resp = self.post_webhook(event_data=orjson.loads(LINK_SHARED_EVENT))
assert resp.status_code == 200, resp.content
return self.mock_unfurl.call_args[1]
@patch(
"sentry.integrations.slack.webhooks.event.match_link",
# match_link will be called twice, for each our links. Resolve into
# two unique links and one duplicate.
side_effect=[
(LinkType.DISCOVER, {"arg1": "value1"}),
(LinkType.DISCOVER, {"arg1", "value2"}),
(LinkType.DISCOVER, {"arg1": "value1"}),
],
)
@patch("sentry.integrations.slack.requests.event.has_discover_links", return_value=True)
@patch(
"sentry.integrations.slack.webhooks.event.link_handlers",
{
LinkType.DISCOVER: Handler(
matcher=[re.compile(r"test")],
arg_mapper=make_type_coercer({}),
fn=Mock(return_value={"link1": "unfurl", "link2": "unfurl"}),
)
},
)
def share_discover_links_ephermeral_sdk(self, mock_match_link, mock_):
resp = self.post_webhook(event_data=orjson.loads(LINK_SHARED_EVENT))
assert resp.status_code == 200, resp.content
return self.mock_post.call_args[1]
def test_share_discover_links_unlinked_user_sdk(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
self.create_identity_provider(type="slack", external_id="TXXXXXXX1")
with self.feature("organizations:discover-basic"):
data = self.share_discover_links_ephermeral_sdk()
blocks = orjson.loads(data["blocks"])
assert blocks[0]["type"] == "section"
assert (
blocks[0]["text"]["text"]
== "Link your Slack identity to Sentry to unfurl Discover charts."
)
assert blocks[1]["type"] == "actions"
assert len(blocks[1]["elements"]) == 2
assert [button["text"]["text"] for button in blocks[1]["elements"]] == ["Link", "Cancel"]
@responses.activate
def test_share_discover_links_unlinked_user_no_channel(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
self.create_identity_provider(type="slack", external_id="TXXXXXXX1")
with self.feature("organizations:discover-basic"):
responses.add(
responses.POST, "https://slack.com/api/chat.postEphemeral", json={"ok": True}
)
responses.add(responses.POST, "https://slack.com/api/chat.unfurl", json={"ok": True})
resp = self.post_webhook(event_data=orjson.loads(LINK_SHARED_EVENT_NO_CHANNEL_NAME))
assert resp.status_code == 200, resp.content
assert len(responses.calls) == 0
def test_share_discover_links_unlinked_user_no_channel_sdk(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
self.create_identity_provider(type="slack", external_id="TXXXXXXX1")
with self.feature("organizations:discover-basic"):
resp = self.post_webhook(event_data=orjson.loads(LINK_SHARED_EVENT_NO_CHANNEL_NAME))
assert resp.status_code == 200, resp.content
assert len(self.mock_post.mock_calls) == 0
def test_share_discover_links_linked_user_sdk(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
idp = self.create_identity_provider(type="slack", external_id="TXXXXXXX1")
Identity.objects.create(
external_id="Uxxxxxxx",
idp=idp,
user=self.user,
status=IdentityStatus.VALID,
scopes=[],
)
data = self.share_discover_links_sdk()
unfurls = data["unfurls"]
# We only have two unfurls since one link was duplicated
assert len(unfurls) == 2
assert unfurls["link1"] == "unfurl"
assert unfurls["link2"] == "unfurl"
| DiscoverLinkSharedEvent |
python | oauthlib__oauthlib | oauthlib/openid/connect/core/exceptions.py | {
"start": 300,
"end": 359
} | class ____(FatalClientError):
pass
| FatalOpenIDClientError |
python | pydata__xarray | xarray/tests/test_indexes.py | {
"start": 26777,
"end": 28581
} | class ____(Index):
@classmethod
def from_variables(cls, variables, *, options=None):
return cls()
def create_variables(self, variables=None):
if variables is None:
# For Coordinates.from_xindex(), return all variables the index can create
return {
"time": Variable(dims=("time",), data=[1, 2, 3]),
"valid_time": Variable(
dims=("time",),
data=[2, 3, 4], # time + 1
attrs={"description": "time + 1"},
),
}
result = dict(variables)
if "time" in variables:
result["valid_time"] = Variable(
dims=("time",),
data=variables["time"].data + 1,
attrs={"description": "time + 1"},
)
return result
def test_set_xindex_with_extra_variables() -> None:
"""Test that set_xindex raises an error when custom index creates extra variables."""
ds = xr.Dataset(coords={"time": [1, 2, 3]}).reset_index("time")
# Test that set_xindex raises error for extra variables
with pytest.raises(ValueError, match="extra variables 'valid_time'"):
ds.set_xindex("time", IndexWithExtraVariables)
def test_set_xindex_factory_method_pattern() -> None:
ds = xr.Dataset(coords={"time": [1, 2, 3]}).reset_index("time")
# Test the recommended factory method pattern
coord_vars = {"time": ds._variables["time"]}
index = IndexWithExtraVariables.from_variables(coord_vars)
coords = xr.Coordinates.from_xindex(index)
result = ds.assign_coords(coords)
assert "time" in result.variables
assert "valid_time" in result.variables
assert_array_equal(result.valid_time.data, result.time.data + 1)
| IndexWithExtraVariables |
python | doocs__leetcode | solution/0400-0499/0408.Valid Word Abbreviation/Solution.py | {
"start": 0,
"end": 551
} | class ____:
def validWordAbbreviation(self, word: str, abbr: str) -> bool:
m, n = len(word), len(abbr)
i = j = x = 0
while i < m and j < n:
if abbr[j].isdigit():
if abbr[j] == "0" and x == 0:
return False
x = x * 10 + int(abbr[j])
else:
i += x
x = 0
if i >= m or word[i] != abbr[j]:
return False
i += 1
j += 1
return i + x == m and j == n
| Solution |
python | huggingface__transformers | src/transformers/models/glm4v/convert_glm4v_mgt_weights_to_hf.py | {
"start": 776,
"end": 32469
} | class ____(pickle.Unpickler):
def find_class(self, mod_name, name):
class DummyClass:
def __init__(self, *args, **kwargs):
pass
if mod_name.startswith("megatron") or mod_name.startswith("glm") or mod_name.startswith("__main__"):
return DummyClass
return super().find_class(mod_name, name)
pickle.Unpickler = UnpicklerWrapper
def dict_access_multi(a_dict, keys):
if len(keys) == 0:
return a_dict
return dict_access_multi(a_dict[keys[0]], keys[1:])
def _build_neox_to_llama_perm(rotary_dim: int) -> torch.Tensor:
half = rotary_dim // 2
perm = torch.empty(rotary_dim, dtype=torch.long)
perm[0::2] = torch.arange(0, half)
perm[1::2] = torch.arange(half, rotary_dim)
return perm
def _apply_rope_permute(q_or_k: torch.Tensor, blocks: int, head_dim: int, rotary_dim: int, neox_to_llama: bool = True):
if rotary_dim == 0:
return q_or_k
if neox_to_llama:
perm = _build_neox_to_llama_perm(rotary_dim).to(q_or_k.device)
else:
perm = torch.empty(rotary_dim, dtype=torch.long, device=q_or_k.device)
half = rotary_dim // 2
perm[0::2] = torch.arange(0, half, device=q_or_k.device)
perm[1::2] = torch.arange(half, rotary_dim, device=q_or_k.device)
inv = torch.empty_like(perm)
inv[perm] = torch.arange(rotary_dim, device=q_or_k.device)
perm = inv
if q_or_k.dim() == 2:
h = q_or_k.view(blocks, head_dim, -1)
h[:, :rotary_dim, ...] = h[:, perm, ...]
return h.reshape(q_or_k.shape)
else:
h = q_or_k.view(blocks, head_dim)
h[:, :rotary_dim] = h[:, perm]
return h.reshape(q_or_k.shape)
def merge_qkv(
sd_list,
original_tp,
num_attention_heads,
multi_query_group_num,
attention_dim,
interleaved_qkv,
convert_neox_to_llama: bool = True,
):
rotary_dim = attention_dim // 2
group_size = (num_attention_heads // multi_query_group_num + 2) * attention_dim
q_chunks, k_chunks, v_chunks = [], [], []
for sd in sd_list:
if interleaved_qkv:
shape = sd.shape
x = sd.view((multi_query_group_num // original_tp, group_size) + shape[1:])
q_, k_, v_ = x.split(
[
(num_attention_heads // multi_query_group_num) * attention_dim,
attention_dim,
attention_dim,
],
dim=1,
)
q_chunks.append(q_.reshape((-1,) + shape[1:]).clone())
k_chunks.append(k_.reshape((-1,) + shape[1:]).clone())
v_chunks.append(v_.reshape((-1,) + shape[1:]).clone())
else:
q_, k_, v_ = sd.split(
[
num_attention_heads * attention_dim // original_tp,
multi_query_group_num * attention_dim // original_tp,
multi_query_group_num * attention_dim // original_tp,
],
dim=0,
)
q_chunks.append(q_.clone())
k_chunks.append(k_.clone())
v_chunks.append(v_.clone())
q = torch.cat(q_chunks, dim=0)
k = torch.cat(k_chunks, dim=0)
v = torch.cat(v_chunks, dim=0)
if convert_neox_to_llama and rotary_dim > 0:
q = _apply_rope_permute(q, num_attention_heads, attention_dim, rotary_dim, neox_to_llama=True)
k = _apply_rope_permute(k, multi_query_group_num, attention_dim, rotary_dim, neox_to_llama=True)
return q, k, v
def merge_qkv_vit(sd_list, original_tp, num_attention_heads, multi_query_group_num, attention_dim):
group_size = (num_attention_heads // multi_query_group_num + 2) * attention_dim
q, k, v = [], [], []
for sd in sd_list:
shape = sd.shape
q_, k_, v_ = sd.view((multi_query_group_num // original_tp, group_size) + (shape[1:])).split(
[
(num_attention_heads // multi_query_group_num * attention_dim),
attention_dim,
attention_dim,
],
dim=1,
)
q_ = q_.reshape((-1,) + (shape[1:]))
k_ = k_.reshape((-1,) + (shape[1:]))
v_ = v_.reshape((-1,) + (shape[1:]))
q.append(q_.clone())
k.append(k_.clone())
v.append(v_.clone())
q = torch.cat(q, dim=0)
k = torch.cat(k, dim=0)
v = torch.cat(v, dim=0)
return q, k, v
def merge_glu(sd_list):
return torch.cat(
[sd.chunk(dim=0, chunks=2)[0].clone() for sd in sd_list]
+ [sd.chunk(dim=0, chunks=2)[1].clone() for sd in sd_list],
dim=0,
)
def merge_glu_vit(sd_list, original_tp=None):
if not isinstance(sd_list, list):
sd_list = [sd_list]
gate_proj = torch.cat([sd.chunk(dim=0, chunks=2)[0].clone() for sd in sd_list], dim=0)
up_proj = torch.cat([sd.chunk(dim=0, chunks=2)[1].clone() for sd in sd_list], dim=0)
return gate_proj, up_proj
def split_glu(sd, cnt, idx):
return torch.cat(
(
sd.chunk(dim=0, chunks=2)[0].chunk(cnt, dim=0)[idx].clone(),
sd.chunk(dim=0, chunks=2)[1].chunk(cnt, dim=0)[idx].clone(),
),
dim=0,
)
def merge_tensors(
tp_sd,
keys,
original_tp,
target_tp,
current_tp,
slice_dim=None,
merge_fn=None,
):
cnt = original_tp // target_tp
offset = cnt * current_tp
sd_list = [dict_access_multi(tp_sd[i + offset], keys) for i in range(cnt)]
if slice_dim is not None:
return torch.cat(sd_list, dim=slice_dim)
assert merge_fn is not None
return merge_fn(sd_list)
def save_sharded_model(state_dict, output_path, max_shard_size_gb=5, num_layers=40, vision_num_layers=24):
    """Write ``state_dict`` to ``output_path`` as sharded safetensors files.

    Parameters are bucketed per layer (LLM decoder layers first, then vision
    blocks, then everything else) so that a single layer is never split
    across shards, and each shard is capped at roughly ``max_shard_size_gb``.
    Also writes ``model.safetensors.index.json`` mapping every parameter
    name to the shard file that contains it.

    Returns:
        The number of shard files written.
    """
    os.makedirs(output_path, exist_ok=True)
    # Bucket parameters by the layer they belong to.
    layered_dict = {}
    for layer_idx in range(num_layers):
        layer_key = f"layer_{layer_idx}"
        layered_dict[layer_key] = {}
        for key, value in state_dict.items():
            if f"model.language_model.layers.{layer_idx}." in key:
                # Some merged entries arrive as single-element lists; unwrap them.
                if isinstance(value, list):
                    assert len(value) == 1, f"{key} {value}"
                    value = value[0]
                layered_dict[layer_key][key] = value
    for layer_idx in range(vision_num_layers):
        layer_key = f"visual_layer_{layer_idx}"
        layered_dict[layer_key] = {}
        for key, value in state_dict.items():
            if f"model.visual.blocks.{layer_idx}." in key:
                layered_dict[layer_key][key] = value
    # Everything not owned by a numbered LLM/vision layer (embeddings, norms,
    # lm_head, merger, ...) goes into the "others" bucket.
    layered_dict["others"] = {}
    for key, value in state_dict.items():
        if not any(f"model.language_model.layers.{i}." in key for i in range(num_layers)) and not any(
            f"model.visual.blocks.{i}." in key for i in range(vision_num_layers)
        ):
            layered_dict["others"][key] = value
    # Determine layer ordering
    layer_order = []
    for i in range(num_layers):
        layer_order.append(f"layer_{i}")
    for i in range(vision_num_layers):
        layer_order.append(f"visual_layer_{i}")
    layer_order.append("others")
    # Calculate sizes and create shards by layer
    param_sizes = {}
    shards = []
    current_shard = {}
    current_shard_size = 0
    max_shard_size_bytes = max_shard_size_gb * 1024 * 1024 * 1024
    for layer_key in layer_order:
        layer_weights = layered_dict[layer_key]
        layer_size = sum(param.numel() * param.element_size() for param in layer_weights.values())
        # Start a new shard if adding this whole layer would overflow the cap.
        if current_shard_size + layer_size > max_shard_size_bytes and current_shard:
            shards.append(current_shard)
            current_shard = {}
            current_shard_size = 0
        for param_name, param in layer_weights.items():
            current_shard[param_name] = param
            current_shard_size += param.numel() * param.element_size()
            param_sizes[param_name] = param.numel() * param.element_size()
    if current_shard:
        shards.append(current_shard)
    # Write each shard and build the param-name -> shard-file index.
    index_dict = {"metadata": {"total_size": sum(param_sizes.values())}, "weight_map": {}}
    for i, shard in enumerate(shards):
        shard_filename = f"model-{i + 1:05d}-of-{len(shards):05d}.safetensors"
        shard_path = os.path.join(output_path, shard_filename)
        for param_name in shard:
            index_dict["weight_map"][param_name] = shard_filename
        save_file(shard, shard_path, metadata={"format": "pt"})
        print(f"Saved shard {i + 1}/{len(shards)}: {shard_filename}")
        print(f" Shard size: {sum(p.numel() * p.element_size() for p in shard.values()) / (1024**3):.2f} GB")
        print(f" Keys in shard: {len(shard)}")
    index_path = os.path.join(output_path, "model.safetensors.index.json")
    with open(index_path, "w") as f:
        json.dump(index_dict, f, indent=2)
    return len(shards)
def merge_tp_weights(model_path, output_path, vllm_config_path=None):
    """Merge a TP/EP/PP-sharded Megatron checkpoint into a HuggingFace model.

    Steps:
      1. Scan ``mp_rank_XX[_YYY[_ZZZ]]`` directories to detect the original
         tensor/expert/pipeline parallel degrees. A two-index layout is
         ambiguous (TP-EP vs TP-PP) and is disambiguated later by checking
         whether any tensor name repeats across the middle rank index.
      2. Load every ``model_optim_rng.pt``, drop ``_extra_state`` /
         ``dummy_parameter`` entries, and regroup shards per pipeline stage
         (VPP "modelN" sub-blocks are flattened into extra pipeline stages).
      3. Stitch per-rank tensors back together into ``full_weights`` with
         globally re-offset layer indices (shared tensors are deduplicated
         and checked for equality across ranks; expert tensors are gathered
         along EP, the rest along TP).
      4. Remap Megatron parameter names to HuggingFace GLM-4V names, save
         sharded safetensors via ``save_sharded_model`` and write a
         ``config.json`` derived from the vLLM config at ``vllm_config_path``
         (required in practice: it is opened unconditionally).
    """
    # --- 1. Detect original parallel degrees from rank-directory names ---
    origin_tp, origin_ep, origin_pp = -1, -1, -1
    check_ep_or_pp_later = False
    for item in Path(model_path).iterdir():
        if item.is_dir():
            match = re.match(r"mp_rank_(\d{2})(?:_(\d{3}))?(?:_(\d{3}))?", item.name)
            if match:
                groups = match.groups()
                tp = int(groups[0])
                origin_tp = max(origin_tp, tp + 1)
                # maybe TP-EP or TP-PP, need check later
                if groups[1] is not None and groups[2] is None:
                    pp = int(groups[1])
                    origin_pp = max(origin_pp, pp + 1)
                    origin_ep = 1
                    check_ep_or_pp_later = True
                elif groups[1] is not None and groups[2] is not None:
                    pp = int(groups[1])
                    ep = int(groups[2])
                    origin_pp = max(origin_pp, pp + 1)
                    origin_ep = max(origin_ep, ep + 1)
                else:
                    origin_ep = 1
                    origin_pp = 1
    # --- 2. Load every rank checkpoint into mgt_sd keyed by (tp, pp, ep) ---
    tensor_names_by_file = {}
    mgt_sd = {}
    for item in Path(model_path).iterdir():
        if item.is_dir():
            match = re.match(r"mp_rank_(\d{2})(?:_(\d{3}))?(?:_(\d{3}))?$", item.name)
            if match:
                groups = match.groups()
                tp = int(groups[0])
                pp = int(groups[1]) if groups[1] is not None else 0
                ep = int(groups[2]) if groups[2] is not None else 0
                file_path = item / "model_optim_rng.pt"
                assert file_path.exists(), f"model_optim_rng.pt not found in {item}"
                file_sd = torch.load(file_path, map_location="cpu", weights_only=False)
                # Drop bookkeeping entries that are not real parameters.
                for k in list(file_sd.keys()):
                    if "_extra_state" in k or "dummy_parameter" in k:
                        file_sd.pop(k)
                mgt_sd[(tp, pp, ep)] = file_sd
                tensor_names = set()
                if "model" in file_sd:
                    for key in file_sd["model"].keys():
                        tensor_names.add(key)
                tensor_names_by_file[(tp, pp, ep)] = tensor_names
    # Disambiguate the two-index layout: if any tensor name repeats across
    # the middle rank index, that index must be EP (TP-EP), otherwise TP-PP.
    change_pp_to_ep = False
    if check_ep_or_pp_later:
        prefix_distribution = {}
        for (tp, pp, ep), prefixes in tensor_names_by_file.items():
            for prefix in prefixes:
                if prefix not in prefix_distribution:
                    prefix_distribution[prefix] = set()
                prefix_distribution[prefix].add((tp, pp, ep))
        for prefix, locations in prefix_distribution.items():
            if len(locations) > 1:
                pp_values = {loc[1] for loc in locations}
                if len(pp_values) > 1:
                    print(f"find '{prefix}' in multi ranks {pp_values} the parallelism should be TP-EP")
                    origin_ep = origin_pp
                    origin_pp = 1
                    change_pp_to_ep = True
                    break
                else:
                    print(f"find '{prefix}' only in one ep, parallelism should be TP-PP")
                    break
    print(f"Detected tensor parallel degree TP={origin_tp} EP={origin_ep} PP={origin_pp}")
    assert max(origin_tp, origin_ep) * origin_pp == len(tensor_names_by_file), "maybe some problem in origin weight"
    # Regroup shards per pipeline stage: organized_sd[pp][(ep, tp)] = state dict.
    organized_sd = {}
    for (tp, pp, ep), file_sd in mgt_sd.items():
        if change_pp_to_ep:
            pp, ep = ep, pp
        organized_sd.setdefault(pp, {})
        organized_sd[pp][(ep, tp)] = file_sd
        find_vpp = "model0" in file_sd
    # support VPP, if each pp rank has n vpp blocks, we will treat the original model
    # was parallel as pp n * origin_pp
    if find_vpp:
        organized_sd_vpp = {}
        for i in range(origin_pp):
            for (ep, tp), file_sd in organized_sd[i].items():
                # "modelN" keys hold the N-th virtual-pipeline block of this rank.
                model_keys = sorted(
                    [key for key in file_sd.keys() if key.startswith("model") and key[5:].isdigit()],
                    key=lambda x: int(x[5:]),
                )
                vp_blocks = len(model_keys)
                for idx, key in enumerate(model_keys):
                    assert key in file_sd, f"model {key} not found"
                    organized_sd_vpp.setdefault(idx * origin_pp + i, {})
                    organized_sd_vpp[idx * origin_pp + i][(ep, tp)] = {"model": file_sd[key]}
        origin_pp = origin_pp * vp_blocks
        organized_sd = organized_sd_vpp
    ignore_list = ["_extra_state", "dummy_parameter"]
    # Tensors that are replicated (not sharded) across TP ranks.
    layer_share_list = [
        "norm",
        "conv3d",
        "downsample",
        "router",
        "mlp.linear_fc2.bias",
        "self_attention.linear_proj.bias",
        "position_embeddings",
    ]
    # --- 3. Collect per-rank shards into full_weights with global layer ids ---
    full_weights = {}
    vit_layer_offset = 0
    llm_layer_offset = 0
    llm_layer_pattern = re.compile(r"^(decoder\.layers\.)(\d+)(\..*)$")
    vit_layer_pattern = re.compile(r"^(vision_model\.transformer\.layers\.)(\d+)(\..*)$")
    for pp in sorted(organized_sd.keys()):
        pp_dict = organized_sd[pp]
        next_llm_layer_offset = llm_layer_offset
        next_vit_layer_offset = vit_layer_offset
        ep_map = {}
        tp_map = {}
        tp_seen = set()
        # Keep one representative state dict per TP rank and one per EP rank.
        for (ep, tp), item in pp_dict.items():
            if tp not in tp_seen:
                tp_seen.add(tp)
                tp_map[tp] = item
            ep_map[ep] = item
        # Non-expert tensors: gather across TP ranks.
        for tp in sorted(tp_map.keys()):
            sd = tp_map[tp]
            for full_name, tensor in sd["model"].items():
                if any(x in full_name for x in ignore_list):
                    continue
                llm_name_match = llm_layer_pattern.match(full_name)
                if llm_name_match:
                    # Use a closure to avoid global variable issues
                    def offset_layer(x, offset=llm_layer_offset):
                        nonlocal next_llm_layer_offset
                        _real_layer = int(x.group(2)) + offset
                        next_llm_layer_offset = max(next_llm_layer_offset, _real_layer + 1)
                        return f"{x.group(1)}{_real_layer}{x.group(3)}"

                    full_name = llm_layer_pattern.sub(offset_layer, full_name)
                vit_name_match = vit_layer_pattern.match(full_name)
                if vit_name_match:
                    # Use a closure to avoid global variable issues
                    def offset_layer(x, offset=vit_layer_offset):
                        nonlocal next_vit_layer_offset
                        _real_layer = int(x.group(2)) + offset
                        next_vit_layer_offset = max(next_vit_layer_offset, _real_layer + 1)
                        return f"{x.group(1)}{_real_layer}{x.group(3)}"

                    full_name = vit_layer_pattern.sub(offset_layer, full_name)
                if layer_share_list and any(x in full_name for x in layer_share_list):
                    # Replicated tensor: keep one copy and verify ranks agree.
                    if full_name not in full_weights:
                        full_weights[full_name] = tensor
                    else:
                        assert torch.equal(tensor, full_weights[full_name]), (
                            f"detect diff param in tp named: {full_name}"
                        )
                elif not re.search(r"\.experts\.", full_name):
                    full_weights.setdefault(full_name, [None for _ in range(origin_tp)])
                    full_weights[full_name][tp] = tensor
        # Expert tensors: gather across EP ranks.
        for ep in sorted(ep_map.keys()):
            sd = ep_map[ep]
            for full_name, tensor in sd["model"].items():
                if any(x in full_name for x in ignore_list):
                    continue
                name_match = llm_layer_pattern.match(full_name)
                if name_match:
                    # Use a closure to avoid global variable issues
                    def offset_layer(x, offset=llm_layer_offset):
                        nonlocal next_llm_layer_offset
                        _real_layer = int(x.group(2)) + offset
                        next_llm_layer_offset = max(next_llm_layer_offset, _real_layer + 1)
                        return f"{x.group(1)}{_real_layer}{x.group(3)}"

                    full_name = llm_layer_pattern.sub(offset_layer, full_name)
                if re.search(r"\.experts\.", full_name):
                    full_weights.setdefault(full_name, [None for _ in range(origin_ep)])
                    full_weights[full_name][ep] = tensor
        llm_layer_offset = next_llm_layer_offset
        vit_layer_offset = next_vit_layer_offset
    # Log everything that was collected.
    for k in sorted(full_weights.keys()):
        item = full_weights[k]
        if isinstance(item, list):
            print(f"{k} {len(item)} {item[0].shape} {item[0].dtype}", flush=True)
        else:
            print(f"{k} {item.shape} {item.dtype}", flush=True)
    print(f"Loading vLLM configuration file: {vllm_config_path}")
    with open(vllm_config_path, "r") as f:
        model_config = json.load(f)
    text_config = model_config.get("text_config", {})
    vision_config = model_config.get("vision_config", {})
    num_layers = text_config.get("num_hidden_layers", 40)
    num_heads = text_config.get("num_attention_heads", 32)
    num_kv_heads = text_config.get("num_key_value_heads", 2)
    hidden_size = model_config.get("hidden_size", 4096)
    head_dim = model_config.get("attention_dim", hidden_size // num_heads)
    vision_num_layers = vision_config.get("depth", 24)
    vit_n_head = vision_config.get("num_heads", 12)
    print(
        f"Model parameters: num_layers={num_layers}, vision_num_layers={vision_num_layers}, "
        f"num_heads={num_heads}, multi_query_group_num={num_kv_heads}"
    )
    print("Merging tensor parallel weights...")
    interleaved_qkv = True
    num_attention_heads = num_heads
    multi_query_group_num = num_kv_heads
    attention_dim = head_dim
    # --- 4. Remap Megatron names to HuggingFace GLM-4V names ---
    complete_state_dict = {}
    # LLM
    layer_i = 0
    while f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight" in full_weights:
        if f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight" in full_weights:
            complete_state_dict[f"model.language_model.layers.{layer_i}.input_layernorm.weight"] = full_weights[
                f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight"
            ]
        if f"decoder.layers.{layer_i}.pre_mlp_layernorm.weight" in full_weights:
            complete_state_dict[f"model.language_model.layers.{layer_i}.post_attention_layernorm.weight"] = (
                full_weights[f"decoder.layers.{layer_i}.pre_mlp_layernorm.weight"]
            )
        elif f"decoder.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight" in full_weights:
            complete_state_dict[f"model.language_model.layers.{layer_i}.post_attention_layernorm.weight"] = (
                full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight"]
            )
        # GLM-4.1V Only
        if f"decoder.layers.{layer_i}.post_mlp_layernorm.weight" in full_weights:
            complete_state_dict[f"model.language_model.layers.{layer_i}.post_mlp_layernorm.weight"] = full_weights[
                f"decoder.layers.{layer_i}.post_mlp_layernorm.weight"
            ]
        if f"decoder.layers.{layer_i}.post_self_attn_layernorm.weight" in full_weights:
            complete_state_dict[f"model.language_model.layers.{layer_i}.post_self_attn_layernorm.weight"] = (
                full_weights[f"decoder.layers.{layer_i}.post_self_attn_layernorm.weight"]
            )
        q, k, v = merge_qkv(
            sd_list=full_weights[f"decoder.layers.{layer_i}.self_attention.linear_qkv.weight"],
            original_tp=origin_tp,
            num_attention_heads=num_attention_heads,
            multi_query_group_num=multi_query_group_num,
            attention_dim=attention_dim,
            interleaved_qkv=interleaved_qkv,
        )
        complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.q_proj.weight"] = q.clone()
        complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.k_proj.weight"] = k.clone()
        complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.v_proj.weight"] = v.clone()
        if f"decoder.layers.{layer_i}.self_attention.linear_qkv.bias" in full_weights:
            q_bias, k_bias, v_bias = merge_qkv(
                sd_list=full_weights[f"decoder.layers.{layer_i}.self_attention.linear_qkv.bias"],
                original_tp=origin_tp,
                num_attention_heads=num_attention_heads,
                multi_query_group_num=multi_query_group_num,
                attention_dim=attention_dim,
                interleaved_qkv=interleaved_qkv,
            )
            complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.q_proj.bias"] = q_bias.clone()
            complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.k_proj.bias"] = k_bias.clone()
            complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.v_proj.bias"] = v_bias.clone()
        o_proj = torch.cat(full_weights[f"decoder.layers.{layer_i}.self_attention.linear_proj.weight"], dim=1)
        complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.o_proj.weight"] = o_proj.clone()
        # MLP - Use gate_up_proj
        gate_up_proj = torch.cat(full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc1.weight"], dim=0)
        complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.gate_up_proj.weight"] = gate_up_proj.clone()
        complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
            full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc2.weight"], dim=1
        )
        layer_i += 1
    # Embedd Model, LM Head, and Norm
    embed_tokens = torch.cat(full_weights["embedding.word_embeddings.weight"], dim=0)
    complete_state_dict["model.language_model.embed_tokens.weight"] = embed_tokens.clone()
    lm_head = torch.cat(full_weights["output_layer.weight"], dim=0)
    complete_state_dict["lm_head.weight"] = lm_head.clone()
    complete_state_dict["model.language_model.norm.weight"] = full_weights["decoder.final_layernorm.weight"].clone()
    # VLM
    for layer_i in range(vision_num_layers):
        complete_state_dict[f"model.visual.blocks.{layer_i}.norm1.weight"] = full_weights[
            f"vision_model.transformer.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight"
        ]
        complete_state_dict[f"model.visual.blocks.{layer_i}.norm2.weight"] = full_weights[
            f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight"
        ]
        q, k, v = merge_qkv_vit(
            sd_list=full_weights[f"vision_model.transformer.layers.{layer_i}.self_attention.linear_qkv.weight"],
            original_tp=origin_tp,
            num_attention_heads=vit_n_head,
            multi_query_group_num=vit_n_head,
            attention_dim=attention_dim,
        )
        complete_state_dict[f"model.visual.blocks.{layer_i}.attn.qkv.weight"] = torch.cat((q, k, v), dim=0)
        proj_weight = torch.cat(
            full_weights[f"vision_model.transformer.layers.{layer_i}.self_attention.linear_proj.weight"], dim=1
        )
        complete_state_dict[f"model.visual.blocks.{layer_i}.attn.proj.weight"] = proj_weight.clone()
        gate_proj_weight, up_proj_weight = merge_glu_vit(
            full_weights[f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc1.weight"]
        )
        complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.gate_proj.weight"] = gate_proj_weight.clone()
        complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.up_proj.weight"] = up_proj_weight.clone()
        down_proj_weight = torch.cat(
            full_weights[f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc2.weight"], dim=1
        )
        complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.down_proj.weight"] = down_proj_weight.clone()
    complete_state_dict["model.visual.downsample.weight"] = (
        full_weights["vision_model.downsample.weight"].clone().contiguous()
    )
    complete_state_dict["model.visual.downsample.bias"] = (
        full_weights["vision_model.downsample.bias"].clone().contiguous()
    )
    # Merger
    gate_proj, up_proj = merge_glu_vit(full_weights["vision_projection.encoder.linear_fc1.weight"])
    down_proj = torch.cat(full_weights["vision_projection.encoder.linear_fc2.weight"], dim=1)
    proj = torch.cat(full_weights["vision_projection.linear_fc_extra.weight"], dim=0)
    complete_state_dict["model.visual.merger.gate_proj.weight"] = gate_proj.clone().contiguous()
    complete_state_dict["model.visual.merger.up_proj.weight"] = up_proj.clone().contiguous()
    complete_state_dict["model.visual.merger.down_proj.weight"] = down_proj.clone().contiguous()
    complete_state_dict["model.visual.merger.proj.weight"] = proj.clone().contiguous()
    if "vision_projection.layer_norm.weight" in full_weights:
        complete_state_dict["model.visual.merger.post_projection_norm.weight"] = full_weights[
            "vision_projection.layer_norm.weight"
        ]
    if "vision_projection.layer_norm.bias" in full_weights:
        complete_state_dict["model.visual.merger.post_projection_norm.bias"] = full_weights[
            "vision_projection.layer_norm.bias"
        ]
    complete_state_dict["model.visual.embeddings.position_embedding.weight"] = (
        full_weights["vision_model.position_embeddings.weight"].clone().contiguous()
    )
    complete_state_dict["model.visual.patch_embed.proj.weight"] = (
        full_weights["vision_model.conv3d.weight"].clone().contiguous()
    )
    complete_state_dict["model.visual.patch_embed.proj.bias"] = (
        full_weights["vision_model.conv3d.bias"].clone().contiguous()
    )
    # Check for additional vision model norm layers mentioned in the expected output
    if "vision_model.post_conv_layernorm.weight" in full_weights:
        complete_state_dict["model.visual.post_conv_layernorm.weight"] = (
            full_weights["vision_model.post_conv_layernorm.weight"].clone().contiguous()
        )
    if "vision_model.post_layernorm.weight" in full_weights:
        complete_state_dict["model.visual.post_layernorm.weight"] = (
            full_weights["vision_model.post_layernorm.weight"].clone().contiguous()
        )
    print(f"Total keys in state dict: {len(complete_state_dict)}")
    save_sharded_model(
        complete_state_dict,
        output_path=output_path,
        max_shard_size_gb=5,
        num_layers=num_layers,
        vision_num_layers=vision_num_layers,
    )
    # --- 5. Emit the HuggingFace config.json derived from the vLLM config ---
    hf_config = {
        "architectures": ["Glm4vForConditionalGeneration"],
        "model_type": "glm4v",
        "image_start_token_id": model_config.get("image_start_token_id", 151339),
        "image_end_token_id": model_config.get("image_end_token_id", 151340),
        "video_start_token_id": model_config.get("video_start_token_id", 151341),
        "video_end_token_id": model_config.get("video_end_token_id", 151342),
        "transformers_version": "4.57.1",
    }
    txt_config = {
        "model_type": "glm4v_text",
        "attention_bias": model_config.get("add_qkv_bias", True),
        "attention_dropout": 0.0,
        "pad_token_id": model_config.get("pad_token_id", 151329),
        "eos_token_id": model_config.get("eos_token_id", [151329, 151336, 151338]),
        "image_token_id": model_config.get("image_token_id", 151363),
        "video_token_id": model_config.get("video_token_id", 151364),
        "hidden_act": text_config.get("hidden_act", "silu"),
        "hidden_size": text_config.get("hidden_size", 4096),
        "initializer_range": 0.02,
        "intermediate_size": text_config.get("intermediate_size", 13696),
        "max_position_embeddings": text_config.get("seq_length", 131072),
        "num_attention_heads": text_config.get("num_attention_heads", 32),
        "num_hidden_layers": text_config.get("num_layers", 40),
        "num_key_value_heads": text_config.get("num_key_value_heads", 2),
        "rms_norm_eps": text_config.get("layernorm_epsilon", 1e-05),
        "dtype": text_config.get("torch_dtype", "bfloat16"),
        "use_cache": text_config.get("use_cache", True),
        "vocab_size": text_config.get("vocab_size", 151552),
        "tie_word_embeddings": False,
        "rope_parameters": {
            "rope_type": "default",
            "rope_theta": 10000.0,
            "mrope_section": [8, 12, 12],
            "partial_rotary_factor": 0.5,
        },
    }
    hf_config["text_config"] = txt_config
    if "vision_config" in model_config:
        vision_config = {
            "model_type": "glm4v_vision",
            "hidden_size": model_config["vision_config"].get("hidden_size", 1536),
            "depth": model_config["vision_config"].get("num_layers", 24),
            "num_heads": model_config["vision_config"].get("num_attention_heads", 12),
            "attention_bias": model_config["vision_config"].get("attention_bias", False),
            "intermediate_size": model_config.get("ffn_hidden_size", 13696),
            "hidden_act": model_config["vision_config"].get("hidden_act", "silu"),
            "hidden_dropout_prob": model_config["vision_config"].get("hidden_dropout_prob", 0.0),
            "initializer_range": 0.02,
            "image_size": model_config["vision_config"].get("image_size", 336),
            "patch_size": model_config["vision_config"].get("patch_size", 14),
            "out_hidden_size": model_config.get("hidden_size", 4096),
            "rms_norm_eps": model_config["vision_config"].get("layernorm_epsilon", 1e-05),
            "spatial_merge_size": model_config["vision_config"].get("downsample_ratio", 2),
            "temporal_patch_size": model_config["vision_config"].get("t_patch", 2),
        }
        hf_config["vision_config"] = vision_config
    config_path = os.path.join(output_path, "config.json")
    with open(config_path, "w") as f:
        json.dump(hf_config, f, indent=2)
    print(f"Conversion complete! Model saved to {output_path}")
def parse_args():
    """Parse command-line arguments for the Megatron->HuggingFace conversion.

    ``--config_path`` is marked required because ``merge_tp_weights``
    unconditionally opens the vLLM config to build the HuggingFace config;
    omitting it previously failed late in the run with a ``TypeError`` from
    ``open(None)`` instead of a clear argparse error.

    Returns:
        argparse.Namespace with ``model_path``, ``output_path``, ``config_path``.
    """
    parser = argparse.ArgumentParser(description="Convert Megatron model to HuggingFace format")
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to Megatron model directory",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Output path for HuggingFace model directory")
    parser.add_argument(
        "--config_path",
        type=str,
        required=True,
        help="Path to vLLM configuration file for creating HuggingFace config",
    )
    return parser.parse_args()
# CLI entry point: parse the source/destination paths and run the
# checkpoint conversion.
if __name__ == "__main__":
    args = parse_args()
    merge_tp_weights(args.model_path, args.output_path, args.config_path)
| UnpicklerWrapper |
python | django-guardian__django-guardian | guardian/testapp/tests/test_other.py | {
"start": 10345,
"end": 11416
} | class ____(TestCase):
@mock.patch("django.contrib.auth.get_user_model")
def test_monkey_patch(self, mocked_get_user_model):
class CustomUserTestClass(AbstractUser):
pass
mocked_get_user_model.return_value = CustomUserTestClass
self.assertFalse(getattr(CustomUserTestClass, "get_anonymous", False))
self.assertFalse(getattr(CustomUserTestClass, "add_obj_perm", False))
self.assertFalse(getattr(CustomUserTestClass, "del_obj_perm", False))
self.assertFalse(getattr(CustomUserTestClass, "evict_obj_perms_cache", False))
# Monkey Patch
guardian.monkey_patch_user()
self.assertTrue(getattr(CustomUserTestClass, "get_anonymous", False))
self.assertTrue(getattr(CustomUserTestClass, "add_obj_perm", False))
self.assertTrue(getattr(CustomUserTestClass, "del_obj_perm", False))
self.assertTrue(getattr(CustomUserTestClass, "evict_obj_perms_cache", False))
user = CustomUserTestClass()
self.assertFalse(user.evict_obj_perms_cache())
| TestMonkeyPatch |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/ecp_viz_sdk/package.py | {
"start": 216,
"end": 490
} | class ____(Package):
"""Package that has a dependency with a variant which
adds a transitive dependency forced to use non default
values.
"""
homepage = "https://dev.null"
version("1.0")
depends_on("conditional-constrained-dependencies")
| EcpVizSdk |
python | ansible__ansible | test/lib/ansible_test/_internal/http.py | {
"start": 3649,
"end": 3865
} | class ____(ApplicationError):
"""HTTP response as an error."""
def __init__(self, status: int, message: str) -> None:
super().__init__('%s: %s' % (status, message))
self.status = status
| HttpError |
python | wandb__wandb | wandb/vendor/pygments/lexers/dylan.py | {
"start": 512,
"end": 8300
} | class ____(RegexLexer):
"""
For the `Dylan <http://www.opendylan.org/>`_ language.
.. versionadded:: 0.7
"""
name = 'Dylan'
aliases = ['dylan']
filenames = ['*.dylan', '*.dyl', '*.intr']
mimetypes = ['text/x-dylan']
flags = re.IGNORECASE
builtins = set((
'subclass', 'abstract', 'block', 'concrete', 'constant', 'class',
'compiler-open', 'compiler-sideways', 'domain', 'dynamic',
'each-subclass', 'exception', 'exclude', 'function', 'generic',
'handler', 'inherited', 'inline', 'inline-only', 'instance',
'interface', 'import', 'keyword', 'library', 'macro', 'method',
'module', 'open', 'primary', 'required', 'sealed', 'sideways',
'singleton', 'slot', 'thread', 'variable', 'virtual'))
keywords = set((
'above', 'afterwards', 'begin', 'below', 'by', 'case', 'cleanup',
'create', 'define', 'else', 'elseif', 'end', 'export', 'finally',
'for', 'from', 'if', 'in', 'let', 'local', 'otherwise', 'rename',
'select', 'signal', 'then', 'to', 'unless', 'until', 'use', 'when',
'while'))
operators = set((
'~', '+', '-', '*', '|', '^', '=', '==', '~=', '~==', '<', '<=',
'>', '>=', '&', '|'))
functions = set((
'abort', 'abs', 'add', 'add!', 'add-method', 'add-new', 'add-new!',
'all-superclasses', 'always', 'any?', 'applicable-method?', 'apply',
'aref', 'aref-setter', 'as', 'as-lowercase', 'as-lowercase!',
'as-uppercase', 'as-uppercase!', 'ash', 'backward-iteration-protocol',
'break', 'ceiling', 'ceiling/', 'cerror', 'check-type', 'choose',
'choose-by', 'complement', 'compose', 'concatenate', 'concatenate-as',
'condition-format-arguments', 'condition-format-string', 'conjoin',
'copy-sequence', 'curry', 'default-handler', 'dimension', 'dimensions',
'direct-subclasses', 'direct-superclasses', 'disjoin', 'do',
'do-handlers', 'element', 'element-setter', 'empty?', 'error', 'even?',
'every?', 'false-or', 'fill!', 'find-key', 'find-method', 'first',
'first-setter', 'floor', 'floor/', 'forward-iteration-protocol',
'function-arguments', 'function-return-values',
'function-specializers', 'gcd', 'generic-function-mandatory-keywords',
'generic-function-methods', 'head', 'head-setter', 'identity',
'initialize', 'instance?', 'integral?', 'intersection',
'key-sequence', 'key-test', 'last', 'last-setter', 'lcm', 'limited',
'list', 'logand', 'logbit?', 'logior', 'lognot', 'logxor', 'make',
'map', 'map-as', 'map-into', 'max', 'member?', 'merge-hash-codes',
'min', 'modulo', 'negative', 'negative?', 'next-method',
'object-class', 'object-hash', 'odd?', 'one-of', 'pair', 'pop',
'pop-last', 'positive?', 'push', 'push-last', 'range', 'rank',
'rcurry', 'reduce', 'reduce1', 'remainder', 'remove', 'remove!',
'remove-duplicates', 'remove-duplicates!', 'remove-key!',
'remove-method', 'replace-elements!', 'replace-subsequence!',
'restart-query', 'return-allowed?', 'return-description',
'return-query', 'reverse', 'reverse!', 'round', 'round/',
'row-major-index', 'second', 'second-setter', 'shallow-copy',
'signal', 'singleton', 'size', 'size-setter', 'slot-initialized?',
'sort', 'sort!', 'sorted-applicable-methods', 'subsequence-position',
'subtype?', 'table-protocol', 'tail', 'tail-setter', 'third',
'third-setter', 'truncate', 'truncate/', 'type-error-expected-type',
'type-error-value', 'type-for-copy', 'type-union', 'union', 'values',
'vector', 'zero?'))
valid_name = '\\\\?[\\w!&*<>|^$%@\\-+~?/=]+'
def get_tokens_unprocessed(self, text):
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
lowercase_value = value.lower()
if lowercase_value in self.builtins:
yield index, Name.Builtin, value
continue
if lowercase_value in self.keywords:
yield index, Keyword, value
continue
if lowercase_value in self.functions:
yield index, Name.Builtin, value
continue
if lowercase_value in self.operators:
yield index, Operator, value
continue
yield index, token, value
tokens = {
'root': [
# Whitespace
(r'\s+', Text),
# single line comment
(r'//.*?\n', Comment.Single),
# lid header
(r'([a-z0-9-]+)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Operator, Text, String)),
default('code') # no header match, switch to code
],
'code': [
# Whitespace
(r'\s+', Text),
# single line comment
(r'//.*?\n', Comment.Single),
# multi-line comment
(r'/\*', Comment.Multiline, 'comment'),
# strings and characters
(r'"', String, 'string'),
(r"'(\\.|\\[0-7]{1,3}|\\x[a-f0-9]{1,2}|[^\\\'\n])'", String.Char),
# binary integer
(r'#b[01]+', Number.Bin),
# octal integer
(r'#o[0-7]+', Number.Oct),
# floating point
(r'[-+]?(\d*\.\d+(e[-+]?\d+)?|\d+(\.\d*)?e[-+]?\d+)', Number.Float),
# decimal integer
(r'[-+]?\d+', Number.Integer),
# hex integer
(r'#x[0-9a-f]+', Number.Hex),
# Macro parameters
(r'(\?' + valid_name + ')(:)'
r'(token|name|variable|expression|body|case-body|\*)',
bygroups(Name.Tag, Operator, Name.Builtin)),
(r'(\?)(:)(token|name|variable|expression|body|case-body|\*)',
bygroups(Name.Tag, Operator, Name.Builtin)),
(r'\?' + valid_name, Name.Tag),
# Punctuation
(r'(=>|::|#\(|#\[|##|\?\?|\?=|\?|[(){}\[\],.;])', Punctuation),
# Most operators are picked up as names and then re-flagged.
# This one isn't valid in a name though, so we pick it up now.
(r':=', Operator),
# Pick up #t / #f before we match other stuff with #.
(r'#[tf]', Literal),
# #"foo" style keywords
(r'#"', String.Symbol, 'keyword'),
# #rest, #key, #all-keys, etc.
(r'#[a-z0-9-]+', Keyword),
# required-init-keyword: style keywords.
(valid_name + ':', Keyword),
# class names
(r'<' + valid_name + '>', Name.Class),
# define variable forms.
(r'\*' + valid_name + '\*', Name.Variable.Global),
# define constant forms.
(r'\$' + valid_name, Name.Constant),
# everything else. We re-flag some of these in the method above.
(valid_name, Name),
],
'comment': [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
'keyword': [
(r'"', String.Symbol, '#pop'),
(r'[^\\"]+', String.Symbol), # all other characters
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-f0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
]
}
| DylanLexer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/streams.py | {
"start": 19101,
"end": 19401
} | class ____(GithubStream):
"""
API docs: https://docs.github.com/en/rest/issues/labels?apiVersion=2022-11-28#list-labels-for-a-repository
"""
def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str:
return f"repos/{stream_slice['repository']}/labels"
| IssueLabels |
python | dask__distributed | distributed/shuffle/_worker_plugin.py | {
"start": 8496,
"end": 14969
} | class ____(WorkerPlugin):
"""Interface between a Worker and a Shuffle.
This extension is responsible for
- Lifecycle of Shuffle instances
- ensuring connectivity between remote shuffle instances
- ensuring connectivity and integration with the scheduler
- routing concurrent calls to the appropriate `Shuffle` based on its `ShuffleID`
- collecting instrumentation of ongoing shuffles and route to scheduler/worker
"""
worker: Worker
shuffle_runs: _ShuffleRunManager
memory_limiter_comms: ResourceLimiter
memory_limiter_disk: ResourceLimiter
closed: bool
def setup(self, worker: Worker) -> None:
# Attach to worker
worker.handlers["shuffle_receive"] = self.shuffle_receive
worker.handlers["shuffle_inputs_done"] = self.shuffle_inputs_done
worker.stream_handlers["shuffle-fail"] = self.shuffle_fail
worker.extensions["shuffle"] = self
# Initialize
self.worker = worker
self.shuffle_runs = _ShuffleRunManager(self)
comm_limit = parse_bytes(dask.config.get("distributed.p2p.comm.buffer"))
self.memory_limiter_comms = ResourceLimiter(
comm_limit, metrics_label="p2p-comms-limiter"
)
storage_limit = parse_bytes(dask.config.get("distributed.p2p.storage.buffer"))
self.memory_limiter_disk = ResourceLimiter(
storage_limit, metrics_label="p2p-disk-limiter"
)
self.closed = False
nthreads = (
dask.config.get("distributed.p2p.threads") or self.worker.state.nthreads
)
self._executor = ThreadPoolExecutor(nthreads)
def __str__(self) -> str:
return f"ShuffleWorkerPlugin on {self.worker.address}"
def __repr__(self) -> str:
return f"<ShuffleWorkerPlugin, worker={self.worker.address_safe!r}, closed={self.closed}>"
# Handlers
##########
# NOTE: handlers are not threadsafe, but they're called from async comms, so that's okay
def heartbeat(self) -> dict[ShuffleId, Any]:
return self.shuffle_runs.heartbeat()
async def shuffle_receive(
self,
shuffle_id: ShuffleId,
run_id: int,
data: list[tuple[int, Any]] | bytes,
) -> OKMessage | ErrorMessage:
"""
Handler: Receive an incoming shard of data from a peer worker.
Using an unknown ``shuffle_id`` is an error.
"""
try:
shuffle_run = await self._get_shuffle_run(shuffle_id, run_id)
return await shuffle_run.receive(data)
except P2PConsistencyError as e:
return error_message(e)
async def shuffle_inputs_done(self, shuffle_id: ShuffleId, run_id: int) -> None:
"""
Handler: Inform the extension that all input partitions have been handed off to extensions.
Using an unknown ``shuffle_id`` is an error.
"""
shuffle_run = await self._get_shuffle_run(shuffle_id, run_id)
await shuffle_run.inputs_done()
def shuffle_fail(self, shuffle_id: ShuffleId, run_id: int, message: str) -> None:
"""Fails the shuffle run with the message as exception and triggers cleanup.
.. warning::
To guarantee the correct order of operations, shuffle_fail must be
synchronous. See
https://github.com/dask/distributed/pull/7486#discussion_r1088857185
for more details.
"""
self.shuffle_runs.fail(shuffle_id=shuffle_id, run_id=run_id, message=message)
def add_partition(
self,
data: Any,
partition_id: int | NDIndex,
id: ShuffleId,
**kwargs: Any,
) -> int:
shuffle_run = self.get_or_create_shuffle(id)
return shuffle_run.add_partition(
data=data,
partition_id=partition_id,
**kwargs,
)
async def _barrier(self, shuffle_id: ShuffleId, run_ids: Sequence[int]) -> int:
"""
Task: Note that the barrier task has been reached (`add_partition` called for all input partitions)
Using an unknown ``shuffle_id`` is an error. Calling this before all partitions have been
added is undefined.
"""
shuffle_run = await self.shuffle_runs.get_most_recent(shuffle_id, run_ids)
# Tell all peers that we've reached the barrier
# Note that this will call `shuffle_inputs_done` on our own worker as well
return await shuffle_run.barrier(run_ids)
async def _get_shuffle_run(
self,
shuffle_id: ShuffleId,
run_id: int,
) -> ShuffleRun:
return await self.shuffle_runs.get_with_run_id(
shuffle_id=shuffle_id, run_id=run_id
)
async def teardown(self, worker: Worker) -> None:
assert not self.closed
self.closed = True
await self.shuffle_runs.teardown()
try:
self._executor.shutdown(cancel_futures=True)
except Exception: # pragma: no cover
self._executor.shutdown()
#############################
# Methods for worker thread #
#############################
def barrier(self, shuffle_id: ShuffleId, run_ids: Sequence[int]) -> int:
result = sync(self.worker.loop, self._barrier, shuffle_id, run_ids)
return result
def get_shuffle_run(
self,
shuffle_id: ShuffleId,
run_id: int,
) -> ShuffleRun:
return sync(
self.worker.loop,
self.shuffle_runs.get_with_run_id,
shuffle_id,
run_id,
)
def get_or_create_shuffle(
self,
shuffle_id: ShuffleId,
) -> ShuffleRun:
key = thread_state.key
return sync(
self.worker.loop,
self.shuffle_runs.get_or_create,
shuffle_id,
key,
)
def get_output_partition(
self,
shuffle_id: ShuffleId,
run_id: int,
partition_id: int | NDIndex,
meta: pd.DataFrame | None = None,
) -> Any:
"""
Task: Retrieve a shuffled output partition from the ShuffleWorkerPlugin.
Calling this for a ``shuffle_id`` which is unknown or incomplete is an error.
"""
shuffle_run = self.get_shuffle_run(shuffle_id, run_id)
key = thread_state.key
return shuffle_run.get_output_partition(
partition_id=partition_id,
key=key,
meta=meta,
)
| ShuffleWorkerPlugin |
python | numpy__numpy | numpy/f2py/tests/test_quoted_character.py | {
"start": 103,
"end": 477
} | class ____(util.F2PyTest):
sources = [util.getpath("tests", "src", "quoted_character", "foo.f")]
@pytest.mark.skipif(sys.platform == "win32",
reason="Fails with MinGW64 Gfortran (Issue #9673)")
@pytest.mark.slow
def test_quoted_character(self):
assert self.module.foo() == (b"'", b'"', b";", b"!", b"(", b")")
| TestQuotedCharacter |
python | encode__django-rest-framework | tests/test_renderers.py | {
"start": 3801,
"end": 4153
} | class ____(APIView):
renderer_classes = (BrowsableAPIRenderer,)
permission_classes = (POSTDeniedPermission,)
def get(self, request):
return Response()
def post(self, request):
return Response()
def put(self, request):
return Response()
def patch(self, request):
return Response()
| POSTDeniedView |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/secret.py | {
"start": 1064,
"end": 5317
} | class ____(K8SModel):
"""Defines Kubernetes Secret Volume."""
def __init__(self, deploy_type, deploy_target, secret, key=None, items=None):
"""
Initialize a Kubernetes Secret Object.
Used to track requested secrets from the user.
:param deploy_type: The type of secret deploy in Kubernetes, either `env` or
`volume`
:param deploy_target: (Optional) The environment variable when
`deploy_type` `env` or file path when `deploy_type` `volume` where
expose secret. If `key` is not provided deploy target should be None.
:param secret: Name of the secrets object in Kubernetes
:param key: (Optional) Key of the secret within the Kubernetes Secret
if not provided in `deploy_type` `env` it will mount all secrets in object
:param items: (Optional) items that can be added to a volume secret for specifying projects of
secret keys to paths
https://kubernetes.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths
"""
if deploy_type not in ("env", "volume"):
raise AirflowConfigException("deploy_type must be env or volume")
self.deploy_type = deploy_type
self.deploy_target = deploy_target
self.items = items or []
if deploy_target is not None and deploy_type == "env":
# if deploying to env, capitalize the deploy target
self.deploy_target = deploy_target.upper()
if key is not None and deploy_target is None:
raise AirflowConfigException("If `key` is set, `deploy_target` should not be None")
self.secret = secret
self.key = key
def to_env_secret(self) -> k8s.V1EnvVar:
"""Store es environment secret."""
return k8s.V1EnvVar(
name=self.deploy_target,
value_from=k8s.V1EnvVarSource(
secret_key_ref=k8s.V1SecretKeySelector(name=self.secret, key=self.key)
),
)
def to_env_from_secret(self) -> k8s.V1EnvFromSource:
"""Read from environment to secret."""
return k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(name=self.secret))
def to_volume_secret(self) -> tuple[k8s.V1Volume, k8s.V1VolumeMount]:
"""Convert to volume secret."""
vol_id = f"secretvol{uuid.uuid4()}"
volume = k8s.V1Volume(name=vol_id, secret=k8s.V1SecretVolumeSource(secret_name=self.secret))
if self.items:
volume.secret.items = self.items
return (volume, k8s.V1VolumeMount(mount_path=self.deploy_target, name=vol_id, read_only=True))
def attach_to_pod(self, pod: k8s.V1Pod) -> k8s.V1Pod:
"""Attach to pod."""
cp_pod = copy.deepcopy(pod)
if self.deploy_type == "volume":
volume, volume_mount = self.to_volume_secret()
if cp_pod.spec.volumes is None:
cp_pod.spec.volumes = []
cp_pod.spec.volumes.append(volume)
if cp_pod.spec.containers[0].volume_mounts is None:
cp_pod.spec.containers[0].volume_mounts = []
cp_pod.spec.containers[0].volume_mounts.append(volume_mount)
if self.deploy_type == "env" and self.key is not None:
env = self.to_env_secret()
if cp_pod.spec.containers[0].env is None:
cp_pod.spec.containers[0].env = []
cp_pod.spec.containers[0].env.append(env)
if self.deploy_type == "env" and self.key is None:
env_from = self.to_env_from_secret()
if cp_pod.spec.containers[0].env_from is None:
cp_pod.spec.containers[0].env_from = []
cp_pod.spec.containers[0].env_from.append(env_from)
return cp_pod
def __eq__(self, other):
return (
self.deploy_type == other.deploy_type
and self.deploy_target == other.deploy_target
and self.secret == other.secret
and self.key == other.key
)
def __hash__(self):
return hash((self.deploy_type, self.deploy_target, self.secret, self.key))
def __repr__(self):
return f"Secret({self.deploy_type}, {self.deploy_target}, {self.secret}, {self.key})"
| Secret |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/execution.py | {
"start": 2179,
"end": 2817
} | class ____(graphene.Enum):
COMPUTE = "COMPUTE"
UNRESOLVED_MAPPED = "UNRESOLVED_MAPPED"
UNRESOLVED_COLLECT = "UNRESOLVED_COLLECT"
class Meta:
name = "StepKind"
@property
def description(self):
if self == GrapheneStepKind.COMPUTE:
return "This is a user-defined computation step"
if self == GrapheneStepKind.UNRESOLVED_MAPPED:
return "This is a mapped step that has not yet been resolved"
if self == GrapheneStepKind.UNRESOLVED_COLLECT:
return "This is a collect step that is not yet resolved"
else:
return None
| GrapheneStepKind |
python | pydata__xarray | xarray/tests/test_rolling.py | {
"start": 20019,
"end": 34403
} | class ____:
@pytest.mark.parametrize(
"funcname, argument",
[
("reduce", (np.mean,)),
("mean", ()),
("construct", ("window_dim",)),
("count", ()),
],
)
def test_rolling_keep_attrs(self, funcname, argument) -> None:
global_attrs = {"units": "test", "long_name": "testing"}
da_attrs = {"da_attr": "test"}
da_not_rolled_attrs = {"da_not_rolled_attr": "test"}
data = np.linspace(10, 15, 100)
coords = np.linspace(1, 10, 100)
ds = Dataset(
data_vars={"da": ("coord", data), "da_not_rolled": ("no_coord", data)},
coords={"coord": coords},
attrs=global_attrs,
)
ds.da.attrs = da_attrs
ds.da_not_rolled.attrs = da_not_rolled_attrs
# attrs are kept by default
func = getattr(ds.rolling(dim={"coord": 5}), funcname)
result = func(*argument)
assert result.attrs == global_attrs
assert result.da.attrs == da_attrs
assert result.da_not_rolled.attrs == da_not_rolled_attrs
assert result.da.name == "da"
assert result.da_not_rolled.name == "da_not_rolled"
# discard attrs
func = getattr(ds.rolling(dim={"coord": 5}), funcname)
result = func(*argument, keep_attrs=False)
assert result.attrs == {}
assert result.da.attrs == {}
assert result.da_not_rolled.attrs == {}
assert result.da.name == "da"
assert result.da_not_rolled.name == "da_not_rolled"
# test discard attrs using global option
func = getattr(ds.rolling(dim={"coord": 5}), funcname)
with set_options(keep_attrs=False):
result = func(*argument)
assert result.attrs == {}
assert result.da.attrs == {}
assert result.da_not_rolled.attrs == {}
assert result.da.name == "da"
assert result.da_not_rolled.name == "da_not_rolled"
# keyword takes precedence over global option
func = getattr(ds.rolling(dim={"coord": 5}), funcname)
with set_options(keep_attrs=False):
result = func(*argument, keep_attrs=True)
assert result.attrs == global_attrs
assert result.da.attrs == da_attrs
assert result.da_not_rolled.attrs == da_not_rolled_attrs
assert result.da.name == "da"
assert result.da_not_rolled.name == "da_not_rolled"
func = getattr(ds.rolling(dim={"coord": 5}), funcname)
with set_options(keep_attrs=True):
result = func(*argument, keep_attrs=False)
assert result.attrs == {}
assert result.da.attrs == {}
assert result.da_not_rolled.attrs == {}
assert result.da.name == "da"
assert result.da_not_rolled.name == "da_not_rolled"
def test_rolling_properties(self, ds) -> None:
# catching invalid args
with pytest.raises(ValueError, match="window must be > 0"):
ds.rolling(time=-2)
with pytest.raises(ValueError, match="min_periods must be greater than zero"):
ds.rolling(time=2, min_periods=0)
with pytest.raises(KeyError, match="time2"):
ds.rolling(time2=2)
with pytest.raises(
KeyError,
match=r"\('foo',\) not found in Dataset dimensions",
):
ds.rolling(foo=2)
@requires_dask_ge_2024_11_0
def test_rolling_construct_automatic_rechunk(self):
import dask
# Construct dataset with chunk size of (400, 400, 1) or 1.22 MiB
da = DataArray(
dims=["latitude", "longitude", "time"],
data=dask.array.random.random((400, 400, 400), chunks=(-1, -1, 1)),
name="foo",
)
for obj in [da, da.to_dataset()]:
# Dataset now has chunks of size (400, 400, 100 100) or 11.92 GiB
rechunked = obj.rolling(time=100, center=True).construct(
"window",
sliding_window_view_kwargs=dict(
automatic_rechunk=True, writeable=False
),
)
not_rechunked = obj.rolling(time=100, center=True).construct(
"window",
sliding_window_view_kwargs=dict(
automatic_rechunk=False, writeable=True
),
)
assert rechunked.chunksizes != not_rechunked.chunksizes
roller = obj.isel(time=slice(30)).rolling(time=10, center=True)
one = roller.reduce(
np.sum, sliding_window_view_kwargs=dict(automatic_rechunk=True)
)
two = roller.reduce(
np.sum, sliding_window_view_kwargs=dict(automatic_rechunk=False)
)
assert_identical(one, two)
@pytest.mark.parametrize(
"name", ("sum", "mean", "std", "var", "min", "max", "median")
)
@pytest.mark.parametrize("center", (True, False, None))
@pytest.mark.parametrize("min_periods", (1, None))
@pytest.mark.parametrize("key", ("z1", "z2"))
@pytest.mark.parametrize("backend", ["numpy"], indirect=True)
def test_rolling_wrapped_bottleneck(
self, ds, name, center, min_periods, key, compute_backend
) -> None:
bn = pytest.importorskip("bottleneck", minversion="1.1")
# Test all bottleneck functions
rolling_obj = ds.rolling(time=7, min_periods=min_periods)
func_name = f"move_{name}"
actual = getattr(rolling_obj, name)()
if key == "z1": # z1 does not depend on 'Time' axis. Stored as it is.
expected = ds[key]
elif key == "z2":
expected = getattr(bn, func_name)(
ds[key].values, window=7, axis=0, min_count=min_periods
)
else:
raise ValueError
np.testing.assert_allclose(actual[key].values, expected)
# Test center
rolling_obj = ds.rolling(time=7, center=center)
actual = getattr(rolling_obj, name)()["time"]
assert_allclose(actual, ds["time"])
@pytest.mark.parametrize("center", (True, False))
@pytest.mark.parametrize("min_periods", (None, 1, 2, 3))
@pytest.mark.parametrize("window", (1, 2, 3, 4))
def test_rolling_pandas_compat(self, center, window, min_periods) -> None:
df = pd.DataFrame(
{
"x": np.random.randn(20),
"y": np.random.randn(20),
"time": np.linspace(0, 1, 20),
}
)
ds = Dataset.from_dataframe(df)
if min_periods is not None and window < min_periods:
min_periods = window
df_rolling = df.rolling(window, center=center, min_periods=min_periods).mean()
ds_rolling = ds.rolling(
index=window, center=center, min_periods=min_periods
).mean()
np.testing.assert_allclose(
np.asarray(df_rolling["x"].values), ds_rolling["x"].values
)
np.testing.assert_allclose(df_rolling.index, ds_rolling["index"])
@pytest.mark.parametrize("center", (True, False))
@pytest.mark.parametrize("window", (1, 2, 3, 4))
def test_rolling_construct(self, center: bool, window: int) -> None:
df = pd.DataFrame(
{
"x": np.random.randn(20),
"y": np.random.randn(20),
"time": np.linspace(0, 1, 20),
}
)
ds = Dataset.from_dataframe(df)
df_rolling = df.rolling(window, center=center, min_periods=1).mean()
ds_rolling = ds.rolling(index=window, center=center)
ds_rolling_mean = ds_rolling.construct("window").mean("window")
np.testing.assert_allclose(
np.asarray(df_rolling["x"].values), ds_rolling_mean["x"].values
)
np.testing.assert_allclose(df_rolling.index, ds_rolling_mean["index"])
# with fill_value
ds_rolling_mean = ds_rolling.construct("window", stride=2, fill_value=0.0).mean(
"window"
)
assert (ds_rolling_mean.isnull().sum() == 0).to_dataarray(dim="vars").all()
assert (ds_rolling_mean["x"] == 0.0).sum() >= 0
@pytest.mark.parametrize("center", (True, False))
@pytest.mark.parametrize("window", (1, 2, 3, 4))
def test_rolling_construct_stride(self, center: bool, window: int) -> None:
df = pd.DataFrame(
{
"x": np.random.randn(20),
"y": np.random.randn(20),
"time": np.linspace(0, 1, 20),
}
)
ds = Dataset.from_dataframe(df)
df_rolling_mean = df.rolling(window, center=center, min_periods=1).mean()
# With an index (dimension coordinate)
ds_rolling = ds.rolling(index=window, center=center)
ds_rolling_mean = ds_rolling.construct("w", stride=2).mean("w")
np.testing.assert_allclose(
np.asarray(df_rolling_mean["x"][::2].values), ds_rolling_mean["x"].values
)
np.testing.assert_allclose(df_rolling_mean.index[::2], ds_rolling_mean["index"])
# Without index (https://github.com/pydata/xarray/issues/7021)
ds2 = ds.drop_vars("index")
ds2_rolling = ds2.rolling(index=window, center=center)
ds2_rolling_mean = ds2_rolling.construct("w", stride=2).mean("w")
np.testing.assert_allclose(
np.asarray(df_rolling_mean["x"][::2].values), ds2_rolling_mean["x"].values
)
# Mixed coordinates, indexes and 2D coordinates
ds3 = xr.Dataset(
{"x": ("t", range(20)), "x2": ("y", range(5))},
{
"t": range(20),
"y": ("y", range(5)),
"t2": ("t", range(20)),
"y2": ("y", range(5)),
"yt": (["t", "y"], np.ones((20, 5))),
},
)
ds3_rolling = ds3.rolling(t=window, center=center)
ds3_rolling_mean = ds3_rolling.construct("w", stride=2).mean("w")
for coord in ds3.coords:
assert coord in ds3_rolling_mean.coords
@pytest.mark.slow
@pytest.mark.parametrize("ds", (1, 2), indirect=True)
@pytest.mark.parametrize("center", (True, False))
@pytest.mark.parametrize("min_periods", (None, 1, 2, 3))
@pytest.mark.parametrize("window", (1, 2, 3, 4))
@pytest.mark.parametrize(
"name", ("sum", "mean", "std", "var", "min", "max", "median")
)
def test_rolling_reduce(self, ds, center, min_periods, window, name) -> None:
if min_periods is not None and window < min_periods:
min_periods = window
if name == "std" and window == 1:
pytest.skip("std with window == 1 is unstable in bottleneck")
rolling_obj = ds.rolling(time=window, center=center, min_periods=min_periods)
# add nan prefix to numpy methods to get similar behavior as bottleneck
actual = rolling_obj.reduce(getattr(np, f"nan{name}"))
expected = getattr(rolling_obj, name)()
assert_allclose(actual, expected)
assert ds.sizes == actual.sizes
# make sure the order of data_var are not changed.
assert list(ds.data_vars.keys()) == list(actual.data_vars.keys())
# Make sure the dimension order is restored
for key, src_var in ds.data_vars.items():
assert src_var.dims == actual[key].dims
@pytest.mark.parametrize("ds", (2,), indirect=True)
@pytest.mark.parametrize("center", (True, False))
@pytest.mark.parametrize("min_periods", (None, 1))
@pytest.mark.parametrize("name", ("sum", "max"))
@pytest.mark.parametrize("dask", (True, False))
def test_ndrolling_reduce(self, ds, center, min_periods, name, dask) -> None:
if dask and has_dask:
ds = ds.chunk({"x": 4})
rolling_obj = ds.rolling(time=4, x=3, center=center, min_periods=min_periods)
actual = getattr(rolling_obj, name)()
expected = getattr(
getattr(
ds.rolling(time=4, center=center, min_periods=min_periods), name
)().rolling(x=3, center=center, min_periods=min_periods),
name,
)()
assert_allclose(actual, expected)
assert actual.sizes == expected.sizes
# Do it in the opposite order
expected = getattr(
getattr(
ds.rolling(x=3, center=center, min_periods=min_periods), name
)().rolling(time=4, center=center, min_periods=min_periods),
name,
)()
assert_allclose(actual, expected)
assert actual.sizes == expected.sizes
@pytest.mark.parametrize("center", (True, False, (True, False)))
@pytest.mark.parametrize("fill_value", (np.nan, 0.0))
@pytest.mark.parametrize("dask", (True, False))
def test_ndrolling_construct(self, center, fill_value, dask) -> None:
da = DataArray(
np.arange(5 * 6 * 7).reshape(5, 6, 7).astype(float),
dims=["x", "y", "z"],
coords={"x": ["a", "b", "c", "d", "e"], "y": np.arange(6)},
)
ds = xr.Dataset({"da": da})
if dask and has_dask:
ds = ds.chunk({"x": 4})
actual = ds.rolling(x=3, z=2, center=center).construct(
x="x1", z="z1", fill_value=fill_value
)
if not isinstance(center, tuple):
center = (center, center)
expected = (
ds.rolling(x=3, center=center[0])
.construct(x="x1", fill_value=fill_value)
.rolling(z=2, center=center[1])
.construct(z="z1", fill_value=fill_value)
)
assert_allclose(actual, expected)
@requires_dask
@pytest.mark.filterwarnings("error")
@pytest.mark.parametrize("ds", (2,), indirect=True)
@pytest.mark.parametrize("name", ("mean", "max"))
def test_raise_no_warning_dask_rolling_assert_close(self, ds, name) -> None:
"""
This is a puzzle — I can't easily find the source of the warning. It
requires `assert_allclose` to be run, for the `ds` param to be 2, and is
different for `mean` and `max`. `sum` raises no warning.
"""
ds = ds.chunk({"x": 4})
rolling_obj = ds.rolling(time=4, x=3)
actual = getattr(rolling_obj, name)()
expected = getattr(getattr(ds.rolling(time=4), name)().rolling(x=3), name)()
assert_allclose(actual, expected)
@requires_numbagg
| TestDatasetRolling |
python | huggingface__transformers | src/transformers/models/dbrx/modeling_dbrx.py | {
"start": 27750,
"end": 32799
} | class ____(DbrxPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "transformer.wte.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config: DbrxConfig):
super().__init__(config)
self.transformer = DbrxModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.router_aux_loss_coef = config.ffn_config.moe_loss_weight
self.num_experts = config.ffn_config.moe_num_experts
self.num_experts_per_tok = config.ffn_config.moe_top_k
self.post_init()
def get_input_embeddings(self) -> nn.Embedding:
return self.transformer.get_input_embeddings()
def set_input_embeddings(self, value: nn.Embedding):
self.transformer.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Linear:
return self.lm_head
def set_output_embeddings(self, new_embeddings: nn.Linear):
self.lm_head = new_embeddings
def set_decoder(self, decoder: DbrxModel):
self.transformer = decoder
def get_decoder(self) -> DbrxModel:
return self.transformer
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_router_logits: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> MoeCausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>> from transformers import AutoTokenizer, DbrxForCausalLM
>> model = DbrxForCausalLM.from_pretrained("databricks/dbrx-instruct")
>> tokenizer = AutoTokenizer.from_pretrained("databricks/dbrx-instruct")
>> prompt = "Hey, are you conscious? Can you talk to me?"
>> inputs = tokenizer(prompt, return_tensors="pt")
>> # Generate
>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```
"""
output_router_logits = (
output_router_logits if output_router_logits is not None else self.config.output_router_logits
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: MoeModelOutputWithPast = self.transformer(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_router_logits=output_router_logits,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(
outputs.router_logits,
self.num_experts,
self.num_experts_per_tok,
attention_mask,
)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
return MoeCausalLMOutputWithPast(
loss=loss,
aux_loss=aux_loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
router_logits=outputs.router_logits,
)
__all__ = ["DbrxForCausalLM", "DbrxModel", "DbrxPreTrainedModel"]
| DbrxForCausalLM |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/mutable.py | {
"start": 27841,
"end": 31004
} | class ____(Mutable, Dict[_KT, _VT]):
"""A dictionary type that implements :class:`.Mutable`.
The :class:`.MutableDict` object implements a dictionary that will
emit change events to the underlying mapping when the contents of
the dictionary are altered, including when values are added or removed.
Note that :class:`.MutableDict` does **not** apply mutable tracking to the
*values themselves* inside the dictionary. Therefore it is not a sufficient
solution for the use case of tracking deep changes to a *recursive*
dictionary structure, such as a JSON structure. To support this use case,
build a subclass of :class:`.MutableDict` that provides appropriate
coercion to the values placed in the dictionary so that they too are
"mutable", and emit events up to their parent structure.
.. seealso::
:class:`.MutableList`
:class:`.MutableSet`
"""
def __setitem__(self, key: _KT, value: _VT) -> None:
"""Detect dictionary set events and emit change events."""
dict.__setitem__(self, key, value)
self.changed()
if TYPE_CHECKING:
# from https://github.com/python/mypy/issues/14858
@overload
def setdefault(
self: MutableDict[_KT, Optional[_T]], key: _KT, value: None = None
) -> Optional[_T]: ...
@overload
def setdefault(self, key: _KT, value: _VT) -> _VT: ...
def setdefault(self, key: _KT, value: object = None) -> object: ...
else:
def setdefault(self, *arg): # noqa: F811
result = dict.setdefault(self, *arg)
self.changed()
return result
def __delitem__(self, key: _KT) -> None:
"""Detect dictionary del events and emit change events."""
dict.__delitem__(self, key)
self.changed()
def update(self, *a: Any, **kw: _VT) -> None:
dict.update(self, *a, **kw)
self.changed()
if TYPE_CHECKING:
@overload
def pop(self, __key: _KT, /) -> _VT: ...
@overload
def pop(self, __key: _KT, default: _VT | _T, /) -> _VT | _T: ...
def pop(
self, __key: _KT, __default: _VT | _T | None = None, /
) -> _VT | _T: ...
else:
def pop(self, *arg): # noqa: F811
result = dict.pop(self, *arg)
self.changed()
return result
def popitem(self) -> Tuple[_KT, _VT]:
result = dict.popitem(self)
self.changed()
return result
def clear(self) -> None:
dict.clear(self)
self.changed()
@classmethod
def coerce(cls, key: str, value: Any) -> MutableDict[_KT, _VT] | None:
"""Convert plain dictionary to instance of this class."""
if not isinstance(value, cls):
if isinstance(value, dict):
return cls(value)
return Mutable.coerce(key, value)
else:
return value
def __getstate__(self) -> Dict[_KT, _VT]:
return dict(self)
def __setstate__(
self, state: Union[Dict[str, int], Dict[str, str]]
) -> None:
self.update(state)
| MutableDict |
python | django-haystack__django-haystack | haystack/management/commands/clear_index.py | {
"start": 88,
"end": 2265
} | class ____(BaseCommand):
help = "Clears out the search index completely." # noqa A003
def add_arguments(self, parser):
parser.add_argument(
"--noinput",
action="store_false",
dest="interactive",
default=True,
help="If provided, no prompts will be issued to the user and the data will be wiped out.",
)
parser.add_argument(
"-u",
"--using",
action="append",
default=[],
help="Update only the named backend (can be used multiple times). "
"By default all backends will be updated.",
)
parser.add_argument(
"--nocommit",
action="store_false",
dest="commit",
default=True,
help="Will pass commit=False to the backend.",
)
def handle(self, **options):
"""Clears out the search index completely."""
self.verbosity = int(options.get("verbosity", 1))
self.commit = options.get("commit", True)
using = options.get("using")
if not using:
using = connections.connections_info.keys()
if options.get("interactive", True):
self.stdout.write(
"WARNING: This will irreparably remove EVERYTHING from your search index in connection '%s'."
% "', '".join(using)
)
self.stdout.write(
"Your choices after this are to restore from backups or rebuild via the `rebuild_index` command."
)
yes_or_no = input("Are you sure you wish to continue? [y/N] ")
if not yes_or_no.lower().startswith("y"):
self.stdout.write("No action taken.")
return
if self.verbosity >= 1:
self.stdout.write(
"Removing all documents from your index because you said so."
)
for backend_name in using:
backend = connections[backend_name].get_backend()
backend.clear(commit=self.commit)
if self.verbosity >= 1:
self.stdout.write("All documents removed.")
| Command |
python | spyder-ide__spyder | spyder/widgets/helperwidgets.py | {
"start": 21692,
"end": 23543
} | class ____(QLabel):
"""Icon widget to show information as a tooltip when clicked."""
def __init__(
self,
tip_text: str,
icon: QIcon,
hover_icon: QIcon,
size: int | None = None,
wrap_text: bool = False
):
super().__init__()
if wrap_text:
tip_text = '\n'.join(textwrap.wrap(tip_text, 50))
self.tip_text = tip_text
size = size if size is not None else AppStyle.ConfigPageIconSize
self.icon = icon.pixmap(QSize(size, size))
self.hover_icon = hover_icon.pixmap(QSize(size, size))
# Timer to show the tip if users don't click on the widget
self.tip_timer = QTimer(self)
self.tip_timer.setInterval(300)
self.tip_timer.setSingleShot(True)
self.tip_timer.timeout.connect(self.show_tip)
self.setPixmap(self.icon)
self.setFixedWidth(size + 3)
self.setFixedHeight(size + 3)
def show_tip(self):
"""Show tooltip"""
if not QToolTip.isVisible():
QToolTip.showText(
self.mapToGlobal(QPoint(self.width(), 15)),
self.tip_text,
self
)
def enterEvent(self, event):
"""
Change cursor shape and set hover icon when the mouse is on the widget.
"""
self.setCursor(Qt.PointingHandCursor)
self.setPixmap(self.hover_icon)
self.tip_timer.start()
super().enterEvent(event)
def leaveEvent(self, event):
"""Hide tooltip and restore icon when the mouse leaves the widget."""
QToolTip.hideText()
self.setPixmap(self.icon)
self.tip_timer.stop()
super().leaveEvent(event)
def mouseReleaseEvent(self, event):
"""Show tooltip when the widget is clicked."""
self.show_tip()
| TipWidget |
python | getsentry__sentry | src/sentry/monitors/processing_errors/errors.py | {
"start": 5161,
"end": 5702
} | class ____(Exception):
"""
This exception should be raised with a list of ProcessingError representing
the problems which occurred while processing a monitor check-in.
"""
def __init__(
self,
processing_errors: Sequence[ProcessingError],
monitor: Monitor | None = None,
):
self.processing_errors = processing_errors
# Monitor is optional, since we don't always have the monitor related
# to the checkin available
self.monitor = monitor
| ProcessingErrorsException |
python | fluentpython__example-code-2e | 10-dp-1class-func/pytypes/classic_strategy.py | {
"start": 2556,
"end": 2884
} | class ____(Promotion): # second Concrete Strategy
"""10% discount for each LineItem with 20 or more units"""
def discount(self, order):
discount = 0
for item in order.cart:
if item.quantity >= 20:
discount += item.total() * .1
return discount
@typelogged
| BulkItemPromo |
python | getlogbook__logbook | src/logbook/compat.py | {
"start": 979,
"end": 1745
} | class ____:
"""Temporarily redirects logging for all threads and reverts
it later to the old handlers. Mainly used by the internal
unittests::
from logbook.compat import redirected_logging
with redirected_logging():
...
"""
def __init__(self, set_root_logger_level=True):
self.old_handlers = logging.root.handlers[:]
self.old_level = logging.root.level
self.set_root_logger_level = set_root_logger_level
def start(self):
redirect_logging(self.set_root_logger_level)
def end(self, etype=None, evalue=None, tb=None):
logging.root.handlers[:] = self.old_handlers
logging.root.setLevel(self.old_level)
__enter__ = start
__exit__ = end
| redirected_logging |
python | patrick-kidger__equinox | equinox/nn/_pool.py | {
"start": 7407,
"end": 8967
} | class ____(Pool):
"""One-dimensional downsample using the maximum over a sliding window."""
def __init__(
self,
kernel_size: int | Sequence[int],
stride: int | Sequence[int] = 1,
padding: int | Sequence[int] | Sequence[tuple[int, int]] = 0,
use_ceil: bool = False,
):
"""**Arguments:**
- `kernel_size`: The size of the convolutional kernel.
- `stride`: The stride of the convolution.
- `padding`: The amount of padding to apply before and after each
spatial dimension.
- `use_ceil`: If `True`, then `ceil` is used to compute the final output
shape instead of `floor`. For `ceil`, if required, extra padding is added.
Defaults to `False`.
"""
super().__init__(
init=-jnp.inf,
operation=lax.max,
num_spatial_dims=1,
kernel_size=kernel_size,
stride=stride,
padding=padding,
use_ceil=use_ceil,
)
# Redefined to get them in the right order in docs
@named_scope("eqx.nn.MaxPool1d")
def __call__(self, x: Array, *, key: PRNGKeyArray | None = None) -> Array:
"""**Arguments:**
- `x`: The input. Should be a JAX array of shape `(channels, dim)`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
A JAX array of shape `(channels, new_dim)`.
"""
return super().__call__(x)
| MaxPool1d |
python | lepture__authlib | authlib/oauth2/auth.py | {
"start": 2392,
"end": 3587
} | class ____:
"""Attach token information to HTTP requests.
:param token: A dict or OAuth2Token instance of an OAuth 2.0 token
:param token_placement: The placement of the token, default is ``header``,
available choices:
* header (default)
* body
* uri
"""
DEFAULT_TOKEN_TYPE = "bearer"
SIGN_METHODS = {"bearer": add_bearer_token}
def __init__(self, token, token_placement="header", client=None):
self.token = OAuth2Token.from_dict(token)
self.token_placement = token_placement
self.client = client
self.hooks = set()
def set_token(self, token):
self.token = OAuth2Token.from_dict(token)
def prepare(self, uri, headers, body):
token_type = self.token.get("token_type", self.DEFAULT_TOKEN_TYPE)
sign = self.SIGN_METHODS[token_type.lower()]
uri, headers, body = sign(
self.token["access_token"], uri, headers, body, self.token_placement
)
for hook in self.hooks:
uri, headers, body = hook(uri, headers, body)
return uri, headers, body
def __del__(self):
del self.client
del self.hooks
| TokenAuth |
python | numpy__numpy | numpy/fft/tests/test_pocketfft.py | {
"start": 462,
"end": 22253
} | class ____:
def test_identity(self):
maxlen = 512
x = random(maxlen) + 1j * random(maxlen)
xr = random(maxlen)
for i in range(1, maxlen):
assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i],
atol=1e-12)
assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]), i),
xr[0:i], atol=1e-12)
@pytest.mark.parametrize("dtype", [np.single, np.double, np.longdouble])
def test_identity_long_short(self, dtype):
# Test with explicitly given number of points, both for n
# smaller and for n larger than the input size.
maxlen = 16
atol = 5 * np.spacing(np.array(1., dtype=dtype))
x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype)
xx = np.concatenate([x, np.zeros_like(x)])
xr = random(maxlen).astype(dtype)
xxr = np.concatenate([xr, np.zeros_like(xr)])
for i in range(1, maxlen * 2):
check_c = np.fft.ifft(np.fft.fft(x, n=i), n=i)
assert check_c.real.dtype == dtype
assert_allclose(check_c, xx[0:i], atol=atol, rtol=0)
check_r = np.fft.irfft(np.fft.rfft(xr, n=i), n=i)
assert check_r.dtype == dtype
assert_allclose(check_r, xxr[0:i], atol=atol, rtol=0)
@pytest.mark.parametrize("dtype", [np.single, np.double, np.longdouble])
def test_identity_long_short_reversed(self, dtype):
# Also test explicitly given number of points in reversed order.
maxlen = 16
atol = 6 * np.spacing(np.array(1., dtype=dtype))
x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype)
xx = np.concatenate([x, np.zeros_like(x)])
for i in range(1, maxlen * 2):
check_via_c = np.fft.fft(np.fft.ifft(x, n=i), n=i)
assert check_via_c.dtype == x.dtype
assert_allclose(check_via_c, xx[0:i], atol=atol, rtol=0)
# For irfft, we can neither recover the imaginary part of
# the first element, nor the imaginary part of the last
# element if npts is even. So, set to 0 for the comparison.
y = x.copy()
n = i // 2 + 1
y.imag[0] = 0
if i % 2 == 0:
y.imag[n - 1:] = 0
yy = np.concatenate([y, np.zeros_like(y)])
check_via_r = np.fft.rfft(np.fft.irfft(x, n=i), n=i)
assert check_via_r.dtype == x.dtype
assert_allclose(check_via_r, yy[0:n], atol=atol, rtol=0)
def test_fft(self):
x = random(30) + 1j * random(30)
assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6)
assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6)
assert_allclose(fft1(x) / np.sqrt(30),
np.fft.fft(x, norm="ortho"), atol=1e-6)
assert_allclose(fft1(x) / 30.,
np.fft.fft(x, norm="forward"), atol=1e-6)
@pytest.mark.parametrize("axis", (0, 1))
@pytest.mark.parametrize("dtype", (complex, float))
@pytest.mark.parametrize("transpose", (True, False))
def test_fft_out_argument(self, dtype, transpose, axis):
def zeros_like(x):
if transpose:
return np.zeros_like(x.T).T
else:
return np.zeros_like(x)
# tests below only test the out parameter
if dtype is complex:
y = random((10, 20)) + 1j * random((10, 20))
fft, ifft = np.fft.fft, np.fft.ifft
else:
y = random((10, 20))
fft, ifft = np.fft.rfft, np.fft.irfft
expected = fft(y, axis=axis)
out = zeros_like(expected)
result = fft(y, out=out, axis=axis)
assert result is out
assert_array_equal(result, expected)
expected2 = ifft(expected, axis=axis)
out2 = out if dtype is complex else zeros_like(expected2)
result2 = ifft(out, out=out2, axis=axis)
assert result2 is out2
assert_array_equal(result2, expected2)
@pytest.mark.parametrize("axis", [0, 1])
def test_fft_inplace_out(self, axis):
# Test some weirder in-place combinations
y = random((20, 20)) + 1j * random((20, 20))
# Fully in-place.
y1 = y.copy()
expected1 = np.fft.fft(y1, axis=axis)
result1 = np.fft.fft(y1, axis=axis, out=y1)
assert result1 is y1
assert_array_equal(result1, expected1)
# In-place of part of the array; rest should be unchanged.
y2 = y.copy()
out2 = y2[:10] if axis == 0 else y2[:, :10]
expected2 = np.fft.fft(y2, n=10, axis=axis)
result2 = np.fft.fft(y2, n=10, axis=axis, out=out2)
assert result2 is out2
assert_array_equal(result2, expected2)
if axis == 0:
assert_array_equal(y2[10:], y[10:])
else:
assert_array_equal(y2[:, 10:], y[:, 10:])
# In-place of another part of the array.
y3 = y.copy()
y3_sel = y3[5:] if axis == 0 else y3[:, 5:]
out3 = y3[5:15] if axis == 0 else y3[:, 5:15]
expected3 = np.fft.fft(y3_sel, n=10, axis=axis)
result3 = np.fft.fft(y3_sel, n=10, axis=axis, out=out3)
assert result3 is out3
assert_array_equal(result3, expected3)
if axis == 0:
assert_array_equal(y3[:5], y[:5])
assert_array_equal(y3[15:], y[15:])
else:
assert_array_equal(y3[:, :5], y[:, :5])
assert_array_equal(y3[:, 15:], y[:, 15:])
# In-place with n > nin; rest should be unchanged.
y4 = y.copy()
y4_sel = y4[:10] if axis == 0 else y4[:, :10]
out4 = y4[:15] if axis == 0 else y4[:, :15]
expected4 = np.fft.fft(y4_sel, n=15, axis=axis)
result4 = np.fft.fft(y4_sel, n=15, axis=axis, out=out4)
assert result4 is out4
assert_array_equal(result4, expected4)
if axis == 0:
assert_array_equal(y4[15:], y[15:])
else:
assert_array_equal(y4[:, 15:], y[:, 15:])
# Overwrite in a transpose.
y5 = y.copy()
out5 = y5.T
result5 = np.fft.fft(y5, axis=axis, out=out5)
assert result5 is out5
assert_array_equal(result5, expected1)
# Reverse strides.
y6 = y.copy()
out6 = y6[::-1] if axis == 0 else y6[:, ::-1]
result6 = np.fft.fft(y6, axis=axis, out=out6)
assert result6 is out6
assert_array_equal(result6, expected1)
def test_fft_bad_out(self):
x = np.arange(30.)
with pytest.raises(TypeError, match="must be of ArrayType"):
np.fft.fft(x, out="")
with pytest.raises(ValueError, match="has wrong shape"):
np.fft.fft(x, out=np.zeros_like(x).reshape(5, -1))
with pytest.raises(TypeError, match="Cannot cast"):
np.fft.fft(x, out=np.zeros_like(x, dtype=float))
@pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward'))
def test_ifft(self, norm):
x = random(30) + 1j * random(30)
assert_allclose(
x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm),
atol=1e-6)
# Ensure we get the correct error message
with pytest.raises(ValueError,
match='Invalid number of FFT data points'):
np.fft.ifft([], norm=norm)
def test_fft2(self):
x = random((30, 20)) + 1j * random((30, 20))
assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
np.fft.fft2(x), atol=1e-6)
assert_allclose(np.fft.fft2(x),
np.fft.fft2(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20),
np.fft.fft2(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.fft2(x) / (30. * 20.),
np.fft.fft2(x, norm="forward"), atol=1e-6)
def test_ifft2(self):
x = random((30, 20)) + 1j * random((30, 20))
assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
np.fft.ifft2(x), atol=1e-6)
assert_allclose(np.fft.ifft2(x),
np.fft.ifft2(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20),
np.fft.ifft2(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.ifft2(x) * (30. * 20.),
np.fft.ifft2(x, norm="forward"), atol=1e-6)
def test_fftn(self):
x = random((30, 20, 10)) + 1j * random((30, 20, 10))
assert_allclose(
np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
np.fft.fftn(x), atol=1e-6)
assert_allclose(np.fft.fftn(x),
np.fft.fftn(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
np.fft.fftn(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.fftn(x) / (30. * 20. * 10.),
np.fft.fftn(x, norm="forward"), atol=1e-6)
def test_ifftn(self):
x = random((30, 20, 10)) + 1j * random((30, 20, 10))
assert_allclose(
np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
np.fft.ifftn(x), atol=1e-6)
assert_allclose(np.fft.ifftn(x),
np.fft.ifftn(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
np.fft.ifftn(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.ifftn(x) * (30. * 20. * 10.),
np.fft.ifftn(x, norm="forward"), atol=1e-6)
def test_rfft(self):
x = random(30)
for n in [x.size, 2 * x.size]:
for norm in [None, 'backward', 'ortho', 'forward']:
assert_allclose(
np.fft.fft(x, n=n, norm=norm)[:(n // 2 + 1)],
np.fft.rfft(x, n=n, norm=norm), atol=1e-6)
assert_allclose(
np.fft.rfft(x, n=n),
np.fft.rfft(x, n=n, norm="backward"), atol=1e-6)
assert_allclose(
np.fft.rfft(x, n=n) / np.sqrt(n),
np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6)
assert_allclose(
np.fft.rfft(x, n=n) / n,
np.fft.rfft(x, n=n, norm="forward"), atol=1e-6)
def test_rfft_even(self):
x = np.arange(8)
n = 4
y = np.fft.rfft(x, n)
assert_allclose(y, np.fft.fft(x[:n])[:n // 2 + 1], rtol=1e-14)
def test_rfft_odd(self):
x = np.array([1, 0, 2, 3, -3])
y = np.fft.rfft(x)
assert_allclose(y, np.fft.fft(x)[:3], rtol=1e-14)
def test_irfft(self):
x = random(30)
assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6)
assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="backward"),
norm="backward"), atol=1e-6)
assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="ortho"),
norm="ortho"), atol=1e-6)
assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="forward"),
norm="forward"), atol=1e-6)
def test_rfft2(self):
x = random((30, 20))
assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6)
assert_allclose(np.fft.rfft2(x),
np.fft.rfft2(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20),
np.fft.rfft2(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.rfft2(x) / (30. * 20.),
np.fft.rfft2(x, norm="forward"), atol=1e-6)
def test_irfft2(self):
x = random((30, 20))
assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6)
assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="backward"),
norm="backward"), atol=1e-6)
assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"),
norm="ortho"), atol=1e-6)
assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"),
norm="forward"), atol=1e-6)
def test_rfftn(self):
x = random((30, 20, 10))
assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6)
assert_allclose(np.fft.rfftn(x),
np.fft.rfftn(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
np.fft.rfftn(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.rfftn(x) / (30. * 20. * 10.),
np.fft.rfftn(x, norm="forward"), atol=1e-6)
# Regression test for gh-27159
x = np.ones((2, 3))
result = np.fft.rfftn(x, axes=(0, 0, 1), s=(10, 20, 40))
assert result.shape == (10, 21)
expected = np.fft.fft(np.fft.fft(np.fft.rfft(x, axis=1, n=40),
axis=0, n=20), axis=0, n=10)
assert expected.shape == (10, 21)
assert_allclose(result, expected, atol=1e-6)
def test_irfftn(self):
x = random((30, 20, 10))
assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6)
assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="backward"),
norm="backward"), atol=1e-6)
assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"),
norm="ortho"), atol=1e-6)
assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="forward"),
norm="forward"), atol=1e-6)
def test_hfft(self):
x = random(14) + 1j * random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6)
assert_allclose(np.fft.hfft(x_herm),
np.fft.hfft(x_herm, norm="backward"), atol=1e-6)
assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30),
np.fft.hfft(x_herm, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.hfft(x_herm) / 30.,
np.fft.hfft(x_herm, norm="forward"), atol=1e-6)
def test_ihfft(self):
x = random(14) + 1j * random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6)
assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
norm="backward"), norm="backward"), atol=1e-6)
assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
norm="ortho"), norm="ortho"), atol=1e-6)
assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
norm="forward"), norm="forward"), atol=1e-6)
@pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,
np.fft.rfftn, np.fft.irfftn])
def test_axes(self, op):
x = random((30, 20, 10))
axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
for a in axes:
op_tr = op(np.transpose(x, a))
tr_op = np.transpose(op(x, axes=a), a)
assert_allclose(op_tr, tr_op, atol=1e-6)
@pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,
np.fft.fft2, np.fft.ifft2])
def test_s_negative_1(self, op):
x = np.arange(100).reshape(10, 10)
# should use the whole input array along the first axis
assert op(x, s=(-1, 5), axes=(0, 1)).shape == (10, 5)
@pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,
np.fft.rfftn, np.fft.irfftn])
def test_s_axes_none(self, op):
x = np.arange(100).reshape(10, 10)
with pytest.warns(match='`axes` should not be `None` if `s`'):
op(x, s=(-1, 5))
@pytest.mark.parametrize("op", [np.fft.fft2, np.fft.ifft2])
def test_s_axes_none_2D(self, op):
x = np.arange(100).reshape(10, 10)
with pytest.warns(match='`axes` should not be `None` if `s`'):
op(x, s=(-1, 5), axes=None)
@pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,
np.fft.rfftn, np.fft.irfftn,
np.fft.fft2, np.fft.ifft2])
def test_s_contains_none(self, op):
x = random((30, 20, 10))
with pytest.warns(match='array containing `None` values to `s`'):
op(x, s=(10, None, 10), axes=(0, 1, 2))
def test_all_1d_norm_preserving(self):
# verify that round-trip transforms are norm-preserving
x = random(30)
x_norm = np.linalg.norm(x)
n = x.size * 2
func_pairs = [(np.fft.fft, np.fft.ifft),
(np.fft.rfft, np.fft.irfft),
# hfft: order so the first function takes x.size samples
# (necessary for comparison to x_norm above)
(np.fft.ihfft, np.fft.hfft),
]
for forw, back in func_pairs:
for n in [x.size, 2 * x.size]:
for norm in [None, 'backward', 'ortho', 'forward']:
tmp = forw(x, n=n, norm=norm)
tmp = back(tmp, n=n, norm=norm)
assert_allclose(x_norm,
np.linalg.norm(tmp), atol=1e-6)
@pytest.mark.parametrize("axes", [(0, 1), (0, 2), None])
@pytest.mark.parametrize("dtype", (complex, float))
@pytest.mark.parametrize("transpose", (True, False))
def test_fftn_out_argument(self, dtype, transpose, axes):
def zeros_like(x):
if transpose:
return np.zeros_like(x.T).T
else:
return np.zeros_like(x)
# tests below only test the out parameter
if dtype is complex:
x = random((10, 5, 6)) + 1j * random((10, 5, 6))
fft, ifft = np.fft.fftn, np.fft.ifftn
else:
x = random((10, 5, 6))
fft, ifft = np.fft.rfftn, np.fft.irfftn
expected = fft(x, axes=axes)
out = zeros_like(expected)
result = fft(x, out=out, axes=axes)
assert result is out
assert_array_equal(result, expected)
expected2 = ifft(expected, axes=axes)
out2 = out if dtype is complex else zeros_like(expected2)
result2 = ifft(out, out=out2, axes=axes)
assert result2 is out2
assert_array_equal(result2, expected2)
@pytest.mark.parametrize("fft", [np.fft.fftn, np.fft.ifftn, np.fft.rfftn])
def test_fftn_out_and_s_interaction(self, fft):
# With s, shape varies, so generally one cannot pass in out.
if fft is np.fft.rfftn:
x = random((10, 5, 6))
else:
x = random((10, 5, 6)) + 1j * random((10, 5, 6))
with pytest.raises(ValueError, match="has wrong shape"):
fft(x, out=np.zeros_like(x), s=(3, 3, 3), axes=(0, 1, 2))
# Except on the first axis done (which is the last of axes).
s = (10, 5, 5)
expected = fft(x, s=s, axes=(0, 1, 2))
out = np.zeros_like(expected)
result = fft(x, s=s, axes=(0, 1, 2), out=out)
assert result is out
assert_array_equal(result, expected)
@pytest.mark.parametrize("s", [(9, 5, 5), (3, 3, 3)])
def test_irfftn_out_and_s_interaction(self, s):
# Since for irfftn, the output is real and thus cannot be used for
# intermediate steps, it should always work.
x = random((9, 5, 6, 2)) + 1j * random((9, 5, 6, 2))
expected = np.fft.irfftn(x, s=s, axes=(0, 1, 2))
out = np.zeros_like(expected)
result = np.fft.irfftn(x, s=s, axes=(0, 1, 2), out=out)
assert result is out
assert_array_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[np.float32, np.float64, np.complex64, np.complex128])
@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
@pytest.mark.parametrize(
"fft",
[np.fft.fft, np.fft.fft2, np.fft.fftn,
np.fft.ifft, np.fft.ifft2, np.fft.ifftn])
def test_fft_with_order(dtype, order, fft):
# Check that FFT/IFFT produces identical results for C, Fortran and
# non contiguous arrays
rng = np.random.RandomState(42)
X = rng.rand(8, 7, 13).astype(dtype, copy=False)
# See discussion in pull/14178
_tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps
if order == 'F':
Y = np.asfortranarray(X)
else:
# Make a non contiguous array
Y = X[::-1]
X = np.ascontiguousarray(X[::-1])
if fft.__name__.endswith('fft'):
for axis in range(3):
X_res = fft(X, axis=axis)
Y_res = fft(Y, axis=axis)
assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
elif fft.__name__.endswith(('fft2', 'fftn')):
axes = [(0, 1), (1, 2), (0, 2)]
if fft.__name__.endswith('fftn'):
axes.extend([(0,), (1,), (2,), None])
for ax in axes:
X_res = fft(X, axes=ax)
Y_res = fft(Y, axes=ax)
assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
else:
raise ValueError
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("n", [None, 7, 12])
def test_fft_output_order(order, n):
rng = np.random.RandomState(42)
x = rng.rand(10)
x = np.asarray(x, dtype=np.complex64, order=order)
res = np.fft.fft(x, n=n)
assert res.flags.c_contiguous == x.flags.c_contiguous
assert res.flags.f_contiguous == x.flags.f_contiguous
@pytest.mark.skipif(IS_WASM, reason="Cannot start thread")
| TestFFT1D |
python | astropy__astropy | astropy/io/votable/converters.py | {
"start": 33792,
"end": 34926
} | class ____(Converter):
"""
Handles the bit datatype.
"""
format = "b1"
array_type = BitArray
vararray_type = ScalarVarArray
default = False
binary_one = b"\x08"
binary_zero = b"\0"
def parse(self, value, config=None, pos=None):
if config is None:
config = {}
mapping = {"1": True, "0": False}
if value is False or value.strip() == "":
if not config["version_1_3_or_later"]:
warn_or_raise(W49, W49, (), config, pos)
return False, True
else:
try:
return mapping[value], False
except KeyError:
vo_raise(E04, (value,), config, pos)
def output(self, value, mask):
if mask:
vo_warn(W39)
if value:
return "1"
else:
return "0"
def binparse(self, read):
data = read(1)
return (ord(data) & 0x8) != 0, False
def binoutput(self, value, mask):
if mask:
vo_warn(W39)
if value:
return self.binary_one
return self.binary_zero
| Bit |
python | pypa__pip | tests/unit/test_appdirs.py | {
"start": 175,
"end": 3178
} | class ____:
@pytest.mark.skipif(sys.platform != "win32", reason="Windows-only test")
def test_user_cache_dir_win(self, monkeypatch: pytest.MonkeyPatch) -> None:
_get_win_folder = mock.Mock(return_value="C:\\Users\\test\\AppData\\Local")
monkeypatch.setattr(
platformdirs.windows, # type: ignore
"get_win_folder",
_get_win_folder,
raising=False,
)
assert (
appdirs.user_cache_dir("pip")
== "C:\\Users\\test\\AppData\\Local\\pip\\Cache"
)
assert _get_win_folder.call_args_list == [mock.call("CSIDL_LOCAL_APPDATA")]
@pytest.mark.skipif(sys.platform != "darwin", reason="MacOS-only test")
def test_user_cache_dir_osx(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setenv("HOME", "/home/test")
assert appdirs.user_cache_dir("pip") == "/home/test/Library/Caches/pip"
@pytest.mark.skipif(sys.platform != "linux", reason="Linux-only test")
def test_user_cache_dir_linux(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.delenv("XDG_CACHE_HOME", raising=False)
monkeypatch.setenv("HOME", "/home/test")
assert appdirs.user_cache_dir("pip") == "/home/test/.cache/pip"
@pytest.mark.skipif(sys.platform != "linux", reason="Linux-only test")
def test_user_cache_dir_linux_override(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setenv("XDG_CACHE_HOME", "/home/test/.other-cache")
monkeypatch.setenv("HOME", "/home/test")
assert appdirs.user_cache_dir("pip") == "/home/test/.other-cache/pip"
@pytest.mark.skipif(sys.platform != "linux", reason="Linux-only test")
def test_user_cache_dir_linux_home_slash(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
# Verify that we are not affected by https://bugs.python.org/issue14768
monkeypatch.delenv("XDG_CACHE_HOME", raising=False)
monkeypatch.setenv("HOME", "/")
assert appdirs.user_cache_dir("pip") == "/.cache/pip"
def test_user_cache_dir_unicode(self, monkeypatch: pytest.MonkeyPatch) -> None:
if sys.platform != "win32":
return
def my_get_win_folder(csidl_name: str) -> str:
return "\u00df\u00e4\u03b1\u20ac"
monkeypatch.setattr(
platformdirs.windows, # type: ignore
"get_win_folder",
my_get_win_folder,
)
# Do not use the isinstance expression directly in the
# assert statement, as the Unicode characters in the result
# cause pytest to fail with an internal error on Python 2.7
result_is_str = isinstance(appdirs.user_cache_dir("test"), str)
assert result_is_str, "user_cache_dir did not return a str"
# Test against regression #3463
from pip._internal.cli.main_parser import create_main_parser
create_main_parser().print_help() # This should not crash
| TestUserCacheDir |
python | kamyu104__LeetCode-Solutions | Python/count-numbers-with-non-decreasing-digits.py | {
"start": 1582,
"end": 3590
} | class ____(object):
def countNumbers(self, l, r, b):
"""
:type l: str
:type r: str
:type b: int
:rtype: int
"""
MOD = 10**9+7
fact, inv, inv_fact = [[1]*2 for _ in xrange(3)]
def nCr(n, k):
while len(inv) <= n: # lazy initialization
fact.append(fact[-1]*len(inv) % MOD)
inv.append(inv[MOD%len(inv)]*(MOD-MOD//len(inv)) % MOD) # https://cp-algorithms.com/algebra/module-inverse.html
inv_fact.append(inv_fact[-1]*inv[-1] % MOD)
return (fact[n]*inv_fact[n-k] % MOD) * inv_fact[k] % MOD
def nHr(n, k):
return nCr(n+k-1, k)
def decrease(digits):
for i in reversed(xrange(len(digits))):
if digits[i]:
digits[i] -= 1
break
digits[i] = 9
def divide(digits, base):
result = []
r = 0
for d in digits:
q, r = divmod(r*10+d, base)
if result or q:
result.append(q)
return result, r
def to_base(digits, base):
result = []
while digits:
digits, r = divide(digits, base)
result.append(r)
result.reverse()
return result
def count(digits):
digits_base = to_base(digits, b)
result = 0
for i in xrange(len(digits_base)):
if i-1 >= 0 and digits_base[i-1] > digits_base[i]:
break
for j in xrange(digits_base[i-1] if i-1 >= 0 else 0, digits_base[i]):
result = (result + nHr((b-1)-j+1, len(digits_base)-(i+1))) % MOD
else:
result = (result+1)%MOD
return result
digits_l = map(int, l)
decrease(digits_l)
digits_r = map(int, r)
return (count(digits_r) - count(digits_l)) % MOD
| Solution2 |
python | pypa__pip | src/pip/_vendor/pyproject_hooks/_in_process/_in_process.py | {
"start": 10093,
"end": 10180
} | class ____(Exception):
"""Nothing should ever raise this exception"""
| _DummyException |
python | jazzband__prettytable | tests/test_colortable.py | {
"start": 642,
"end": 1474
} | class ____:
def test_themeless(
self, row_prettytable: PrettyTable, row_colortable: ColorTable
) -> None:
# Not worth the logic customizing the reset code
# For now we'll just get rid of it
assert (
row_colortable.get_string().replace(RESET_CODE, "")
== row_prettytable.get_string()
)
def test_theme_setter(self, color_theme: Theme) -> None:
table1 = ColorTable(theme=color_theme)
table2 = ColorTable()
table2.theme = color_theme
assert table1.theme == table2.theme
dict1 = table1.__dict__
dict2 = table2.__dict__
# So we don't compare functions
for func in ("_sort_key", "_row_filter"):
del dict1[func]
del dict2[func]
assert dict1 == dict2
| TestColorTable |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/spacetobatch_op_test.py | {
"start": 7377,
"end": 7547
} | class ____(SpaceToBatchTest, CppOpImpl):
"""Tests input-output pairs for the SpaceToBatch and BatchToSpace ops.
This uses the C++ ops.
"""
pass
| SpaceToBatchCppTest |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/multi_sink_ports.py | {
"start": 530,
"end": 582
} | class ____:
def send(self):
pass
| QueryBase |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 982010,
"end": 982588
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("body", "body_html", "database_id", "title", "user")
body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body")
body_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="bodyHTML")
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
user = sgqlc.types.Field(Actor, graphql_name="user")
| SavedReply |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0130_addons_remove_old_fields.py | {
"start": 121,
"end": 880
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0129_addons_notification_data_migration"),
]
operations = [
migrations.RemoveField(
model_name="addonsconfig",
name="external_version_warning_enabled",
),
migrations.RemoveField(
model_name="addonsconfig",
name="stable_latest_version_warning_enabled",
),
migrations.RemoveField(
model_name="historicaladdonsconfig",
name="external_version_warning_enabled",
),
migrations.RemoveField(
model_name="historicaladdonsconfig",
name="stable_latest_version_warning_enabled",
),
]
| Migration |
python | facebookresearch__faiss | benchs/bench_fw/utils.py | {
"start": 3874,
"end": 6877
} | class ____(Enum):
TIME = 0 # time vs accuracy
SPACE = 1 # space vs accuracy
TIME_SPACE = 2 # (time, space) vs accuracy
def range_search_recall_at_precision(experiment, precision):
return round(
max(
r
for r, p in zip(
experiment["range_search_pr"]["recall"],
experiment["range_search_pr"]["precision"],
)
if p > precision
),
6,
)
def filter_results(
results,
evaluation,
accuracy_metric, # str or func
time_metric=None, # func or None -> use default
space_metric=None, # func or None -> use default
min_accuracy=0,
max_space=0,
max_time=0,
scaling_factor=1.0,
name_filter=None, # func
pareto_mode=ParetoMode.DISABLE,
pareto_metric=ParetoMetric.TIME,
):
if isinstance(accuracy_metric, str):
accuracy_key = accuracy_metric
accuracy_metric = lambda v: v[accuracy_key]
if time_metric is None:
time_metric = lambda v: v["time"] * scaling_factor + (
v["quantizer"]["time"] if "quantizer" in v else 0
)
if space_metric is None:
space_metric = lambda v: results["indices"][v["codec"]]["code_size"]
fe = []
ops = {}
if pareto_mode == ParetoMode.GLOBAL:
op = OperatingPoints()
ops["global"] = op
for k, v in results["experiments"].items():
if f".{evaluation}" in k:
accuracy = accuracy_metric(v)
if min_accuracy > 0 and accuracy < min_accuracy:
continue
space = space_metric(v)
if space is None:
space = 0
if max_space > 0 and space > max_space:
continue
time = time_metric(v)
if max_time > 0 and time > max_time:
continue
idx_name = v["index"] + (
"snap"
if "search_params" in v and v["search_params"]["snap"] == 1
else ""
)
if name_filter is not None and not name_filter(idx_name):
continue
experiment = (accuracy, space, time, k, v)
if pareto_mode == ParetoMode.DISABLE:
fe.append(experiment)
continue
if pareto_mode == ParetoMode.INDEX:
if idx_name not in ops:
ops[idx_name] = OperatingPoints()
op = ops[idx_name]
if pareto_metric == ParetoMetric.TIME:
op.add_operating_point(experiment, accuracy, time)
elif pareto_metric == ParetoMetric.SPACE:
op.add_operating_point(experiment, accuracy, space)
else:
op.add_operating_point(
experiment, accuracy, Cost([time, space])
)
if ops:
for op in ops.values():
for v, _, _ in op.operating_points:
fe.append(v)
fe.sort()
return fe
| ParetoMetric |
python | tensorflow__tensorflow | tensorflow/lite/python/lite_test.py | {
"start": 3109,
"end": 9315
} | class ____(TestModels):
# Tests invalid constructors using a dummy value for the GraphDef.
def testInvalidConstructor(self):
message = (
'If input_tensors and output_tensors are None, both '
'input_arrays_with_shape and output_arrays|control_output_arrays must '
'be defined.')
# `output_arrays` is not defined.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter(
None, None, [], input_arrays_with_shape=[('input', [3,
9])]).convert()
self.assertEqual(message, str(error.exception))
# `input_arrays_with_shape` is not defined.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter(None, [], None, output_arrays=['output']).convert()
self.assertEqual(message, str(error.exception))
# Tests valid constructors using a dummy value for the GraphDef.
def testValidConstructor(self):
converter = lite.TFLiteConverter(
None,
None,
None,
input_arrays_with_shape=[('input', [3, 9])],
output_arrays=['output'])
self.assertFalse(converter._has_valid_tensors())
self.assertEqual(converter.get_input_arrays(), ['input'])
with self.assertRaises(ValueError) as error:
converter._set_batch_size(1)
self.assertEqual(
'The batch size cannot be set for this model. Please use '
'input_shapes parameter.', str(error.exception))
converter = lite.TFLiteConverter(None, ['input_tensor'], ['output_tensor'])
self.assertTrue(converter._has_valid_tensors())
def testRedundantArgumentsWarning(self):
"""Test if the warning message when there are redundant arguments."""
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32, name='in_tensor')
out_tensor = math_ops.add(in_tensor, in_tensor, name='add')
sess = session.Session()
frozen_graph_def = (
convert_to_constants.convert_variables_to_constants_from_session_graph(
sess, sess.graph_def, ['add']))
# Convert model and ensure model is not None.
log = io.StringIO()
handler = logging.StreamHandler(log)
logging.root.addHandler(handler)
converter = lite.TFLiteConverter(frozen_graph_def, [in_tensor],
[out_tensor],
[('in_tensor', [2, 16, 16, 3])], ['add'])
input_warning_message = 'input_arrays_with_shape will be ignored'
output_warning_message = 'output_arrays will be ignored'
# Convert model and ensure model is not None.
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
self.assertIn(input_warning_message, log.getvalue())
self.assertIn(output_warning_message, log.getvalue())
logging.root.removeHandler(handler)
def testShapeOverriding(self):
"""Test a shape overriding case via the constructor."""
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32, name='in_tensor')
math_ops.add(in_tensor, in_tensor, name='add')
sess = session.Session()
frozen_graph_def = (
convert_to_constants.convert_variables_to_constants_from_session_graph(
sess, sess.graph_def, ['add']))
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter(frozen_graph_def, None, None,
[('in_tensor', [2, 16, 16, 3])], ['add'])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('in_tensor', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([2, 16, 16, 3], input_details[0]['shape'])
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertAllEqual([2, 16, 16, 3], output_details[0]['shape'])
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testPartialShapeOverriding(self):
"""Test a partial shape overriding case via the constructor."""
with ops.Graph().as_default():
in_tensor_a = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32, name='in_tensor_a')
in_tensor_b = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32, name='in_tensor_b')
math_ops.add(in_tensor_a, in_tensor_b, name='add')
sess = session.Session()
frozen_graph_def = (
convert_to_constants.convert_variables_to_constants_from_session_graph(
sess, sess.graph_def, ['add']))
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter(frozen_graph_def, None, None,
[('in_tensor_a', [2, 16, 16, 3])], ['add'])
# There is an unhandled Placeholder op.
with self.assertRaises(ConverterError):
converter.convert()
def testInvalidShapeOverriding(self):
"""Test an invalid shape overriding case via the constructor."""
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32, name='in_tensor')
math_ops.add(in_tensor, in_tensor, name='add')
sess = session.Session()
frozen_graph_def = (
convert_to_constants.convert_variables_to_constants_from_session_graph(
sess, sess.graph_def, ['add']))
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter(frozen_graph_def, None, None,
[('wrong_tensor', [2, 16, 16, 3])],
['add'])
with self.assertRaises(ConverterError):
converter.convert()
| FromConstructor |
python | tensorflow__tensorflow | tensorflow/python/ops/weak_tensor_special_math_ops_test.py | {
"start": 5366,
"end": 7501
} | class ____(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_fresnel_cos_boundary(self):
self.assertAllClose(0., special_math_ops.fresnel_cos(0.))
self.assertTrue(
np.isnan(self.evaluate(special_math_ops.fresnel_cos(np.nan))))
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_cos_odd(self, dtype):
x = _get_weak_tensor(
np.random.uniform(-100.0, 100.0, size=int(1e4)).astype(dtype)
)
y = special_math_ops.fresnel_cos(x)
neg_y = -special_math_ops.fresnel_cos(-x)
self.assertIsInstance(y, WeakTensor)
self.assertIsInstance(neg_y, WeakTensor)
self.assertAllClose(self.evaluate(y), self.evaluate(neg_y))
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_cos_small(self, dtype):
x = np.random.uniform(0., 1., size=int(1e4)).astype(dtype)
x_wt = _get_weak_tensor(x)
y_wt = special_math_ops.fresnel_cos(x_wt)
self.assertIsInstance(y_wt, WeakTensor)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(special.fresnel(x)[1], self.evaluate(y_wt))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_cos_larger(self, dtype):
x = np.random.uniform(1., 100., size=int(1e4)).astype(dtype)
x_wt = _get_weak_tensor(x)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.fresnel(x)[1],
self.evaluate(special_math_ops.fresnel_cos(x_wt)),
rtol=1e-5,
)
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
def test_fresnel_cos_gradient(self):
inputs = [_get_weak_tensor(np.random.uniform(1.0, 50.0, size=int(1e2)))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.fresnel_cos, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 5e-3)
@test_util.run_all_in_graph_and_eager_modes
| FresnelCosTest |
python | sqlalchemy__sqlalchemy | test/orm/test_selectin_relations.py | {
"start": 100425,
"end": 107129
} | class ____(
fixtures.DeclarativeMappedTest, testing.AssertsExecutionResults
):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(ComparableEntity, Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey("b.id"))
b = relationship("B")
b_no_omit_join = relationship("B", omit_join=False, overlaps="b")
q = Column(Integer)
class B(ComparableEntity, Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
x = Column(Integer)
y = Column(Integer)
@classmethod
def insert_data(cls, connection):
A, B = cls.classes("A", "B")
s = Session(connection)
b1, b2 = B(id=1, x=5, y=9), B(id=2, x=10, y=8)
s.add_all(
[
A(id=1, b=b1),
A(id=2, b=b2),
A(id=3, b=b2),
A(id=4, b=None),
A(id=5, b=b1),
]
)
s.commit()
def test_omit_join_warn_on_true(self):
with testing.expect_warnings(
"setting omit_join to True is not supported; selectin "
"loading of this relationship"
):
relationship("B", omit_join=True)
def test_use_join_parent_criteria(self):
A, B = self.classes("A", "B")
s = fixture_session()
q = (
s.query(A)
.filter(A.id.in_([1, 3]))
.options(selectinload(A.b))
.order_by(A.id)
)
results = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT a.id AS a_id, a.b_id AS a_b_id, a.q AS a_q "
"FROM a WHERE a.id IN (__[POSTCOMPILE_id_1]) ORDER BY a.id",
[{"id_1": [1, 3]}],
),
CompiledSQL(
"SELECT b.id, b.x, b.y "
"FROM b WHERE b.id IN (__[POSTCOMPILE_primary_keys])",
[{"primary_keys": [1, 2]}],
),
)
eq_(
results,
[A(id=1, b=B(id=1, x=5, y=9)), A(id=3, b=B(id=2, x=10, y=8))],
)
def test_use_join_parent_criteria_degrade_on_defer(self):
A, B = self.classes("A", "B")
s = fixture_session()
q = (
s.query(A)
.filter(A.id.in_([1, 3]))
.options(defer(A.b_id), selectinload(A.b))
.order_by(A.id)
)
results = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT a.id AS a_id, a.q AS a_q "
"FROM a WHERE a.id IN (__[POSTCOMPILE_id_1]) ORDER BY a.id",
[{"id_1": [1, 3]}],
),
# in the very unlikely case that the FK col on parent is
# deferred, we degrade to the JOIN version so that we don't need to
# emit either for each parent object individually, or as a second
# query for them.
CompiledSQL(
"SELECT a_1.id, b.id, b.x, b.y "
"FROM a AS a_1 JOIN b ON b.id = a_1.b_id "
"WHERE a_1.id IN (__[POSTCOMPILE_primary_keys])",
[{"primary_keys": [1, 3]}],
),
)
eq_(
results,
[A(id=1, b=B(id=1, x=5, y=9)), A(id=3, b=B(id=2, x=10, y=8))],
)
def test_use_join(self):
A, B = self.classes("A", "B")
s = fixture_session()
q = s.query(A).options(selectinload(A.b)).order_by(A.id)
results = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT a.id AS a_id, a.b_id AS a_b_id, a.q AS a_q "
"FROM a ORDER BY a.id",
[{}],
),
CompiledSQL(
"SELECT b.id, b.x, b.y "
"FROM b WHERE b.id IN (__[POSTCOMPILE_primary_keys])",
[{"primary_keys": [1, 2]}],
),
)
b1, b2 = B(id=1, x=5, y=9), B(id=2, x=10, y=8)
eq_(
results,
[
A(id=1, b=b1),
A(id=2, b=b2),
A(id=3, b=b2),
A(id=4, b=None),
A(id=5, b=b1),
],
)
def test_use_join_omit_join_false(self):
A, B = self.classes("A", "B")
s = fixture_session()
q = s.query(A).options(selectinload(A.b_no_omit_join)).order_by(A.id)
results = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT a.id AS a_id, a.b_id AS a_b_id, a.q AS a_q "
"FROM a ORDER BY a.id",
[{}],
),
CompiledSQL(
"SELECT a_1.id, b.id, b.x, b.y "
"FROM a AS a_1 JOIN b ON b.id = a_1.b_id "
"WHERE a_1.id IN (__[POSTCOMPILE_primary_keys])",
[{"primary_keys": [1, 2, 3, 4, 5]}],
),
)
b1, b2 = B(id=1, x=5, y=9), B(id=2, x=10, y=8)
eq_(
results,
[
A(id=1, b_no_omit_join=b1),
A(id=2, b_no_omit_join=b2),
A(id=3, b_no_omit_join=b2),
A(id=4, b_no_omit_join=None),
A(id=5, b_no_omit_join=b1),
],
)
def test_use_join_parent_degrade_on_defer(self):
A, B = self.classes("A", "B")
s = fixture_session()
q = s.query(A).options(defer(A.b_id), selectinload(A.b)).order_by(A.id)
results = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT a.id AS a_id, a.q AS a_q FROM a ORDER BY a.id", [{}]
),
# in the very unlikely case that the FK col on parent is
# deferred, we degrade to the JOIN version so that we don't need to
# emit either for each parent object individually, or as a second
# query for them.
CompiledSQL(
"SELECT a_1.id, b.id, b.x, b.y "
"FROM a AS a_1 JOIN b ON b.id = a_1.b_id "
"WHERE a_1.id IN (__[POSTCOMPILE_primary_keys])",
[{"primary_keys": [1, 2, 3, 4, 5]}],
),
)
b1, b2 = B(id=1, x=5, y=9), B(id=2, x=10, y=8)
eq_(
results,
[
A(id=1, b=b1),
A(id=2, b=b2),
A(id=3, b=b2),
A(id=4, b=None),
A(id=5, b=b1),
],
)
| M2OWDegradeTest |
python | getsentry__sentry | src/sentry/integrations/example/integration.py | {
"start": 7214,
"end": 8489
} | class ____(IntegrationProvider):
"""
An example integration, generally used for testing.
"""
key = "example"
name = "Example"
metadata = metadata
integration_cls = ExampleIntegration
features = frozenset(
[
IntegrationFeatures.COMMITS,
IntegrationFeatures.ISSUE_BASIC,
IntegrationFeatures.STACKTRACE_LINK,
]
)
def get_pipeline_views(self) -> Sequence[PipelineView[IntegrationPipeline]]:
return [ExampleSetupView()]
def get_config(self):
return [{"name": "name", "label": "Name", "type": "text", "required": True}]
def post_install(
self,
integration: Integration,
organization: RpcOrganization,
*,
extra: dict[str, Any],
) -> None:
Migrator(integration=serialize_integration(integration), organization=organization).run()
def build_integration(self, state: Mapping[str, Any]) -> IntegrationData:
return {"external_id": state["name"]}
def setup(self):
"""
Executed once Sentry has been initialized at runtime.
>>> def setup(self):
>>> bindings.add('repository.provider', GitHubRepositoryProvider, key='github')
"""
| ExampleIntegrationProvider |
python | ray-project__ray | python/ray/tests/aws/test_aws_batch_tag_update.py | {
"start": 1528,
"end": 2153
} | class ____(unittest.TestCase):
def test_concurrent(self):
num_threads = 100
batches_sent, tags_updated = batch_test(num_threads, delay=0)
self.assertLess(batches_sent, num_threads / 10)
self.assertEqual(tags_updated, num_threads)
def test_serial(self):
num_threads = 5
long_delay = TAG_BATCH_DELAY * 1.2
batches_sent, tags_updated = batch_test(num_threads, delay=long_delay)
self.assertEqual(batches_sent, num_threads)
self.assertEqual(tags_updated, num_threads)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TagBatchTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 634676,
"end": 635337
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("SponsorshipNewsletterEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("SponsorshipNewsletter"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| SponsorshipNewsletterConnection |
python | SmileyChris__easy-thumbnails | easy_thumbnails/models.py | {
"start": 1512,
"end": 1711
} | class ____(FileManager):
def _get_thumbnail_manager(self):
if settings.THUMBNAIL_CACHE_DIMENSIONS:
return self.select_related("dimensions")
return self
| ThumbnailManager |
python | wandb__wandb | wandb/vendor/pygments/lexers/configs.py | {
"start": 17589,
"end": 19054
} | class ____(RegexLexer):
"""
Lexer for `Nginx <http://nginx.net/>`_ configuration files.
.. versionadded:: 0.11
"""
name = 'Nginx configuration file'
aliases = ['nginx']
filenames = ['nginx.conf']
mimetypes = ['text/x-nginx-conf']
tokens = {
'root': [
(r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
(r'[^\s;#]+', Keyword, 'stmt'),
include('base'),
],
'block': [
(r'\}', Punctuation, '#pop:2'),
(r'[^\s;#]+', Keyword.Namespace, 'stmt'),
include('base'),
],
'stmt': [
(r'\{', Punctuation, 'block'),
(r';', Punctuation, '#pop'),
include('base'),
],
'base': [
(r'#.*\n', Comment.Single),
(r'on|off', Name.Constant),
(r'\$[^\s;#()]+', Name.Variable),
(r'([a-z0-9.-]+)(:)([0-9]+)',
bygroups(Name, Punctuation, Number.Integer)),
(r'[a-z-]+/[a-z-+]+', String), # mimetype
# (r'[a-zA-Z._-]+', Keyword),
(r'[0-9]+[km]?\b', Number.Integer),
(r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
(r'[:=~]', Punctuation),
(r'[^\s;#{}$]+', String), # catch all
(r'/[^\s;#]*', Name), # pathname
(r'\s+', Text),
(r'[$;]', Text), # leftover characters
],
}
| NginxConfLexer |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_object_position19.py | {
"start": 315,
"end": 933
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("object_position19.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column(1, 1, None, None, {"hidden": 1})
worksheet.insert_image("B9", self.image_dir + "red.png", {"x_offset": 128})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | kamyu104__LeetCode-Solutions | Python/minimum-knight-moves.py | {
"start": 29,
"end": 1806
} | class ____(object):
def minKnightMoves(self, x, y):
"""
:type x: int
:type y: int
:rtype: int
"""
# we can observe from:
# [0]
# [3, 2]
# [2,(1),4]
# [3, 2, 3, 2]
# [2, 3,(2) 3, 4]
# [3, 4, 3, 4, 3, 4]
# [4, 3, 4,(3),4, 5, 4]
# [5, 4, 5, 4, 5, 4, 5, 6]
# [4, 5, 4, 5,(4),5, 6, 5, 6]
# [5, 6, 5, 6, 5, 6, 5, 6, 7, 6]
# [6, 5, 6, 5, 6,(5),6, 7, 6, 7, 8]
# [7, 6, 7, 6, 7, 6, 7, 6, 7, 8, 7, 8]
# [6, 7, 6, 7, 6, 7,(6),7, 8, 7, 8, 9, 8]
# [7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 9, 8, 9, 10]
# [8, 7, 8, 7, 8, 7, 8,(7),8, 9, 8, 9, 10, 9, 10]
# [9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10, 9, 10, 11, 10]
x, y = abs(x), abs(y)
if x < y:
x, y = y, x
lookup = {(1, 0):3, (2, 2):4} # special cases
if (x, y) in lookup:
return lookup[(x, y)]
k = x-y
if y > k:
# if 2y > x, every period 3 of y (or k) with fixed another is increased by 2 (or 1)
# and start from (2k, k) with (k) when y = k (diagonal line)
# ex. (0, 0) ~ (12, 12) ~ ... : 0 => 2,4(special case),2 => 4,4,4 => 6,6,6 => 8,8,8 => ...
# ex. (2, 1) ~ (14, 13) ~ ... : 1 => 3,3,3 => 5,5,5 => 7,7,7 => 9,9,9 => ...
return k - 2*((k-y)//3)
# if 2y <= x, every period 4 of k (or y) with fixed another is increased by 2
# and start from (2k, k) with (k) when y = k (vertical line)
# ex. (0, 0) ~ (11, 0) ~ ... : 0,3(special case),2,3 => 2,3,4,5 => 4,5,6,7 => ...
# ex. (2, 1) ~ (13, 1) ~ ... : 1,2,3,4 => 3,4,5,6 => 5,6,7,8 => ...
return k - 2*((k-y)//4)
# Time: O(n^2)
# Space: O(n^2)
| Solution |
python | openai__gym | tests/vector/utils.py | {
"start": 2623,
"end": 3834
} | class ____(gym.Env):
def __init__(self):
super().__init__()
self.observation_space = CustomSpace()
self.action_space = CustomSpace()
def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
super().reset(seed=seed)
return "reset", {}
def step(self, action):
observation = f"step({action:s})"
reward, terminated, truncated = 0.0, False, False
return observation, reward, terminated, truncated, {}
def make_env(env_name, seed, **kwargs):
def _make():
env = gym.make(env_name, disable_env_checker=True, **kwargs)
env.action_space.seed(seed)
env.reset(seed=seed)
return env
return _make
def make_slow_env(slow_reset, seed):
def _make():
env = UnittestSlowEnv(slow_reset=slow_reset)
env.reset(seed=seed)
return env
return _make
def make_custom_space_env(seed):
def _make():
env = CustomSpaceEnv()
env.reset(seed=seed)
return env
return _make
def assert_rng_equal(rng_1: RandomNumberGenerator, rng_2: RandomNumberGenerator):
assert rng_1.bit_generator.state == rng_2.bit_generator.state
| CustomSpaceEnv |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.