language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | spyder-ide__spyder | spyder/plugins/layout/layouts.py | {
"start": 566,
"end": 1870
} | class ____(BaseGridLayoutType):
ID = DefaultLayouts.SpyderLayout
def __init__(self, parent_plugin):
super().__init__(parent_plugin)
self.add_area(
[Plugins.Projects, Plugins.OutlineExplorer],
row=0,
column=0,
row_span=2,
visible=False,
)
self.add_area(
[Plugins.Editor],
row=0,
column=1,
row_span=2,
)
self.add_area(
[
Plugins.Help,
Plugins.VariableExplorer,
Plugins.Debugger,
Plugins.Profiler,
Plugins.Plots,
Plugins.Explorer,
Plugins.Find,
Plugins.OnlineHelp,
],
row=0,
column=2,
default=True,
hidden_plugin_ids=[Plugins.OnlineHelp, Plugins.Find]
)
self.add_area(
[Plugins.IPythonConsole, Plugins.History, Plugins.Console],
row=1,
column=2,
hidden_plugin_ids=[Plugins.Console]
)
self.set_column_stretch(0, 1)
self.set_column_stretch(1, 3)
self.set_column_stretch(2, 3)
def get_name(self):
return _("Default layout")
| SpyderLayout |
python | openai__gym | gym/envs/mujoco/swimmer.py | {
"start": 111,
"end": 1676
} | class ____(MuJocoPyEnv, utils.EzPickle):
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
],
"render_fps": 25,
}
def __init__(self, **kwargs):
observation_space = Box(low=-np.inf, high=np.inf, shape=(8,), dtype=np.float64)
MuJocoPyEnv.__init__(
self, "swimmer.xml", 4, observation_space=observation_space, **kwargs
)
utils.EzPickle.__init__(self, **kwargs)
def step(self, a):
ctrl_cost_coeff = 0.0001
xposbefore = self.sim.data.qpos[0]
self.do_simulation(a, self.frame_skip)
xposafter = self.sim.data.qpos[0]
reward_fwd = (xposafter - xposbefore) / self.dt
reward_ctrl = -ctrl_cost_coeff * np.square(a).sum()
reward = reward_fwd + reward_ctrl
ob = self._get_obs()
if self.render_mode == "human":
self.render()
return (
ob,
reward,
False,
False,
dict(reward_fwd=reward_fwd, reward_ctrl=reward_ctrl),
)
def _get_obs(self):
qpos = self.sim.data.qpos
qvel = self.sim.data.qvel
return np.concatenate([qpos.flat[2:], qvel.flat])
def reset_model(self):
self.set_state(
self.init_qpos
+ self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq),
self.init_qvel
+ self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nv),
)
return self._get_obs()
| SwimmerEnv |
python | ray-project__ray | python/ray/data/aggregate.py | {
"start": 33143,
"end": 34968
} | class ____(AggregateFnV2[Set[Any], List[Any]]):
"""Defines unique aggregation.
Example:
.. testcode::
import ray
from ray.data.aggregate import Unique
ds = ray.data.range(100)
ds = ds.add_column("group_key", lambda x: x % 3)
# Calculating the unique values per group:
result = ds.groupby("group_key").aggregate(Unique(on="id")).take_all()
# result: [{'group_key': 0, 'unique(id)': ...},
# {'group_key': 1, 'unique(id)': ...},
# {'group_key': 2, 'unique(id)': ...}]
Args:
on: The name of the column from which to collect unique values.
ignore_nulls: Whether to ignore null values when collecting unique items.
Default is True (nulls are excluded).
alias_name: Optional name for the resulting column.
"""
def __init__(
self,
on: Optional[str] = None,
ignore_nulls: bool = True,
alias_name: Optional[str] = None,
):
super().__init__(
alias_name if alias_name else f"unique({str(on)})",
on=on,
ignore_nulls=ignore_nulls,
zero_factory=set,
)
def combine(self, current_accumulator: Set[Any], new: Set[Any]) -> Set[Any]:
return self._to_set(current_accumulator) | self._to_set(new)
def aggregate_block(self, block: Block) -> List[Any]:
import pyarrow.compute as pac
col = BlockAccessor.for_block(block).to_arrow().column(self._target_col_name)
return pac.unique(col).to_pylist()
@staticmethod
def _to_set(x):
if isinstance(x, set):
return x
elif isinstance(x, list):
return set(x)
else:
return {x}
@PublicAPI
| Unique |
python | kamyu104__LeetCode-Solutions | Python/peeking-iterator.py | {
"start": 59,
"end": 954
} | class ____(object):
def __init__(self, iterator):
"""
Initialize your data structure here.
:type iterator: Iterator
"""
self.iterator = iterator
self.val_ = None
self.has_next_ = iterator.hasNext()
self.has_peeked_ = False
def peek(self):
"""
Returns the next element in the iteration without advancing the iterator.
:rtype: int
"""
if not self.has_peeked_:
self.has_peeked_ = True
self.val_ = self.iterator.next()
return self.val_
def next(self):
"""
:rtype: int
"""
self.val_ = self.peek()
self.has_peeked_ = False
self.has_next_ = self.iterator.hasNext()
return self.val_
def hasNext(self):
"""
:rtype: bool
"""
return self.has_next_
| PeekingIterator |
python | py-pdf__pypdf | tests/__init__.py | {
"start": 2467,
"end": 4294
} | class ____:
def __init__(self, strict=False) -> None:
self.strict = strict
def get_object(self, indirect_reference):
class DummyObj:
def get_object(self) -> "DummyObj":
return self
return DictionaryObject()
def get_reference(self, obj):
return IndirectObject(idnum=1, generation=1, pdf=self)
def is_sublist(child_list, parent_list):
"""
Check if child_list is a sublist of parent_list, with respect to
* elements order
* elements repetition
Elements are compared using `==`
"""
if len(child_list) == 0:
return True
if len(parent_list) == 0:
return False
if parent_list[0] == child_list[0]:
return is_sublist(child_list[1:], parent_list[1:])
return is_sublist(child_list, parent_list[1:])
def read_yaml_to_list_of_dicts(yaml_file: Path) -> list[dict[str, str]]:
with open(yaml_file) as yaml_input:
return yaml.safe_load(yaml_input)
def download_test_pdfs():
"""
Run this before the tests are executed to ensure you have everything locally.
This is especially important to avoid pytest timeouts.
"""
pdfs = read_yaml_to_list_of_dicts(Path(__file__).parent / "example_files.yaml")
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
futures = [
executor.submit(get_data_from_url, pdf["url"], name=pdf["local_filename"])
for pdf in pdfs
]
concurrent.futures.wait(futures)
def test_csv_consistency():
pdfs = read_yaml_to_list_of_dicts(Path(__file__).parent / "example_files.csv")
# Ensure the names are unique
assert len(pdfs) == len({pdf["name"] for pdf in pdfs})
# Ensure the urls are unique
assert len(pdfs) == len({pdf["url"] for pdf in pdfs})
| ReaderDummy |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/mariadb.py | {
"start": 2479,
"end": 3724
} | class ____(MySQLDialect):
is_mariadb = True
supports_statement_cache = True
supports_native_uuid = True
_allows_uuid_binds = True
name = "mariadb"
preparer: type[MySQLIdentifierPreparer] = MariaDBIdentifierPreparer
type_compiler_cls = MariaDBTypeCompiler
colspecs = util.update_copy(MySQLDialect.colspecs, {Uuid: _MariaDBUUID})
def initialize(self, connection: Connection) -> None:
super().initialize(connection)
self.supports_native_uuid = (
self.server_version_info is not None
and self.server_version_info >= (10, 7)
)
def loader(driver: str) -> Callable[[], type[MariaDBDialect]]:
dialect_mod = __import__(
"sqlalchemy.dialects.mysql.%s" % driver
).dialects.mysql
driver_mod = getattr(dialect_mod, driver)
if hasattr(driver_mod, "mariadb_dialect"):
driver_cls = driver_mod.mariadb_dialect
return driver_cls # type: ignore[no-any-return]
else:
driver_cls = driver_mod.dialect
return type(
"MariaDBDialect_%s" % driver,
(
MariaDBDialect,
driver_cls,
),
{"supports_statement_cache": True},
)
| MariaDBDialect |
python | huggingface__transformers | src/transformers/models/blip_2/modeling_blip_2.py | {
"start": 30358,
"end": 31488
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList(
[Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
query_length=0,
**kwargs: Unpack[TransformersKwargs],
):
for i in range(self.config.num_hidden_layers):
layer_module = self.layer[i]
hidden_states = layer_module(
hidden_states,
attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
query_length=query_length,
**kwargs,
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
)
| Blip2QFormerEncoder |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/tests/unit_tests/test_checks/test_documentation.py | {
"start": 7389,
"end": 8947
} | class ____:
def test_fail_when_documentation_file_path_is_none(self, mocker):
# Arrange
connector = mocker.Mock(technical_name="test-connector", documentation_file_path=None)
# Act
result = documentation.CheckDocumentationExists()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert "User facing documentation file is missing. Please create it" in result.message
def test_fail_when_documentation_file_path_does_not_exists(self, mocker, tmp_path):
# Arrange
connector = mocker.Mock(technical_name="test-connector", documentation_file_path=tmp_path / "not_existing_documentation.md")
# Act
result = documentation.CheckDocumentationExists()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert "User facing documentation file is missing. Please create it" in result.message
def test_pass_when_documentation_file_path_exists(self, mocker, tmp_path):
# Arrange
documentation_file_path = tmp_path / "documentation.md"
connector = mocker.Mock(technical_name="test-connector", documentation_file_path=documentation_file_path)
connector.documentation_file_path.write_text("")
# Act
result = documentation.CheckDocumentationExists()._run(connector)
# Assert
assert result.status == CheckStatus.PASSED
assert f"User facing documentation file {documentation_file_path} exists" in result.message
| TestCheckDocumentationExists |
python | huggingface__transformers | tests/models/bert_generation/test_modeling_bert_generation.py | {
"start": 17268,
"end": 18066
} | class ____(unittest.TestCase):
@slow
def test_inference_no_head_absolute_embedding(self):
model = BertGenerationEncoder.from_pretrained(
"google/bert_for_seq_generation_L-24_bbc_encoder", attn_implementation="eager"
)
input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size([1, 8, 1024])
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
)
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_torch
| BertGenerationEncoderIntegrationTest |
python | Pylons__pyramid | tests/test_traversal.py | {
"start": 20595,
"end": 21035
} | class ____(unittest.TestCase):
def _callFUT(self, context):
from pyramid.traversal import find_root
return find_root(context)
def test_it(self):
dummy = DummyContext()
baz = DummyContext()
baz.__parent__ = dummy
baz.__name__ = 'baz'
dummy.__parent__ = None
dummy.__name__ = None
result = self._callFUT(baz)
self.assertEqual(result, dummy)
| FindRootTests |
python | python-openxml__python-docx | src/docx/parts/numbering.py | {
"start": 119,
"end": 699
} | class ____(XmlPart):
"""Proxy for the numbering.xml part containing numbering definitions for a document
or glossary."""
@classmethod
def new(cls) -> "NumberingPart":
"""Newly created numbering part, containing only the root ``<w:numbering>`` element."""
raise NotImplementedError
@lazyproperty
def numbering_definitions(self):
"""The |_NumberingDefinitions| instance containing the numbering definitions
(<w:num> element proxies) for this numbering part."""
return _NumberingDefinitions(self._element)
| NumberingPart |
python | great-expectations__great_expectations | tests/datasource/fluent/data_asset/data_connector/test_google_cloud_storage_data_connector.py | {
"start": 1039,
"end": 25232
} | class ____:
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def list_blobs(
self,
bucket_or_name,
max_results=None,
prefix=None,
delimiter=None,
**kwargs,
) -> Iterator:
return iter([])
@pytest.mark.big
@mock.patch(
"great_expectations.datasource.fluent.data_asset.data_connector.google_cloud_storage_data_connector.list_gcs_keys"
)
def test_basic_instantiation(mock_list_keys):
mock_list_keys.return_value = [
"alpha-1.csv",
"alpha-2.csv",
"alpha-3.csv",
]
gcs_client: google.Client = cast("google.Client", MockGCSClient())
my_data_connector: DataConnector = GoogleCloudStorageDataConnector(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
batching_regex=re.compile(r"alpha-(.*)\.csv"),
gcs_client=gcs_client,
bucket_or_name="my_bucket",
prefix="",
file_path_template_map_fn=GCSUrl.OBJECT_URL_TEMPLATE.format,
)
assert my_data_connector.get_data_reference_count() == 3
assert my_data_connector.get_data_references()[:3] == [
"alpha-1.csv",
"alpha-2.csv",
"alpha-3.csv",
]
assert my_data_connector.get_matched_data_reference_count() == 3
assert my_data_connector.get_matched_data_references()[:3] == [
"alpha-1.csv",
"alpha-2.csv",
"alpha-3.csv",
]
assert my_data_connector.get_unmatched_data_references()[:3] == []
assert my_data_connector.get_unmatched_data_reference_count() == 0
@pytest.mark.big
@mock.patch(
"great_expectations.datasource.fluent.data_asset.data_connector.google_cloud_storage_data_connector.list_gcs_keys"
)
def test_instantiation_batching_regex_does_not_match_paths(mock_list_keys):
mock_list_keys.return_value = [
"alpha-1.csv",
"alpha-2.csv",
"alpha-3.csv",
]
gcs_client: google.Client = cast("google.Client", MockGCSClient())
my_data_connector: DataConnector = GoogleCloudStorageDataConnector(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
batching_regex=re.compile(r"(?P<name>.+)_(?P<timestamp>.+)_(?P<price>.*)\.csv"),
gcs_client=gcs_client,
bucket_or_name="my_bucket",
prefix="",
file_path_template_map_fn=GCSUrl.OBJECT_URL_TEMPLATE.format,
)
assert my_data_connector.get_data_reference_count() == 3
assert my_data_connector.get_data_references()[:3] == [
"alpha-1.csv",
"alpha-2.csv",
"alpha-3.csv",
]
assert my_data_connector.get_matched_data_reference_count() == 0
assert my_data_connector.get_matched_data_references()[:3] == []
assert my_data_connector.get_unmatched_data_references()[:3] == [
"alpha-1.csv",
"alpha-2.csv",
"alpha-3.csv",
]
assert my_data_connector.get_unmatched_data_reference_count() == 3
@pytest.mark.big
@mock.patch(
"great_expectations.datasource.fluent.data_asset.data_connector.google_cloud_storage_data_connector.list_gcs_keys"
)
def test_return_all_batch_definitions_unsorted(mock_list_keys):
mock_list_keys.return_value = [
"abe_20200809_1040.csv",
"alex_20200809_1000.csv",
"alex_20200819_1300.csv",
"eugene_20200809_1500.csv",
"eugene_20201129_1900.csv",
"james_20200713_1567.csv",
"james_20200810_1003.csv",
"james_20200811_1009.csv",
"will_20200809_1002.csv",
"will_20200810_1001.csv",
]
gcs_client: google.Client = cast("google.Client", MockGCSClient())
my_data_connector: DataConnector = GoogleCloudStorageDataConnector(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
batching_regex=re.compile(r"(?P<name>.+)_(?P<timestamp>.+)_(?P<price>.*)\.csv"),
gcs_client=gcs_client,
bucket_or_name="my_bucket",
prefix="",
file_path_template_map_fn=GCSUrl.OBJECT_URL_TEMPLATE.format,
)
# with missing BatchRequest arguments
with pytest.raises(TypeError):
# noinspection PyArgumentList
my_data_connector.get_batch_definition_list()
# with empty options
unsorted_batch_definition_list: List[LegacyBatchDefinition] = (
my_data_connector.get_batch_definition_list(
BatchRequest(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
options={},
)
)
)
expected: List[LegacyBatchDefinition] = [
LegacyBatchDefinition(
datasource_name="my_file_path_datasource",
data_connector_name="fluent",
data_asset_name="my_google_cloud_storage_data_asset",
batch_identifiers=IDDict(
{
"path": "abe_20200809_1040.csv",
"name": "abe",
"timestamp": "20200809",
"price": "1040",
}
),
),
LegacyBatchDefinition(
datasource_name="my_file_path_datasource",
data_connector_name="fluent",
data_asset_name="my_google_cloud_storage_data_asset",
batch_identifiers=IDDict(
{
"path": "alex_20200809_1000.csv",
"name": "alex",
"timestamp": "20200809",
"price": "1000",
}
),
),
LegacyBatchDefinition(
datasource_name="my_file_path_datasource",
data_connector_name="fluent",
data_asset_name="my_google_cloud_storage_data_asset",
batch_identifiers=IDDict(
{
"path": "alex_20200819_1300.csv",
"name": "alex",
"timestamp": "20200819",
"price": "1300",
}
),
),
LegacyBatchDefinition(
datasource_name="my_file_path_datasource",
data_connector_name="fluent",
data_asset_name="my_google_cloud_storage_data_asset",
batch_identifiers=IDDict(
{
"path": "eugene_20200809_1500.csv",
"name": "eugene",
"timestamp": "20200809",
"price": "1500",
}
),
),
LegacyBatchDefinition(
datasource_name="my_file_path_datasource",
data_connector_name="fluent",
data_asset_name="my_google_cloud_storage_data_asset",
batch_identifiers=IDDict(
{
"path": "eugene_20201129_1900.csv",
"name": "eugene",
"timestamp": "20201129",
"price": "1900",
}
),
),
LegacyBatchDefinition(
datasource_name="my_file_path_datasource",
data_connector_name="fluent",
data_asset_name="my_google_cloud_storage_data_asset",
batch_identifiers=IDDict(
{
"path": "james_20200713_1567.csv",
"name": "james",
"timestamp": "20200713",
"price": "1567",
}
),
),
LegacyBatchDefinition(
datasource_name="my_file_path_datasource",
data_connector_name="fluent",
data_asset_name="my_google_cloud_storage_data_asset",
batch_identifiers=IDDict(
{
"path": "james_20200810_1003.csv",
"name": "james",
"timestamp": "20200810",
"price": "1003",
}
),
),
LegacyBatchDefinition(
datasource_name="my_file_path_datasource",
data_connector_name="fluent",
data_asset_name="my_google_cloud_storage_data_asset",
batch_identifiers=IDDict(
{
"path": "james_20200811_1009.csv",
"name": "james",
"timestamp": "20200811",
"price": "1009",
}
),
),
LegacyBatchDefinition(
datasource_name="my_file_path_datasource",
data_connector_name="fluent",
data_asset_name="my_google_cloud_storage_data_asset",
batch_identifiers=IDDict(
{
"path": "will_20200809_1002.csv",
"name": "will",
"timestamp": "20200809",
"price": "1002",
}
),
),
LegacyBatchDefinition(
datasource_name="my_file_path_datasource",
data_connector_name="fluent",
data_asset_name="my_google_cloud_storage_data_asset",
batch_identifiers=IDDict(
{
"path": "will_20200810_1001.csv",
"name": "will",
"timestamp": "20200810",
"price": "1001",
}
),
),
]
assert expected == unsorted_batch_definition_list
# with specified Batch query options
unsorted_batch_definition_list = my_data_connector.get_batch_definition_list(
BatchRequest(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
options={"name": "alex", "timestamp": "20200819", "price": "1300"},
)
)
assert expected[2:3] == unsorted_batch_definition_list
@pytest.mark.big
@mock.patch(
"great_expectations.datasource.fluent.data_asset.data_connector.google_cloud_storage_data_connector.list_gcs_keys"
)
def test_return_only_unique_batch_definitions(mock_list_keys):
mock_list_keys.return_value = [
"A/file_1.csv",
"A/file_2.csv",
"A/file_3.csv",
]
gcs_client: google.Client = cast("google.Client", MockGCSClient())
my_data_connector: DataConnector
my_data_connector = GoogleCloudStorageDataConnector(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
batching_regex=re.compile(r"(?P<name>.+).*\.csv"),
gcs_client=gcs_client,
bucket_or_name="my_bucket",
prefix="A",
file_path_template_map_fn=GCSUrl.OBJECT_URL_TEMPLATE.format,
)
assert my_data_connector.get_data_reference_count() == 3
assert my_data_connector.get_data_references()[:3] == [
"A/file_1.csv",
"A/file_2.csv",
"A/file_3.csv",
]
assert my_data_connector.get_matched_data_reference_count() == 3
assert my_data_connector.get_matched_data_references()[:3] == [
"A/file_1.csv",
"A/file_2.csv",
"A/file_3.csv",
]
assert my_data_connector.get_unmatched_data_references()[:3] == []
assert my_data_connector.get_unmatched_data_reference_count() == 0
mock_list_keys.return_value = [
"B/file_1.csv",
"B/file_2.csv",
]
expected: List[LegacyBatchDefinition] = [
LegacyBatchDefinition(
datasource_name="my_file_path_datasource",
data_connector_name="fluent",
data_asset_name="my_google_cloud_storage_data_asset",
batch_identifiers=IDDict({"path": "B/file_1.csv", "filename": "file_1"}),
),
LegacyBatchDefinition(
datasource_name="my_file_path_datasource",
data_connector_name="fluent",
data_asset_name="my_google_cloud_storage_data_asset",
batch_identifiers=IDDict({"path": "B/file_2.csv", "filename": "file_2"}),
),
]
my_data_connector = GoogleCloudStorageDataConnector(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
batching_regex=re.compile(r"(?P<filename>.+).*\.csv"),
gcs_client=gcs_client,
bucket_or_name="my_bucket",
prefix="B",
file_path_template_map_fn=GCSUrl.OBJECT_URL_TEMPLATE.format,
)
unsorted_batch_definition_list: List[LegacyBatchDefinition] = (
my_data_connector.get_batch_definition_list(
BatchRequest(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
options={},
)
)
)
assert expected == unsorted_batch_definition_list
@pytest.mark.big
@mock.patch(
"great_expectations.datasource.fluent.data_asset.data_connector.google_cloud_storage_data_connector.list_gcs_keys"
)
def test_alpha(mock_list_keys):
mock_list_keys.return_value = [
"test_dir_alpha/A.csv",
"test_dir_alpha/B.csv",
"test_dir_alpha/C.csv",
"test_dir_alpha/D.csv",
]
gcs_client: google.Client = cast("google.Client", MockGCSClient())
my_data_connector: DataConnector = GoogleCloudStorageDataConnector(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
batching_regex=re.compile(r"(?P<part_1>.*)\.csv"),
gcs_client=gcs_client,
bucket_or_name="my_bucket",
prefix="test_dir_alpha",
file_path_template_map_fn=GCSUrl.OBJECT_URL_TEMPLATE.format,
)
assert my_data_connector.get_data_reference_count() == 4
assert my_data_connector.get_data_references()[:3] == [
"test_dir_alpha/A.csv",
"test_dir_alpha/B.csv",
"test_dir_alpha/C.csv",
]
assert my_data_connector.get_matched_data_reference_count() == 4
assert my_data_connector.get_matched_data_references()[:3] == [
"test_dir_alpha/A.csv",
"test_dir_alpha/B.csv",
"test_dir_alpha/C.csv",
]
assert my_data_connector.get_unmatched_data_references()[:3] == []
assert my_data_connector.get_unmatched_data_reference_count() == 0
my_batch_definition_list: List[LegacyBatchDefinition]
my_batch_definition: LegacyBatchDefinition
my_batch_request: BatchRequest
# Try to fetch a batch from a nonexistent asset
my_batch_request = BatchRequest(datasource_name="BASE", data_asset_name="A", options={})
my_batch_definition_list = my_data_connector.get_batch_definition_list(
batch_request=my_batch_request
)
assert len(my_batch_definition_list) == 0
my_batch_request = BatchRequest(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
options={"part_1": "B"},
)
my_batch_definition_list = my_data_connector.get_batch_definition_list(
batch_request=my_batch_request
)
assert len(my_batch_definition_list) == 1
@pytest.mark.big
@mock.patch(
"great_expectations.datasource.fluent.data_asset.data_connector.google_cloud_storage_data_connector.list_gcs_keys"
)
def test_foxtrot(mock_list_keys):
mock_list_keys.return_value = []
gcs_client: google.Client = cast("google.Client", MockGCSClient())
my_data_connector: DataConnector
my_data_connector = GoogleCloudStorageDataConnector(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
batching_regex=re.compile(r"(?P<part_1>.+)-(?P<part_2>.*)\.csv"),
gcs_client=gcs_client,
bucket_or_name="my_bucket",
prefix="",
file_path_template_map_fn=GCSUrl.OBJECT_URL_TEMPLATE.format,
)
assert my_data_connector.get_data_reference_count() == 0
assert my_data_connector.get_data_references()[:3] == []
assert my_data_connector.get_matched_data_reference_count() == 0
assert my_data_connector.get_matched_data_references()[:3] == []
assert my_data_connector.get_unmatched_data_references()[:3] == []
assert my_data_connector.get_unmatched_data_reference_count() == 0
mock_list_keys.return_value = [
"test_dir_foxtrot/A/A-1.csv",
"test_dir_foxtrot/A/A-2.csv",
"test_dir_foxtrot/A/A-3.csv",
]
my_data_connector = GoogleCloudStorageDataConnector(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
batching_regex=re.compile(r"(?P<part_1>.+)-(?P<part_2>.*)\.csv"),
gcs_client=gcs_client,
bucket_or_name="my_bucket",
prefix="test_dir_foxtrot/A",
file_path_template_map_fn=GCSUrl.OBJECT_URL_TEMPLATE.format,
)
assert my_data_connector.get_data_reference_count() == 3
assert my_data_connector.get_data_references()[:3] == [
"test_dir_foxtrot/A/A-1.csv",
"test_dir_foxtrot/A/A-2.csv",
"test_dir_foxtrot/A/A-3.csv",
]
assert my_data_connector.get_matched_data_reference_count() == 3
assert my_data_connector.get_matched_data_references()[:3] == [
"test_dir_foxtrot/A/A-1.csv",
"test_dir_foxtrot/A/A-2.csv",
"test_dir_foxtrot/A/A-3.csv",
]
assert my_data_connector.get_unmatched_data_references()[:3] == []
assert my_data_connector.get_unmatched_data_reference_count() == 0
mock_list_keys.return_value = [
"test_dir_foxtrot/B/B-1.txt",
"test_dir_foxtrot/B/B-2.txt",
"test_dir_foxtrot/B/B-3.txt",
]
my_data_connector = GoogleCloudStorageDataConnector(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
batching_regex=re.compile(r"(?P<part_1>.+)-(?P<part_2>.*)\.txt"),
gcs_client=gcs_client,
bucket_or_name="my_bucket",
prefix="test_dir_foxtrot/B",
file_path_template_map_fn=GCSUrl.OBJECT_URL_TEMPLATE.format,
)
mock_list_keys.return_value = [
"test_dir_foxtrot/B/B-1.txt",
"test_dir_foxtrot/B/B-2.txt",
"test_dir_foxtrot/B/B-3.txt",
]
assert my_data_connector.get_data_reference_count() == 3
assert my_data_connector.get_data_references()[:3] == [
"test_dir_foxtrot/B/B-1.txt",
"test_dir_foxtrot/B/B-2.txt",
"test_dir_foxtrot/B/B-3.txt",
]
assert my_data_connector.get_matched_data_reference_count() == 3
assert my_data_connector.get_matched_data_references()[:3] == [
"test_dir_foxtrot/B/B-1.txt",
"test_dir_foxtrot/B/B-2.txt",
"test_dir_foxtrot/B/B-3.txt",
]
assert my_data_connector.get_unmatched_data_references()[:3] == []
assert my_data_connector.get_unmatched_data_reference_count() == 0
my_data_connector = GoogleCloudStorageDataConnector(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
batching_regex=re.compile(r"(?P<part_1>.+)-(?P<part_2>.*)\.csv"),
gcs_client=gcs_client,
bucket_or_name="my_bucket",
prefix="test_dir_foxtrot/C",
file_path_template_map_fn=GCSUrl.OBJECT_URL_TEMPLATE.format,
)
mock_list_keys.return_value = [
"test_dir_foxtrot/C/C-2017.csv",
"test_dir_foxtrot/C/C-2018.csv",
"test_dir_foxtrot/C/C-2019.csv",
]
assert my_data_connector.get_data_reference_count() == 3
assert my_data_connector.get_data_references()[:3] == [
"test_dir_foxtrot/C/C-2017.csv",
"test_dir_foxtrot/C/C-2018.csv",
"test_dir_foxtrot/C/C-2019.csv",
]
assert my_data_connector.get_matched_data_reference_count() == 3
assert my_data_connector.get_matched_data_references()[:3] == [
"test_dir_foxtrot/C/C-2017.csv",
"test_dir_foxtrot/C/C-2018.csv",
"test_dir_foxtrot/C/C-2019.csv",
]
assert my_data_connector.get_unmatched_data_references()[:3] == []
assert my_data_connector.get_unmatched_data_reference_count() == 0
my_batch_request = BatchRequest(
datasource_name="my_file_path_datasource",
data_asset_name="my_google_cloud_storage_data_asset",
options={},
)
my_batch_definition_list: List[LegacyBatchDefinition] = (
my_data_connector.get_batch_definition_list(batch_request=my_batch_request)
)
assert len(my_batch_definition_list) == 3
@pytest.mark.unit
@pytest.mark.parametrize(
"whole_directory_override, expected_batch_count, expected_identifier_key",
[
pytest.param(
True, 1, "path", id="with_whole_directory_override_returns_single_directory_batch"
),
pytest.param(
False,
3,
"filename",
id="without_whole_directory_override_returns_individual_file_batches",
),
],
)
def test_gcs_data_connector_whole_directory_path_override(
whole_directory_override, expected_batch_count, expected_identifier_key, mocker
):
"""Test GoogleCloudStorageDataConnector with whole_directory_path_override parameter."""
# Setup
bucket_name = "test-bucket"
prefix = "test_directory/"
whole_directory_path = f"gs://{bucket_name}/{prefix}"
# Mock GCS client
mock_gcs_client = mocker.Mock()
mock_bucket = mocker.Mock()
mock_gcs_client.bucket.return_value = mock_bucket
# Create mock blobs for file mode testing
mock_blobs = []
test_files = [
"test_directory/file1.csv",
"test_directory/file2.csv",
"test_directory/file3.csv",
]
for file_path in test_files:
mock_blob = mocker.Mock()
mock_blob.name = file_path
mock_blobs.append(mock_blob)
mock_bucket.list_blobs.return_value = mock_blobs
# Create data connector with conditional whole_directory_path_override
data_connector = GoogleCloudStorageDataConnector(
datasource_name="my_gcs_datasource",
data_asset_name="my_data_asset",
gcs_client=mock_gcs_client,
bucket_or_name=bucket_name,
prefix=prefix,
file_path_template_map_fn=lambda bucket_or_name, path: f"gs://{bucket_or_name}/{path}",
whole_directory_path_override=whole_directory_path if whole_directory_override else None,
)
# Create batch request with conditional partitioner
batch_request = BatchRequest(
datasource_name="my_gcs_datasource",
data_asset_name="my_data_asset",
options={},
partitioner=None
if whole_directory_override
else FileNamePartitionerPath(regex=re.compile(r"(?P<filename>.+\.csv)")),
)
batch_definitions = data_connector.get_batch_definition_list(batch_request)
# Verify expected batch count
assert len(batch_definitions) == expected_batch_count
# Verify batch definitions have correct structure
for batch_definition in batch_definitions:
assert batch_definition.datasource_name == "my_gcs_datasource"
assert batch_definition.data_asset_name == "my_data_asset"
assert expected_identifier_key in batch_definition.batch_identifiers
if whole_directory_override:
# For directory mode, verify single batch with directory path
batch_definition = batch_definitions[0]
assert batch_definition.batch_identifiers["path"] == whole_directory_path
else:
# For file mode, verify individual file batches
file_names = [bd.batch_identifiers["filename"] for bd in batch_definitions]
expected_files = ["file1.csv", "file2.csv", "file3.csv"]
assert sorted(file_names) == sorted(expected_files)
@pytest.mark.unit
def test_gcs_data_connector_missing_file_path_template_map_fn_error():
"""Test GCS data connector raises error when file_path_template_map_fn is None."""
gcs_client: google.Client = MockGCSClient()
data_connector = GoogleCloudStorageDataConnector(
datasource_name="my_gcs_datasource",
data_asset_name="my_data_asset",
gcs_client=gcs_client,
bucket_or_name="test-bucket",
prefix="test/",
file_path_template_map_fn=None,
)
with pytest.raises(MissingFilePathTemplateMapFnError):
data_connector._get_full_file_path("test.csv")
| MockGCSClient |
python | PyCQA__pylint | tests/functional/u/unused/unused_import_assigned_to.py | {
"start": 289,
"end": 344
} | class ____:
uuid = test(default=uuid.uuid4)
| BaseModel |
python | readthedocs__readthedocs.org | readthedocs/doc_builder/backends/sphinx.py | {
"start": 6687,
"end": 6874
} | class ____(BaseSphinx):
relative_output_dir = "html"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sphinx_builder = "html"
| HtmlBuilder |
python | coleifer__peewee | tests/sqlcipher_ext.py | {
"start": 3152,
"end": 3678
} | class ____(CleanUpModelTestCase):
database = config_db
def test_configuration_via_pragma(self):
# Write some data so the database file is created.
self.database.execute_sql('create table foo (data TEXT)')
self.database.close()
self.database.connect()
self.assertEqual(int(self.database.pragma('kdf_iter')), 1234)
self.assertEqual(int(self.database.pragma('cipher_page_size')), 8192)
self.assertTrue('foo' in self.database.get_tables())
| TestSqlCipherConfiguration |
python | pytorch__pytorch | test/distributions/test_distributions.py | {
"start": 227564,
"end": 249956
} | class ____(DistributionsTestCase):
def setUp(self):
super().setUp()
class Binomial30(Binomial):
def __init__(self, probs):
super().__init__(30, probs)
# These are pairs of distributions with 4 x 4 parameters as specified.
# The first of the pair e.g. bernoulli[0] varies column-wise and the second
# e.g. bernoulli[1] varies row-wise; that way we test all param pairs.
bernoulli = pairwise(Bernoulli, [0.1, 0.2, 0.6, 0.9])
binomial30 = pairwise(Binomial30, [0.1, 0.2, 0.6, 0.9])
binomial_vectorized_count = (
Binomial(torch.tensor([3, 4]), torch.tensor([0.4, 0.6])),
Binomial(torch.tensor([3, 4]), torch.tensor([0.5, 0.8])),
)
beta = pairwise(Beta, [1.0, 2.5, 1.0, 2.5], [1.5, 1.5, 3.5, 3.5])
categorical = pairwise(
Categorical,
[[0.4, 0.3, 0.3], [0.2, 0.7, 0.1], [0.33, 0.33, 0.34], [0.2, 0.2, 0.6]],
)
cauchy = pairwise(Cauchy, [-2.0, 2.0, -3.0, 3.0], [1.0, 2.0, 1.0, 2.0])
chi2 = pairwise(Chi2, [1.0, 2.0, 2.5, 5.0])
dirichlet = pairwise(
Dirichlet,
[[0.1, 0.2, 0.7], [0.5, 0.4, 0.1], [0.33, 0.33, 0.34], [0.2, 0.2, 0.4]],
)
exponential = pairwise(Exponential, [1.0, 2.5, 5.0, 10.0])
gamma = pairwise(Gamma, [1.0, 2.5, 1.0, 2.5], [1.5, 1.5, 3.5, 3.5])
gumbel = pairwise(Gumbel, [-2.0, 4.0, -3.0, 6.0], [1.0, 2.5, 1.0, 2.5])
halfnormal = pairwise(HalfNormal, [1.0, 2.0, 1.0, 2.0])
inversegamma = pairwise(
InverseGamma, [1.0, 2.5, 1.0, 2.5], [1.5, 1.5, 3.5, 3.5]
)
laplace = pairwise(Laplace, [-2.0, 4.0, -3.0, 6.0], [1.0, 2.5, 1.0, 2.5])
lognormal = pairwise(LogNormal, [-2.0, 2.0, -3.0, 3.0], [1.0, 2.0, 1.0, 2.0])
normal = pairwise(Normal, [-2.0, 2.0, -3.0, 3.0], [1.0, 2.0, 1.0, 2.0])
independent = (Independent(normal[0], 1), Independent(normal[1], 1))
onehotcategorical = pairwise(
OneHotCategorical,
[[0.4, 0.3, 0.3], [0.2, 0.7, 0.1], [0.33, 0.33, 0.34], [0.2, 0.2, 0.6]],
)
pareto = (
Pareto(
torch.tensor([2.5, 4.0, 2.5, 4.0]).expand(4, 4),
torch.tensor([2.25, 3.75, 2.25, 3.75]).expand(4, 4),
),
Pareto(
torch.tensor([2.25, 3.75, 2.25, 3.8]).expand(4, 4),
torch.tensor([2.25, 3.75, 2.25, 3.75]).expand(4, 4),
),
)
poisson = pairwise(Poisson, [0.3, 1.0, 5.0, 10.0])
uniform_within_unit = pairwise(
Uniform, [0.1, 0.9, 0.2, 0.75], [0.15, 0.95, 0.25, 0.8]
)
uniform_positive = pairwise(Uniform, [1, 1.5, 2, 4], [1.2, 2.0, 3, 7])
uniform_real = pairwise(Uniform, [-2.0, -1, 0, 2], [-1.0, 1, 1, 4])
uniform_pareto = pairwise(Uniform, [6.5, 7.5, 6.5, 8.5], [7.5, 8.5, 9.5, 9.5])
continuous_bernoulli = pairwise(ContinuousBernoulli, [0.1, 0.2, 0.5, 0.9])
# These tests should pass with precision = 0.01, but that makes tests very expensive.
# Instead, we test with precision = 0.1 and only test with higher precision locally
# when adding a new KL implementation.
# The following pairs are not tested due to very high variance of the monte carlo
# estimator; their implementations have been reviewed with extra care:
# - (pareto, normal)
self.precision = 0.1 # Set this to 0.01 when testing a new KL implementation.
self.max_samples = int(1e07) # Increase this when testing at smaller precision.
self.samples_per_batch = int(1e04)
self.finite_examples = [
(bernoulli, bernoulli),
(bernoulli, poisson),
(beta, beta),
(beta, chi2),
(beta, exponential),
(beta, gamma),
(beta, normal),
(binomial30, binomial30),
(binomial_vectorized_count, binomial_vectorized_count),
(categorical, categorical),
(cauchy, cauchy),
(chi2, chi2),
(chi2, exponential),
(chi2, gamma),
(chi2, normal),
(dirichlet, dirichlet),
(exponential, chi2),
(exponential, exponential),
(exponential, gamma),
(exponential, gumbel),
(exponential, normal),
(gamma, chi2),
(gamma, exponential),
(gamma, gamma),
(gamma, gumbel),
(gamma, normal),
(gumbel, gumbel),
(gumbel, normal),
(halfnormal, halfnormal),
(independent, independent),
(inversegamma, inversegamma),
(laplace, laplace),
(lognormal, lognormal),
(laplace, normal),
(normal, gumbel),
(normal, laplace),
(normal, normal),
(onehotcategorical, onehotcategorical),
(pareto, chi2),
(pareto, pareto),
(pareto, exponential),
(pareto, gamma),
(poisson, poisson),
(uniform_within_unit, beta),
(uniform_positive, chi2),
(uniform_positive, exponential),
(uniform_positive, gamma),
(uniform_real, gumbel),
(uniform_real, normal),
(uniform_pareto, pareto),
(continuous_bernoulli, continuous_bernoulli),
(continuous_bernoulli, exponential),
(continuous_bernoulli, normal),
(beta, continuous_bernoulli),
]
self.infinite_examples = [
(Bernoulli(0), Bernoulli(1)),
(Bernoulli(1), Bernoulli(0)),
(
Categorical(torch.tensor([0.9, 0.1])),
Categorical(torch.tensor([1.0, 0.0])),
),
(
Categorical(torch.tensor([[0.9, 0.1], [0.9, 0.1]])),
Categorical(torch.tensor([1.0, 0.0])),
),
(Beta(1, 2), Uniform(0.25, 1)),
(Beta(1, 2), Uniform(0, 0.75)),
(Beta(1, 2), Uniform(0.25, 0.75)),
(Beta(1, 2), Pareto(1, 2)),
(Binomial(31, 0.7), Binomial(30, 0.3)),
(
Binomial(torch.tensor([3, 4]), torch.tensor([0.4, 0.6])),
Binomial(torch.tensor([2, 3]), torch.tensor([0.5, 0.8])),
),
(Chi2(1), Beta(2, 3)),
(Chi2(1), Pareto(2, 3)),
(Chi2(1), Uniform(-2, 3)),
(Exponential(1), Beta(2, 3)),
(Exponential(1), Pareto(2, 3)),
(Exponential(1), Uniform(-2, 3)),
(Gamma(1, 2), Beta(3, 4)),
(Gamma(1, 2), Pareto(3, 4)),
(Gamma(1, 2), Uniform(-3, 4)),
(Gumbel(-1, 2), Beta(3, 4)),
(Gumbel(-1, 2), Chi2(3)),
(Gumbel(-1, 2), Exponential(3)),
(Gumbel(-1, 2), Gamma(3, 4)),
(Gumbel(-1, 2), Pareto(3, 4)),
(Gumbel(-1, 2), Uniform(-3, 4)),
(Laplace(-1, 2), Beta(3, 4)),
(Laplace(-1, 2), Chi2(3)),
(Laplace(-1, 2), Exponential(3)),
(Laplace(-1, 2), Gamma(3, 4)),
(Laplace(-1, 2), Pareto(3, 4)),
(Laplace(-1, 2), Uniform(-3, 4)),
(Normal(-1, 2), Beta(3, 4)),
(Normal(-1, 2), Chi2(3)),
(Normal(-1, 2), Exponential(3)),
(Normal(-1, 2), Gamma(3, 4)),
(Normal(-1, 2), Pareto(3, 4)),
(Normal(-1, 2), Uniform(-3, 4)),
(Pareto(2, 1), Chi2(3)),
(Pareto(2, 1), Exponential(3)),
(Pareto(2, 1), Gamma(3, 4)),
(Pareto(1, 2), Normal(-3, 4)),
(Pareto(1, 2), Pareto(3, 4)),
(Poisson(2), Bernoulli(0.5)),
(Poisson(2.3), Binomial(10, 0.2)),
(Uniform(-1, 1), Beta(2, 2)),
(Uniform(0, 2), Beta(3, 4)),
(Uniform(-1, 2), Beta(3, 4)),
(Uniform(-1, 2), Chi2(3)),
(Uniform(-1, 2), Exponential(3)),
(Uniform(-1, 2), Gamma(3, 4)),
(Uniform(-1, 2), Pareto(3, 4)),
(ContinuousBernoulli(0.25), Uniform(0.25, 1)),
(ContinuousBernoulli(0.25), Uniform(0, 0.75)),
(ContinuousBernoulli(0.25), Uniform(0.25, 0.75)),
(ContinuousBernoulli(0.25), Pareto(1, 2)),
(Exponential(1), ContinuousBernoulli(0.75)),
(Gamma(1, 2), ContinuousBernoulli(0.75)),
(Gumbel(-1, 2), ContinuousBernoulli(0.75)),
(Laplace(-1, 2), ContinuousBernoulli(0.75)),
(Normal(-1, 2), ContinuousBernoulli(0.75)),
(Uniform(-1, 1), ContinuousBernoulli(0.75)),
(Uniform(0, 2), ContinuousBernoulli(0.75)),
(Uniform(-1, 2), ContinuousBernoulli(0.75)),
]
def test_kl_monte_carlo(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
for (p, _), (_, q) in self.finite_examples:
actual = kl_divergence(p, q)
numerator = 0
denominator = 0
while denominator < self.max_samples:
x = p.sample(sample_shape=(self.samples_per_batch,))
numerator += (p.log_prob(x) - q.log_prob(x)).sum(0)
denominator += x.size(0)
expected = numerator / denominator
error = torch.abs(expected - actual) / (1 + expected)
if error[error == error].max() < self.precision:
break
self.assertLess(
error[error == error].max(),
self.precision,
"\n".join(
[
f"Incorrect KL({type(p).__name__}, {type(q).__name__}).",
f"Expected ({denominator} Monte Carlo samples): {expected}",
f"Actual (analytic): {actual}",
]
),
)
# Multivariate normal has a separate Monte Carlo based test due to the requirement of random generation of
# positive (semi) definite matrices. n is set to 5, but can be increased during testing.
def test_kl_multivariate_normal(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
n = 5 # Number of tests for multivariate_normal
for i in range(n):
loc = [torch.randn(4) for _ in range(2)]
scale_tril = [
transform_to(constraints.lower_cholesky)(torch.randn(4, 4))
for _ in range(2)
]
p = MultivariateNormal(loc=loc[0], scale_tril=scale_tril[0])
q = MultivariateNormal(loc=loc[1], scale_tril=scale_tril[1])
actual = kl_divergence(p, q)
numerator = 0
denominator = 0
while denominator < self.max_samples:
x = p.sample(sample_shape=(self.samples_per_batch,))
numerator += (p.log_prob(x) - q.log_prob(x)).sum(0)
denominator += x.size(0)
expected = numerator / denominator
error = torch.abs(expected - actual) / (1 + expected)
if error[error == error].max() < self.precision:
break
self.assertLess(
error[error == error].max(),
self.precision,
"\n".join(
[
f"Incorrect KL(MultivariateNormal, MultivariateNormal) instance {i + 1}/{n}",
f"Expected ({denominator} Monte Carlo sample): {expected}",
f"Actual (analytic): {actual}",
]
),
)
def test_kl_multivariate_normal_batched(self):
b = 7 # Number of batches
loc = [torch.randn(b, 3) for _ in range(2)]
scale_tril = [
transform_to(constraints.lower_cholesky)(torch.randn(b, 3, 3))
for _ in range(2)
]
expected_kl = torch.stack(
[
kl_divergence(
MultivariateNormal(loc[0][i], scale_tril=scale_tril[0][i]),
MultivariateNormal(loc[1][i], scale_tril=scale_tril[1][i]),
)
for i in range(b)
]
)
actual_kl = kl_divergence(
MultivariateNormal(loc[0], scale_tril=scale_tril[0]),
MultivariateNormal(loc[1], scale_tril=scale_tril[1]),
)
self.assertEqual(expected_kl, actual_kl)
def test_kl_multivariate_normal_batched_broadcasted(self):
b = 7 # Number of batches
loc = [torch.randn(b, 3) for _ in range(2)]
scale_tril = [
transform_to(constraints.lower_cholesky)(torch.randn(b, 3, 3)),
transform_to(constraints.lower_cholesky)(torch.randn(3, 3)),
]
expected_kl = torch.stack(
[
kl_divergence(
MultivariateNormal(loc[0][i], scale_tril=scale_tril[0][i]),
MultivariateNormal(loc[1][i], scale_tril=scale_tril[1]),
)
for i in range(b)
]
)
actual_kl = kl_divergence(
MultivariateNormal(loc[0], scale_tril=scale_tril[0]),
MultivariateNormal(loc[1], scale_tril=scale_tril[1]),
)
self.assertEqual(expected_kl, actual_kl)
def test_kl_lowrank_multivariate_normal(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
n = 5 # Number of tests for lowrank_multivariate_normal
for i in range(n):
loc = [torch.randn(4) for _ in range(2)]
cov_factor = [torch.randn(4, 3) for _ in range(2)]
cov_diag = [
transform_to(constraints.positive)(torch.randn(4)) for _ in range(2)
]
covariance_matrix = [
cov_factor[i].matmul(cov_factor[i].t()) + cov_diag[i].diag()
for i in range(2)
]
p = LowRankMultivariateNormal(loc[0], cov_factor[0], cov_diag[0])
q = LowRankMultivariateNormal(loc[1], cov_factor[1], cov_diag[1])
p_full = MultivariateNormal(loc[0], covariance_matrix[0])
q_full = MultivariateNormal(loc[1], covariance_matrix[1])
expected = kl_divergence(p_full, q_full)
actual_lowrank_lowrank = kl_divergence(p, q)
actual_lowrank_full = kl_divergence(p, q_full)
actual_full_lowrank = kl_divergence(p_full, q)
error_lowrank_lowrank = torch.abs(actual_lowrank_lowrank - expected).max()
self.assertLess(
error_lowrank_lowrank,
self.precision,
"\n".join(
[
f"Incorrect KL(LowRankMultivariateNormal, LowRankMultivariateNormal) instance {i + 1}/{n}",
f"Expected (from KL MultivariateNormal): {expected}",
f"Actual (analytic): {actual_lowrank_lowrank}",
]
),
)
error_lowrank_full = torch.abs(actual_lowrank_full - expected).max()
self.assertLess(
error_lowrank_full,
self.precision,
"\n".join(
[
f"Incorrect KL(LowRankMultivariateNormal, MultivariateNormal) instance {i + 1}/{n}",
f"Expected (from KL MultivariateNormal): {expected}",
f"Actual (analytic): {actual_lowrank_full}",
]
),
)
error_full_lowrank = torch.abs(actual_full_lowrank - expected).max()
self.assertLess(
error_full_lowrank,
self.precision,
"\n".join(
[
f"Incorrect KL(MultivariateNormal, LowRankMultivariateNormal) instance {i + 1}/{n}",
f"Expected (from KL MultivariateNormal): {expected}",
f"Actual (analytic): {actual_full_lowrank}",
]
),
)
def test_kl_lowrank_multivariate_normal_batched(self):
b = 7 # Number of batches
loc = [torch.randn(b, 3) for _ in range(2)]
cov_factor = [torch.randn(b, 3, 2) for _ in range(2)]
cov_diag = [
transform_to(constraints.positive)(torch.randn(b, 3)) for _ in range(2)
]
expected_kl = torch.stack(
[
kl_divergence(
LowRankMultivariateNormal(
loc[0][i], cov_factor[0][i], cov_diag[0][i]
),
LowRankMultivariateNormal(
loc[1][i], cov_factor[1][i], cov_diag[1][i]
),
)
for i in range(b)
]
)
actual_kl = kl_divergence(
LowRankMultivariateNormal(loc[0], cov_factor[0], cov_diag[0]),
LowRankMultivariateNormal(loc[1], cov_factor[1], cov_diag[1]),
)
self.assertEqual(expected_kl, actual_kl)
def test_kl_exponential_family(self):
for (p, _), (_, q) in self.finite_examples:
if type(p) is type(q) and issubclass(type(p), ExponentialFamily):
actual = kl_divergence(p, q)
expected = _kl_expfamily_expfamily(p, q)
self.assertEqual(
actual,
expected,
msg="\n".join(
[
f"Incorrect KL({type(p).__name__}, {type(q).__name__}).",
f"Expected (using Bregman Divergence) {expected}",
f"Actual (analytic) {actual}",
f"max error = {torch.abs(actual - expected).max()}",
]
),
)
def test_kl_infinite(self):
for p, q in self.infinite_examples:
self.assertTrue(
(kl_divergence(p, q) == inf).all(),
f"Incorrect KL({type(p).__name__}, {type(q).__name__})",
)
def test_kl_edgecases(self):
self.assertEqual(kl_divergence(Bernoulli(0), Bernoulli(0)), 0)
self.assertEqual(kl_divergence(Bernoulli(1), Bernoulli(1)), 0)
self.assertEqual(
kl_divergence(
Categorical(torch.tensor([0.0, 1.0])),
Categorical(torch.tensor([0.0, 1.0])),
),
0,
)
self.assertEqual(kl_divergence(Uniform(0, 1), Beta(1, 1)), 0)
def test_kl_shape(self):
for Dist, params in _get_examples():
for i, param in enumerate(params):
dist = Dist(**param)
try:
kl = kl_divergence(dist, dist)
except NotImplementedError:
continue
expected_shape = dist.batch_shape if dist.batch_shape else torch.Size()
self.assertEqual(
kl.shape,
expected_shape,
msg="\n".join(
[
f"{Dist.__name__} example {i + 1}/{len(params)}",
f"Expected {expected_shape}",
f"Actual {kl.shape}",
]
),
)
def test_kl_transformed(self):
# Regression test for https://github.com/pytorch/pytorch/issues/34859
scale = torch.ones(2, 3)
loc = torch.zeros(2, 3)
normal = Normal(loc=loc, scale=scale)
diag_normal = Independent(normal, reinterpreted_batch_ndims=1)
trans_dist = TransformedDistribution(
diag_normal, AffineTransform(loc=0.0, scale=2.0)
)
self.assertEqual(kl_divergence(diag_normal, diag_normal).shape, (2,))
self.assertEqual(kl_divergence(trans_dist, trans_dist).shape, (2,))
@set_default_dtype(torch.double)
def test_entropy_monte_carlo(self):
set_rng_seed(0) # see Note [Randomized statistical tests]
for Dist, params in _get_examples():
for i, param in enumerate(params):
dist = Dist(**param)
try:
actual = dist.entropy()
except NotImplementedError:
continue
x = dist.sample(sample_shape=(60000,))
expected = -dist.log_prob(x).mean(0)
ignore = (expected == inf) | (expected == -inf)
expected[ignore] = actual[ignore]
self.assertEqual(
actual,
expected,
atol=0.2,
rtol=0,
msg="\n".join(
[
f"{Dist.__name__} example {i + 1}/{len(params)}, incorrect .entropy().",
f"Expected (monte carlo) {expected}",
f"Actual (analytic) {actual}",
f"max error = {torch.abs(actual - expected).max()}",
]
),
)
@set_default_dtype(torch.double)
def test_entropy_exponential_family(self):
for Dist, params in _get_examples():
if not issubclass(Dist, ExponentialFamily):
continue
for i, param in enumerate(params):
dist = Dist(**param)
try:
actual = dist.entropy()
except NotImplementedError:
continue
try:
expected = ExponentialFamily.entropy(dist)
except NotImplementedError:
continue
self.assertEqual(
actual,
expected,
msg="\n".join(
[
f"{Dist.__name__} example {i + 1}/{len(params)}, incorrect .entropy().",
f"Expected (Bregman Divergence) {expected}",
f"Actual (analytic) {actual}",
f"max error = {torch.abs(actual - expected).max()}",
]
),
)
| TestKL |
python | jd__tenacity | tests/test_asyncio.py | {
"start": 4547,
"end": 4851
} | class ____(unittest.TestCase):
def test_trio_basic(self):
thing = NoIOErrorAfterCount(5)
@retry
async def trio_function():
await trio.sleep(0.00001)
return thing.go()
trio.run(trio_function)
assert thing.counter == thing.count
| TestTrio |
python | readthedocs__readthedocs.org | readthedocs/integrations/models.py | {
"start": 904,
"end": 5205
} | class ____(models.Manager):
"""HTTP exchange manager methods."""
# Filter rules for request headers to remove from the output
REQ_FILTER_RULES = [
re.compile("^X-Forwarded-.*$", re.I),
re.compile("^X-Real-Ip$", re.I),
]
@transaction.atomic
def from_exchange(self, req, resp, related_object, payload=None):
"""
Create object from Django request and response objects.
If an explicit Request ``payload`` is not specified, the payload will be
determined directly from the Request object. This makes a good effort to
normalize the data, however we don't enforce that the payload is JSON
:param req: Request object to store
:type req: HttpRequest
:param resp: Response object to store
:type resp: HttpResponse
:param related_object: Object to use for generic relation
:param payload: Alternate payload object to store
:type payload: dict
"""
request_payload = payload
if request_payload is None:
request_payload = normalize_request_payload(req)
try:
request_body = json.dumps(request_payload, sort_keys=True)
except TypeError:
request_body = str(request_payload)
# This is the rawest form of request header we have, the WSGI
# headers. HTTP headers are prefixed with `HTTP_`, which we remove,
# and because the keys are all uppercase, we'll normalize them to
# title case-y hyphen separated values.
request_headers = {
key[5:].title().replace("_", "-"): str(val)
for (key, val) in list(req.META.items())
if key.startswith("HTTP_")
} # yapf: disable
request_headers["Content-Type"] = req.content_type
# Remove unwanted headers
for filter_rule in self.REQ_FILTER_RULES:
for key in list(request_headers.keys()):
if filter_rule.match(key):
del request_headers[key]
response_payload = resp.data if hasattr(resp, "data") else resp.content
try:
response_body = json.dumps(response_payload, sort_keys=True)
except TypeError:
response_body = str(response_payload)
response_headers = dict(list(resp.items()))
fields = {
"status_code": resp.status_code,
"request_headers": request_headers,
"request_body": request_body,
"response_body": response_body,
"response_headers": response_headers,
}
fields["related_object"] = related_object
obj = self.create(**fields)
self.delete_limit(related_object)
return obj
def from_requests_exchange(self, response, related_object):
"""
Create an exchange object from a requests' response.
:param response: The result from calling request.post() or similar.
:param related_object: Object to use for generic relationship.
"""
request = response.request
# NOTE: we need to cast ``request.headers`` and ``response.headers``
# because it's a ``requests.structures.CaseInsensitiveDict`` which is
# not JSON serializable.
obj = self.create(
related_object=related_object,
request_headers=dict(request.headers) or {},
request_body=request.body or "",
status_code=response.status_code,
response_headers=dict(response.headers),
response_body=response.text,
)
self.delete_limit(related_object)
return obj
def delete_limit(self, related_object, limit=10):
# If the related_object is an instance of Integration,
# it could be a proxy model, so we force it to always be the "real" model.
if isinstance(related_object, Integration):
model = Integration
else:
model = related_object
queryset = self.filter(
content_type=ContentType.objects.get(
app_label=model._meta.app_label,
model=model._meta.model_name,
),
object_id=related_object.pk,
)
for exchange in queryset[limit:]:
exchange.delete()
| HttpExchangeManager |
python | pytorch__pytorch | torch/distributions/one_hot_categorical.py | {
"start": 354,
"end": 4433
} | class ____(Distribution):
r"""
Creates a one-hot categorical distribution parameterized by :attr:`probs` or
:attr:`logits`.
Samples are one-hot coded vectors of size ``probs.size(-1)``.
.. note:: The `probs` argument must be non-negative, finite and have a non-zero sum,
and it will be normalized to sum to 1 along the last dimension. :attr:`probs`
will return this normalized value.
The `logits` argument will be interpreted as unnormalized log probabilities
and can therefore be any real number. It will likewise be normalized so that
the resulting probabilities sum to 1 along the last dimension. :attr:`logits`
will return this normalized value.
See also: :func:`torch.distributions.Categorical` for specifications of
:attr:`probs` and :attr:`logits`.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = OneHotCategorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
>>> m.sample() # equal probability of 0, 1, 2, 3
tensor([ 0., 0., 0., 1.])
Args:
probs (Tensor): event probabilities
logits (Tensor): event log probabilities (unnormalized)
"""
# pyrefly: ignore [bad-override]
arg_constraints = {"probs": constraints.simplex, "logits": constraints.real_vector}
support = constraints.one_hot
has_enumerate_support = True
def __init__(
self,
probs: Optional[Tensor] = None,
logits: Optional[Tensor] = None,
validate_args: Optional[bool] = None,
) -> None:
self._categorical = Categorical(probs, logits)
batch_shape = self._categorical.batch_shape
event_shape = self._categorical.param_shape[-1:]
super().__init__(batch_shape, event_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(OneHotCategorical, _instance)
batch_shape = torch.Size(batch_shape)
new._categorical = self._categorical.expand(batch_shape)
super(OneHotCategorical, new).__init__(
batch_shape, self.event_shape, validate_args=False
)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._categorical._new(*args, **kwargs)
@property
def _param(self) -> Tensor:
return self._categorical._param
@property
def probs(self) -> Tensor:
return self._categorical.probs
@property
def logits(self) -> Tensor:
return self._categorical.logits
@property
def mean(self) -> Tensor:
return self._categorical.probs
@property
def mode(self) -> Tensor:
probs = self._categorical.probs
mode = probs.argmax(dim=-1)
return torch.nn.functional.one_hot(mode, num_classes=probs.shape[-1]).to(probs)
@property
def variance(self) -> Tensor:
return self._categorical.probs * (1 - self._categorical.probs)
@property
def param_shape(self) -> torch.Size:
return self._categorical.param_shape
def sample(self, sample_shape=torch.Size()):
sample_shape = torch.Size(sample_shape)
probs = self._categorical.probs
num_events = self._categorical._num_events
indices = self._categorical.sample(sample_shape)
return torch.nn.functional.one_hot(indices, num_events).to(probs)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
indices = value.max(-1)[1]
return self._categorical.log_prob(indices)
def entropy(self):
return self._categorical.entropy()
def enumerate_support(self, expand=True):
n = self.event_shape[0]
values = torch.eye(n, dtype=self._param.dtype, device=self._param.device)
values = values.view((n,) + (1,) * len(self.batch_shape) + (n,))
if expand:
values = values.expand((n,) + self.batch_shape + (n,))
return values
| OneHotCategorical |
python | pytorch__pytorch | test/mobile/model_test/tensor_ops.py | {
"start": 8028,
"end": 8578
} | class ____(torch.nn.Module):
def forward(self):
return self.tensor_view_ops()
def tensor_view_ops(self):
x = torch.randn(4, 4, 1)
y = torch.randn(4, 4, 2)
return len(
x[0, 2:],
x.detach(),
x.detach_(),
x.diagonal(),
x.expand(-1, -1, 3),
x.expand_as(y),
x.select(0, 1),
x.unflatten(1, (2, 2)),
x.unfold(1, 2, 2),
x.view(16),
x.view_as(torch.randn(16)),
)
| TensorViewOpsModule |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper.py | {
"start": 24211,
"end": 26517
} | class ____(MemoryPlanningLine):
node: BufferLike
def __post_init__(self):
assert V.graph.scheduler.current_node is not None
self.scheduler_node_index = V.graph.scheduler.nodes.index(
V.graph.scheduler.current_node
)
def should_reuse_buffer(self, free_line: FreeIfNotReusedLine, size: int) -> bool:
if free_line.scheduler_node_index + 1 == self.scheduler_node_index:
return True
overall_peak_memory = self.wrapper.estimate_peak.overall_peak_memory
peak_memory_in_range = self.wrapper.estimate_peak.peak_between(free_line, self)
new_peak_memory = size + peak_memory_in_range
return new_peak_memory <= overall_peak_memory
def plan(self, state: MemoryPlanningState) -> MemoryPlanningLine:
if self.node.get_name() in V.graph.removed_buffers:
return NullLine(self.wrapper)
# try to reuse a recently freed buffer
key = buffer_reuse_key(self.node)
if config.allow_buffer_reuse and key in state:
free_line = state.pop(key)
size = V.graph.sizevars.size_hint(
V.graph.get_allocation_storage_size(self.node), fallback=0
) * get_dtype_size(self.node.get_dtype())
if self.should_reuse_buffer(free_line, size):
free_line.is_reused = True
self.wrapper.estimate_peak.update_peak_between(free_line, self)
return ReuseLine(self.wrapper, free_line.node, self.node)
else:
state.push(key, free_line)
return self
if self.node.get_device_or_error().type == "cpu":
static_shape = self.wrapper.static_shape_for_buffer_or_none(self.node)
if static_shape is not None:
state.total_allocated_buffer_size += int(
functools.reduce(operator.mul, static_shape, 1)
)
return self
def codegen(self, code: IndentedBuffer) -> None:
assert self.node.get_name() not in V.graph.removed_buffers
line = self.wrapper.make_buffer_allocation(self.node)
code.writeline(line)
def codegen_fx(self, converter: FxConverter) -> FxConversionFunc:
return converter._generate_allocate
@dataclasses.dataclass
| AllocateLine |
python | conda__conda | conda/gateways/repodata/jlap/interface.py | {
"start": 4391,
"end": 4659
} | class ____(JlapRepoInterface):
"""
Support repodata.json.zst (if available) without checking .jlap
"""
def _repodata_state_copy(self, state: dict | RepodataState):
return RepodataStateSkipFormat(dict=state, skip_formats=["jlap"])
| ZstdRepoInterface |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 142592,
"end": 143480
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of ApproveDeployments"""
__schema__ = github_schema
__field_names__ = ("workflow_run_id", "environment_ids", "comment", "client_mutation_id")
workflow_run_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="workflowRunId")
"""The node ID of the workflow run containing the pending
deployments.
"""
environment_ids = sgqlc.types.Field(sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(ID))), graphql_name="environmentIds")
"""The ids of environments to reject deployments"""
comment = sgqlc.types.Field(String, graphql_name="comment")
"""Optional comment for approving deployments"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| ApproveDeploymentsInput |
python | getsentry__sentry | tests/snuba/rules/conditions/test_event_frequency.py | {
"start": 51384,
"end": 51599
} | class ____(ErrorEventMixin, EventFrequencyConditionTestCase):
pass
@freeze_time(
(timezone.now() - timedelta(days=2)).replace(hour=12, minute=40, second=0, microsecond=0)
)
| ErrorIssueFrequencyConditionTestCase |
python | ansible__ansible | test/units/module_utils/facts/test_facts.py | {
"start": 6038,
"end": 6227
} | class ____(BaseTestFactsPlatform):
platform_id = 'OpenBSD'
fact_class = network.openbsd.OpenBSDNetwork
collector_class = network.openbsd.OpenBSDNetworkCollector
| TestOpenBSDNetwork |
python | falconry__falcon | tests/test_cookies.py | {
"start": 2632,
"end": 17856
} | class ____:
def on_get(self, req, resp):
# change lax to strict
resp.unset_cookie('foo', same_site='Strict')
# change strict to lax
resp.unset_cookie('bar')
# change none to ''
resp.unset_cookie('baz', same_site='')
# change '' to none
resp.unset_cookie('barz', same_site='None')
@pytest.fixture
def client(asgi, util):
app = util.create_app(asgi)
app.add_route('/', CookieResource())
app.add_route('/test-convert', CookieResourceMaxAgeFloatString())
app.add_route('/same-site', CookieResourceSameSite())
app.add_route('/partitioned', CookieResourcePartitioned())
app.add_route('/unset-cookie', CookieUnset())
app.add_route('/unset-cookie-same-site', CookieUnsetSameSite())
return testing.TestClient(app)
# =====================================================================
# Response
# =====================================================================
def test_response_base_case(client):
result = client.simulate_get('/')
cookie = result.cookies['foo']
assert cookie.name == 'foo'
assert cookie.value == 'bar'
assert cookie.domain == 'example.com'
assert cookie.http_only
# NOTE(kgriffs): Explicitly test for None to ensure
# falcon.testing.Cookie is returning exactly what we
# expect. Apps using falcon.testing.Cookie can be a
# bit more cavalier if they wish.
assert cookie.max_age is None
assert cookie.expires is None
assert cookie.path == '/'
assert cookie.secure
def test_response_disable_secure_globally(client):
client.app.resp_options.secure_cookies_by_default = False
result = client.simulate_get('/')
cookie = result.cookies['foo']
assert not cookie.secure
client.app.resp_options.secure_cookies_by_default = True
result = client.simulate_get('/')
cookie = result.cookies['foo']
assert cookie.secure
def test_response_complex_case(client):
result = client.simulate_head('/')
assert len(result.cookies) == 3
cookie = result.cookies['foo']
assert cookie.value == 'bar'
assert cookie.domain is None
assert cookie.expires is None
assert cookie.http_only
assert cookie.max_age == 300
assert cookie.path is None
assert cookie.secure
assert not cookie.partitioned
cookie = result.cookies['bar']
assert cookie.value == 'baz'
assert cookie.domain is None
assert cookie.expires is None
assert not cookie.http_only
assert cookie.max_age is None
assert cookie.path is None
assert cookie.secure
assert not cookie.partitioned
cookie = result.cookies['bad']
assert cookie.value == '' # An unset cookie has an empty value
assert cookie.domain is None
assert cookie.same_site == 'Lax'
assert cookie.expires < _utcnow()
# NOTE(kgriffs): I know accessing a private attr like this is
# naughty of me, but we just need to sanity-check that the
# string is GMT.
assert cookie._expires.endswith('GMT')
assert cookie.http_only
assert cookie.max_age is None
assert cookie.path is None
assert cookie.secure
def test_unset_cookies(client):
result = client.simulate_get('/unset-cookie')
assert len(result.cookies) == 5
def test(cookie, path, domain, same_site='Lax'):
assert cookie.value == '' # An unset cookie has an empty value
assert cookie.domain == domain
assert cookie.path == path
assert cookie.same_site == same_site
assert cookie.expires < _utcnow()
test(result.cookies['foo'], path=None, domain=None)
test(result.cookies['bar'], path='/bar', domain=None)
test(result.cookies['baz'], path=None, domain='www.example.com')
test(result.cookies['foobar'], path='/foo', domain='www.example.com')
test(
result.cookies['barfoo'],
path='/foo',
domain='www.example.com',
same_site='none',
)
def test_unset_cookies_samesite(client):
    """SameSite values survive set_cookie and unset_cookie round trips."""
    # Each HTTP method on the /same-site resource sets cookies with a
    # different SameSite attribute.
    result_set_lax_empty = client.simulate_get('/same-site')  # foo, bar, lax
    result_set_strict = client.simulate_post('/same-site')  # bar, foo, strict
    result_set_none = client.simulate_put('/same-site')  # baz, foo, none

    def check_set(cookie, value, samesite=None):
        assert cookie.value == value
        assert cookie.same_site == samesite

    check_set(result_set_lax_empty.cookies['foo'], 'bar', samesite='Lax')
    check_set(result_set_strict.cookies['bar'], 'foo', samesite='Strict')
    check_set(result_set_none.cookies['baz'], 'foo', samesite='None')
    # barz is set with '', which maps to a missing SameSite (None).
    check_set(result_set_lax_empty.cookies['barz'], 'barz')
    check_set(result_set_lax_empty.cookies['barz'], 'barz', samesite=None)

    # Now unset the cookies, each with a different SameSite value.
    result_unset = client.simulate_get('/unset-cookie-same-site')
    assert len(result_unset.cookies) == 4

    def check_unset(cookie, same_site='Lax'):
        # An unset cookie has an empty value and an expiry in the past.
        assert cookie.value == ''
        assert cookie.same_site == same_site
        assert cookie.expires < _utcnow()

    check_unset(result_unset.cookies['foo'], same_site='Strict')
    # bar is unset without a same_site argument, so it defaults to Lax.
    check_unset(result_unset.cookies['bar'], same_site='Lax')
    check_unset(result_unset.cookies['bar'])
    # baz is unset with same_site='', so no SameSite attribute is emitted.
    check_unset(result_unset.cookies['baz'], same_site=None)
    check_unset(result_unset.cookies['barz'], same_site='None')

    # Sanity-check the negative cases as well.
    assert result_unset.cookies['baz'].same_site != 'Strict'
    assert result_unset.cookies['foo'].same_site != 'Lax'
    assert not result_unset.cookies['baz'].same_site
def test_cookie_expires_naive(client):
    """A naive 'expires' datetime is reported as UTC by the test client."""
    cookie = client.simulate_post('/').cookies['foo']

    assert cookie.value == 'bar'
    assert cookie.domain is None
    assert cookie.expires == datetime(year=2050, month=1, day=1, tzinfo=timezone.utc)

    # None of the optional attributes were set on this cookie.
    assert not cookie.http_only
    assert cookie.max_age is None
    assert cookie.path is None
    assert not cookie.secure
def test_cookie_expires_aware(client):
    """A timezone-aware 'expires' datetime is normalized to UTC."""
    cookie = client.simulate_put('/').cookies['foo']

    assert cookie.value == 'bar'
    assert cookie.domain is None
    assert cookie.expires == datetime(
        year=2049, month=12, day=31, hour=23, tzinfo=timezone.utc
    )

    # None of the optional attributes were set on this cookie.
    assert not cookie.http_only
    assert cookie.max_age is None
    assert cookie.path is None
    assert not cookie.secure
def test_cookies_setable():
    """set_cookie stores Morsels; a later call for the same name wins."""
    resp = falcon.Response()
    assert resp._cookies is None

    # The second call for 'foo' replaces the first one entirely.
    resp.set_cookie('foo', 'wrong-cookie', max_age=301)
    resp.set_cookie('foo', 'bar', max_age=300)
    resp.set_cookie('bar', 'baz', same_site='None', partitioned=True)

    foo_morsel = resp._cookies['foo']
    assert isinstance(foo_morsel, http_cookies.Morsel)
    assert foo_morsel.key == 'foo'
    assert foo_morsel.value == 'bar'
    assert foo_morsel['max-age'] == 300

    bar_morsel = resp._cookies['bar']
    assert isinstance(bar_morsel, http_cookies.Morsel)
    assert bar_morsel.key == 'bar'
    assert bar_morsel.value == 'baz'
    assert bar_morsel['partitioned'] is True
    # SameSite=None implies Secure; Partitioned and HttpOnly are rendered too.
    assert bar_morsel.output() == (
        'Set-Cookie: bar=baz; HttpOnly; Partitioned; SameSite=None; Secure'
    )
@pytest.mark.parametrize('cookie_name', ('foofloat', 'foostring'))
def test_cookie_max_age_float_and_string(client, cookie_name):
    """Float and string max-age values are coerced to integers.

    NOTE(tbug): Falcon implicitly converts max-age values to integers,
    to ensure RFC 6265-compliance of the attribute value.
    """
    cookie = client.simulate_get('/test-convert').cookies[cookie_name]

    assert cookie.value == 'bar'
    assert cookie.max_age == 15

    # No other attributes were set on this cookie.
    assert cookie.domain is None
    assert cookie.expires is None
    assert not cookie.http_only
    assert cookie.path is None
    assert not cookie.secure
def test_response_unset_cookie():
    """unset_cookie overrides a previously set cookie and expires it."""
    resp = falcon.Response()
    resp.unset_cookie('bad')
    resp.set_cookie('bad', 'cookie', max_age=300)
    resp.unset_cookie('bad')

    # Only a single morsel remains; the unset call replaced the set one.
    morsels = list(resp._cookies.values())
    assert len(morsels) == 1

    unset_morsel = morsels[0]
    assert unset_morsel['expires'] == -1

    rendered = unset_morsel.OutputString()
    # The value must be emptied; either bare or quoted form is acceptable.
    assert 'bad=;' in rendered or 'bad="";' in rendered

    expires_attr = re.search('expires=([^;]+)', rendered)
    assert expires_attr
    # The rendered expiration must be an HTTP-date in the past.
    expiration = http_date_to_dt(expires_attr.group(1), obs_date=True)
    assert expiration < _utcnow()
# =====================================================================
# Request
# =====================================================================
def test_request_cookie_parsing():
    """Parse a realistic Cookie header into req.cookies and get_cookie_values."""
    # testing with a github-ish set of cookies
    # NOTE(review): the header mixes tight and loose whitespace around '='
    # and ';' on purpose, to exercise the parser's tolerance.
    headers = [
        (
            'Cookie',
            """
            logged_in=no;_gh_sess=eyJzZXXzaW9uX2lkIjoiN2;
            tz=Europe/Berlin; _ga =GA1.2.332347814.1422308165;
            tz2=Europe/Paris ; _ga2="line1\\012line2";
            tz3=Europe/Madrid ;_ga3= GA3.2.332347814.1422308165;
            _gat=1;
            _octo=GH1.1.201722077.1422308165
            """,
        ),
    ]
    environ = testing.create_environ(headers=headers)
    req = falcon.Request(environ)

    # NOTE(kgriffs): Test case-sensitivity
    assert req.get_cookie_values('TZ') is None
    assert 'TZ' not in req.cookies
    with pytest.raises(KeyError):
        req.cookies['TZ']

    # Every cookie in the header must be parsed, with surrounding
    # whitespace stripped and octal escapes (\012 -> \n) decoded.
    for name, value in [
        ('logged_in', 'no'),
        ('_gh_sess', 'eyJzZXXzaW9uX2lkIjoiN2'),
        ('tz', 'Europe/Berlin'),
        ('tz2', 'Europe/Paris'),
        ('tz3', 'Europe/Madrid'),
        ('_ga', 'GA1.2.332347814.1422308165'),
        ('_ga2', 'line1\nline2'),
        ('_ga3', 'GA3.2.332347814.1422308165'),
        ('_gat', '1'),
        ('_octo', 'GH1.1.201722077.1422308165'),
    ]:
        assert name in req.cookies
        assert req.cookies[name] == value
        assert req.get_cookie_values(name) == [value]
def test_invalid_cookies_are_ignored():
    """Cookies whose names contain illegal characters are silently dropped."""
    # Characters not allowed in a cookie name: ASCII control characters,
    # high-bit bytes, and the RFC 2616 separator characters.
    bad_chars = [chr(i) for i in range(0x1F)]
    bad_chars += [chr(i) for i in range(0x7F, 0xFF)]
    bad_chars += '()<>@,;:\\"/[]?={} \x09'.split()

    for ch in bad_chars:
        environ = testing.create_environ(
            headers=[('Cookie', 'good_cookie=foo;bad' + ch + 'cookie=bar')]
        )
        req = falcon.Request(environ)

        # The valid cookie survives; the invalid one is ignored.
        assert req.cookies['good_cookie'] == 'foo'
        assert 'bad' + ch + 'cookie' not in req.cookies
def test_duplicate_cookie():
    """req.cookies keeps the first value; get_cookie_values keeps them all."""
    environ = testing.create_environ(
        headers=[('Cookie', 'x=1;bad{cookie=bar; x=2;x=3 ; x=4;')]
    )
    req = falcon.Request(environ)

    assert req.cookies['x'] == '1'
    assert req.get_cookie_values('x') == ['1', '2', '3', '4']
def test_cookie_header_is_missing():
    """Both cookie accessors behave sanely when no Cookie header is sent."""
    environ = testing.create_environ(headers={})

    req = falcon.Request(environ)
    assert req.cookies == {}
    assert req.get_cookie_values('x') is None

    # NOTE(kgriffs): Test again with a new object to cover calling in the
    # opposite order.
    req = falcon.Request(environ)
    assert req.get_cookie_values('x') is None
    assert req.cookies == {}
def test_unicode_inside_ascii_range():
    """Cookie names/values made only of ASCII characters are accepted."""
    resp = falcon.Response()

    # should be ok: none of these raise
    for name, value in (
        ('non_unicode_ascii_name_1', 'ascii_value'),
        ('unicode_ascii_name_1', 'ascii_value'),
        ('non_unicode_ascii_name_2', 'unicode_ascii_value'),
        ('unicode_ascii_name_2', 'unicode_ascii_value'),
    ):
        resp.set_cookie(name, value)
@pytest.mark.parametrize(
    'name', (UNICODE_TEST_STRING, UNICODE_TEST_STRING.encode('utf-8'), 42)
)
def test_non_ascii_name(name):
    """Cookie names that are not ASCII strings are rejected with KeyError."""
    with pytest.raises(KeyError):
        falcon.Response().set_cookie(name, 'ok_value')
@pytest.mark.parametrize(
    'value', (UNICODE_TEST_STRING, UNICODE_TEST_STRING.encode('utf-8'), 42)
)
def test_non_ascii_value(value):
    """Non-ASCII cookie values raise ValueError, never UnicodeEncodeError."""
    resp = falcon.Response()

    # NOTE(tbug): we need to grab the exception to check
    # that it is not instance of UnicodeEncodeError, so
    # we cannot simply use pytest.raises
    caught = None
    try:
        resp.set_cookie('ok_name', value)
    except ValueError as err:
        caught = err

    if caught is None:
        pytest.fail('set_bad_cookie_value did not fail as expected')

    assert isinstance(caught, ValueError)
    assert not isinstance(caught, UnicodeEncodeError)
def test_lax_same_site_value(client):
    """GET /same-site sets the foo cookie with SameSite=Lax."""
    cookies = client.simulate_get('/same-site').cookies
    assert cookies['foo'].same_site == 'Lax'
def test_strict_same_site_value(client):
    """POST /same-site sets the bar cookie with SameSite=Strict."""
    cookies = client.simulate_post('/same-site').cookies
    assert cookies['bar'].same_site == 'Strict'
def test_none_same_site_value(client):
    """PUT /same-site sets the baz cookie with SameSite=None."""
    cookies = client.simulate_put('/same-site').cookies
    assert cookies['baz'].same_site == 'None'
def test_same_site_empty_string(client):
    """DELETE /same-site sets baz with same_site='', yielding no attribute."""
    cookies = client.simulate_delete('/same-site').cookies
    assert cookies['baz'].same_site is None
@pytest.mark.parametrize(
    'same_site', ['laX', 'lax', 'STRICT', 'strict', 'None', 'none']
)
def test_same_site_value_case_insensitive(same_site):
    """The same_site argument is accepted regardless of letter case."""
    resp = falcon.Response()
    resp.set_cookie('foo', 'bar', same_site=same_site)

    # NOTE(kgriffs): Verify directly, unit-test style, since we
    # already tested end-to-end above.
    stored = resp._cookies['foo']['samesite']
    assert stored.lower() == same_site.lower()
@pytest.mark.parametrize('same_site', ['bogus', 'laxx', 'stric'])
def test_invalid_same_site_value(same_site):
    """Unrecognized SameSite values are rejected with ValueError."""
    with pytest.raises(ValueError):
        falcon.Response().set_cookie('foo', 'bar', same_site=same_site)
def test_partitioned_value(client):
    """Only cookies set with partitioned=True report the Partitioned flag."""
    cookies = client.simulate_get('/partitioned').cookies

    assert cookies['foo'].partitioned
    assert not cookies['bar'].partitioned
    assert not cookies['baz'].partitioned
def test_unset_cookie_deprecation_warning():
    """unset_cookie: 'samesite' is deprecated in favor of 'same_site'."""
    resp = falcon.Response()

    # The deprecated 'samesite' keyword still works, but warns...
    with pytest.warns(
        DeprecatedWarning, match='The "samesite" parameter is deprecated'
    ):
        resp.unset_cookie('test', samesite='Strict')
    # ...and the cookie is still set correctly.
    assert resp._cookies['test']['samesite'] == 'Strict'

    # The replacement 'same_site' keyword emits no warning at all.
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        resp.unset_cookie('test2', same_site='Lax')
    assert resp._cookies['test2']['samesite'] == 'Lax'

    # When both keywords are given, the deprecated one wins (with a warning).
    with pytest.warns(
        DeprecatedWarning, match='The "samesite" parameter is deprecated'
    ):
        resp.unset_cookie('test3', samesite='None', same_site='Strict')
    assert resp._cookies['test3']['samesite'] == 'None'
| CookieUnsetSameSite |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_ps.py | {
"start": 31727,
"end": 51625
} | class ____(FigureCanvasBase):
fixed_dpi = 72
filetypes = {'ps': 'Postscript',
'eps': 'Encapsulated Postscript'}
def get_default_filetype(self):
return 'ps'
def _print_ps(
self, fmt, outfile, *,
metadata=None, papertype=None, orientation='portrait',
bbox_inches_restore=None, **kwargs):
dpi = self.figure.dpi
self.figure.dpi = 72 # Override the dpi kwarg
dsc_comments = {}
if isinstance(outfile, (str, os.PathLike)):
filename = pathlib.Path(outfile).name
dsc_comments["Title"] = \
filename.encode("ascii", "replace").decode("ascii")
dsc_comments["Creator"] = (metadata or {}).get(
"Creator",
f"Matplotlib v{mpl.__version__}, https://matplotlib.org/")
# See https://reproducible-builds.org/specs/source-date-epoch/
source_date_epoch = os.getenv("SOURCE_DATE_EPOCH")
dsc_comments["CreationDate"] = (
datetime.datetime.fromtimestamp(
int(source_date_epoch),
datetime.timezone.utc).strftime("%a %b %d %H:%M:%S %Y")
if source_date_epoch
else time.ctime())
dsc_comments = "\n".join(
f"%%{k}: {v}" for k, v in dsc_comments.items())
if papertype is None:
papertype = mpl.rcParams['ps.papersize']
papertype = papertype.lower()
_api.check_in_list(['figure', *papersize], papertype=papertype)
orientation = _api.check_getitem(
_Orientation, orientation=orientation.lower())
printer = (self._print_figure_tex
if mpl.rcParams['text.usetex'] else
self._print_figure)
printer(fmt, outfile, dpi=dpi, dsc_comments=dsc_comments,
orientation=orientation, papertype=papertype,
bbox_inches_restore=bbox_inches_restore, **kwargs)
def _print_figure(
self, fmt, outfile, *,
dpi, dsc_comments, orientation, papertype,
bbox_inches_restore=None):
"""
Render the figure to a filesystem path or a file-like object.
Parameters are as for `.print_figure`, except that *dsc_comments* is a
string containing Document Structuring Convention comments,
generated from the *metadata* parameter to `.print_figure`.
"""
is_eps = fmt == 'eps'
if not (isinstance(outfile, (str, os.PathLike))
or is_writable_file_like(outfile)):
raise ValueError("outfile must be a path or a file-like object")
# find the appropriate papertype
width, height = self.figure.get_size_inches()
if is_eps or papertype == 'figure':
paper_width, paper_height = width, height
else:
paper_width, paper_height = orientation.swap_if_landscape(
papersize[papertype])
# center the figure on the paper
xo = 72 * 0.5 * (paper_width - width)
yo = 72 * 0.5 * (paper_height - height)
llx = xo
lly = yo
urx = llx + self.figure.bbox.width
ury = lly + self.figure.bbox.height
rotation = 0
if orientation is _Orientation.landscape:
llx, lly, urx, ury = lly, llx, ury, urx
xo, yo = 72 * paper_height - yo, xo
rotation = 90
bbox = (llx, lly, urx, ury)
self._pswriter = StringIO()
# mixed mode rendering
ps_renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
renderer = MixedModeRenderer(
self.figure, width, height, dpi, ps_renderer,
bbox_inches_restore=bbox_inches_restore)
self.figure.draw(renderer)
def print_figure_impl(fh):
# write the PostScript headers
if is_eps:
print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
else:
print("%!PS-Adobe-3.0", file=fh)
if papertype != 'figure':
print(f"%%DocumentPaperSizes: {papertype}", file=fh)
print("%%Pages: 1", file=fh)
print(f"%%LanguageLevel: 3\n"
f"{dsc_comments}\n"
f"%%Orientation: {orientation.name}\n"
f"{_get_bbox_header(bbox)}\n"
f"%%EndComments\n",
end="", file=fh)
Ndict = len(_psDefs)
print("%%BeginProlog", file=fh)
if not mpl.rcParams['ps.useafm']:
Ndict += len(ps_renderer._character_tracker.used)
print("/mpldict %d dict def" % Ndict, file=fh)
print("mpldict begin", file=fh)
print("\n".join(_psDefs), file=fh)
if not mpl.rcParams['ps.useafm']:
for font_path, chars \
in ps_renderer._character_tracker.used.items():
if not chars:
continue
fonttype = mpl.rcParams['ps.fonttype']
# Can't use more than 255 chars from a single Type 3 font.
if len(chars) > 255:
fonttype = 42
fh.flush()
if fonttype == 3:
fh.write(_font_to_ps_type3(font_path, chars))
else: # Type 42 only.
_font_to_ps_type42(font_path, chars, fh)
print("end", file=fh)
print("%%EndProlog", file=fh)
if not is_eps:
print("%%Page: 1 1", file=fh)
print("mpldict begin", file=fh)
print("%s translate" % _nums_to_str(xo, yo), file=fh)
if rotation:
print("%d rotate" % rotation, file=fh)
print(f"0 0 {_nums_to_str(width*72, height*72)} rectclip", file=fh)
# write the figure
print(self._pswriter.getvalue(), file=fh)
# write the trailer
print("end", file=fh)
print("showpage", file=fh)
if not is_eps:
print("%%EOF", file=fh)
fh.flush()
if mpl.rcParams['ps.usedistiller']:
# We are going to use an external program to process the output.
# Write to a temporary file.
with TemporaryDirectory() as tmpdir:
tmpfile = os.path.join(tmpdir, "tmp.ps")
with open(tmpfile, 'w', encoding='latin-1') as fh:
print_figure_impl(fh)
if mpl.rcParams['ps.usedistiller'] == 'ghostscript':
_try_distill(gs_distill,
tmpfile, is_eps, ptype=papertype, bbox=bbox)
elif mpl.rcParams['ps.usedistiller'] == 'xpdf':
_try_distill(xpdf_distill,
tmpfile, is_eps, ptype=papertype, bbox=bbox)
_move_path_to_path_or_stream(tmpfile, outfile)
else: # Write directly to outfile.
with cbook.open_file_cm(outfile, "w", encoding="latin-1") as file:
if not file_requires_unicode(file):
file = codecs.getwriter("latin-1")(file)
print_figure_impl(file)
def _print_figure_tex(
self, fmt, outfile, *,
dpi, dsc_comments, orientation, papertype,
bbox_inches_restore=None):
"""
If :rc:`text.usetex` is True, a temporary pair of tex/eps files
are created to allow tex to manage the text layout via the PSFrags
package. These files are processed to yield the final ps or eps file.
The rest of the behavior is as for `._print_figure`.
"""
is_eps = fmt == 'eps'
width, height = self.figure.get_size_inches()
xo = 0
yo = 0
llx = xo
lly = yo
urx = llx + self.figure.bbox.width
ury = lly + self.figure.bbox.height
bbox = (llx, lly, urx, ury)
self._pswriter = StringIO()
# mixed mode rendering
ps_renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
renderer = MixedModeRenderer(self.figure,
width, height, dpi, ps_renderer,
bbox_inches_restore=bbox_inches_restore)
self.figure.draw(renderer)
# write to a temp file, we'll move it to outfile when done
with TemporaryDirectory() as tmpdir:
tmppath = pathlib.Path(tmpdir, "tmp.ps")
tmppath.write_text(
f"""\
%!PS-Adobe-3.0 EPSF-3.0
%%LanguageLevel: 3
{dsc_comments}
{_get_bbox_header(bbox)}
%%EndComments
%%BeginProlog
/mpldict {len(_psDefs)} dict def
mpldict begin
{"".join(_psDefs)}
end
%%EndProlog
mpldict begin
{_nums_to_str(xo, yo)} translate
0 0 {_nums_to_str(width*72, height*72)} rectclip
{self._pswriter.getvalue()}
end
showpage
""",
encoding="latin-1")
if orientation is _Orientation.landscape: # now, ready to rotate
width, height = height, width
bbox = (lly, llx, ury, urx)
# set the paper size to the figure size if is_eps. The
# resulting ps file has the given size with correct bounding
# box so that there is no need to call 'pstoeps'
if is_eps or papertype == 'figure':
paper_width, paper_height = orientation.swap_if_landscape(
self.figure.get_size_inches())
else:
paper_width, paper_height = papersize[papertype]
psfrag_rotated = _convert_psfrags(
tmppath, ps_renderer.psfrag, paper_width, paper_height,
orientation.name)
if (mpl.rcParams['ps.usedistiller'] == 'ghostscript'
or mpl.rcParams['text.usetex']):
_try_distill(gs_distill,
tmppath, is_eps, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
elif mpl.rcParams['ps.usedistiller'] == 'xpdf':
_try_distill(xpdf_distill,
tmppath, is_eps, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
_move_path_to_path_or_stream(tmppath, outfile)
print_ps = functools.partialmethod(_print_ps, "ps")
print_eps = functools.partialmethod(_print_ps, "eps")
def draw(self):
self.figure.draw_without_rendering()
return super().draw()
def _convert_psfrags(tmppath, psfrags, paper_width, paper_height, orientation):
"""
When we want to use the LaTeX backend with postscript, we write PSFrag tags
to a temporary postscript file, each one marking a position for LaTeX to
render some text. convert_psfrags generates a LaTeX document containing the
commands to convert those tags to text. LaTeX/dvips produces the postscript
file that includes the actual text.
"""
with mpl.rc_context({
"text.latex.preamble":
mpl.rcParams["text.latex.preamble"] +
mpl.texmanager._usepackage_if_not_loaded("color") +
mpl.texmanager._usepackage_if_not_loaded("graphicx") +
mpl.texmanager._usepackage_if_not_loaded("psfrag") +
r"\geometry{papersize={%(width)sin,%(height)sin},margin=0in}"
% {"width": paper_width, "height": paper_height}
}):
dvifile = TexManager().make_dvi(
"\n"
r"\begin{figure}""\n"
r" \centering\leavevmode""\n"
r" %(psfrags)s""\n"
r" \includegraphics*[angle=%(angle)s]{%(epsfile)s}""\n"
r"\end{figure}"
% {
"psfrags": "\n".join(psfrags),
"angle": 90 if orientation == 'landscape' else 0,
"epsfile": tmppath.resolve().as_posix(),
},
fontsize=10) # tex's default fontsize.
with TemporaryDirectory() as tmpdir:
psfile = os.path.join(tmpdir, "tmp.ps")
cbook._check_and_log_subprocess(
['dvips', '-q', '-R0', '-o', psfile, dvifile], _log)
shutil.move(psfile, tmppath)
# check if the dvips created a ps in landscape paper. Somehow,
# above latex+dvips results in a ps file in a landscape mode for a
# certain figure sizes (e.g., 8.3in, 5.8in which is a5). And the
# bounding box of the final output got messed up. We check see if
# the generated ps file is in landscape and return this
# information. The return value is used in pstoeps step to recover
# the correct bounding box. 2010-06-05 JJL
with open(tmppath) as fh:
psfrag_rotated = "Landscape" in fh.read(1000)
return psfrag_rotated
def _try_distill(func, tmppath, *args, **kwargs):
try:
func(str(tmppath), *args, **kwargs)
except mpl.ExecutableNotFoundError as exc:
_log.warning("%s. Distillation step skipped.", exc)
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's pswrite or epswrite device to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. The output is low-level, converting text to outlines.
"""
if eps:
paper_option = ["-dEPSCrop"]
elif ptype == "figure":
# The bbox will have its lower-left corner at (0, 0), so upper-right
# corner corresponds with paper size.
paper_option = [f"-dDEVICEWIDTHPOINTS={bbox[2]}",
f"-dDEVICEHEIGHTPOINTS={bbox[3]}"]
else:
paper_option = [f"-sPAPERSIZE={ptype}"]
psfile = tmpfile + '.ps'
dpi = mpl.rcParams['ps.distiller.res']
cbook._check_and_log_subprocess(
[mpl._get_executable_info("gs").executable,
"-dBATCH", "-dNOPAUSE", "-r%d" % dpi, "-sDEVICE=ps2write",
*paper_option, f"-sOutputFile={psfile}", tmpfile],
_log)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
# While it is best if above steps preserve the original bounding
# box, there seem to be cases when it is not. For those cases,
# the original bbox can be restored during the pstoeps step.
if eps:
# For some versions of gs, above steps result in a ps file where the
# original bbox is no more correct. Do not adjust bbox for now.
pstoeps(tmpfile, bbox, rotated=rotated)
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. This distiller is preferred, generating high-level postscript
output that treats text as text.
"""
mpl._get_executable_info("gs") # Effectively checks for ps2pdf.
mpl._get_executable_info("pdftops")
if eps:
paper_option = ["-dEPSCrop"]
elif ptype == "figure":
# The bbox will have its lower-left corner at (0, 0), so upper-right
# corner corresponds with paper size.
paper_option = [f"-dDEVICEWIDTHPOINTS#{bbox[2]}",
f"-dDEVICEHEIGHTPOINTS#{bbox[3]}"]
else:
paper_option = [f"-sPAPERSIZE#{ptype}"]
with TemporaryDirectory() as tmpdir:
tmppdf = pathlib.Path(tmpdir, "tmp.pdf")
tmpps = pathlib.Path(tmpdir, "tmp.ps")
# Pass options as `-foo#bar` instead of `-foo=bar` to keep Windows
# happy (https://ghostscript.com/doc/9.56.1/Use.htm#MS_Windows).
cbook._check_and_log_subprocess(
["ps2pdf",
"-dAutoFilterColorImages#false",
"-dAutoFilterGrayImages#false",
"-sAutoRotatePages#None",
"-sGrayImageFilter#FlateEncode",
"-sColorImageFilter#FlateEncode",
*paper_option,
tmpfile, tmppdf], _log)
cbook._check_and_log_subprocess(
["pdftops", "-paper", "match", "-level3", tmppdf, tmpps], _log)
shutil.move(tmpps, tmpfile)
if eps:
pstoeps(tmpfile)
def _get_bbox_header(lbrt):
"""Return a PostScript header string for bounding box *lbrt*=(l, b, r, t)."""
l, b, r, t = lbrt
return (f"%%BoundingBox: {int(l)} {int(b)} {math.ceil(r)} {math.ceil(t)}\n"
f"%%HiResBoundingBox: {l:.6f} {b:.6f} {r:.6f} {t:.6f}")
def _get_rotate_command(lbrt):
"""Return a PostScript 90° rotation command for bounding box *lbrt*=(l, b, r, t)."""
l, b, r, t = lbrt
return f"{l+r:.2f} {0:.2f} translate\n90 rotate"
def pstoeps(tmpfile, bbox=None, rotated=False):
"""
Convert the postscript to encapsulated postscript. The bbox of
the eps file will be replaced with the given *bbox* argument. If
None, original bbox will be used.
"""
epsfile = tmpfile + '.eps'
with open(epsfile, 'wb') as epsh, open(tmpfile, 'rb') as tmph:
write = epsh.write
# Modify the header:
for line in tmph:
if line.startswith(b'%!PS'):
write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
if bbox:
write(_get_bbox_header(bbox).encode('ascii') + b'\n')
elif line.startswith(b'%%EndComments'):
write(line)
write(b'%%BeginProlog\n'
b'save\n'
b'countdictstack\n'
b'mark\n'
b'newpath\n'
b'/showpage {} def\n'
b'/setpagedevice {pop} def\n'
b'%%EndProlog\n'
b'%%Page 1 1\n')
if rotated: # The output eps file need to be rotated.
write(_get_rotate_command(bbox).encode('ascii') + b'\n')
break
elif bbox and line.startswith((b'%%Bound', b'%%HiResBound',
b'%%DocumentMedia', b'%%Pages')):
pass
else:
write(line)
# Now rewrite the rest of the file, and modify the trailer.
# This is done in a second loop such that the header of the embedded
# eps file is not modified.
for line in tmph:
if line.startswith(b'%%EOF'):
write(b'cleartomark\n'
b'countdictstack\n'
b'exch sub { end } repeat\n'
b'restore\n'
b'showpage\n'
b'%%EOF\n')
elif line.startswith(b'%%PageBoundingBox'):
pass
else:
write(line)
os.remove(tmpfile)
shutil.move(epsfile, tmpfile)
FigureManagerPS = FigureManagerBase
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# https://www.adobe.com/content/dam/acom/en/devnet/actionscript/articles/PLRM.pdf
# http://preserve.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
_psDefs = [
# name proc *_d* -
# Note that this cannot be bound to /d, because when embedding a Type3 font
# we may want to define a "d" glyph using "/d{...} d" which would locally
# overwrite the definition.
"/_d { bind def } bind def",
# x y *m* -
"/m { moveto } _d",
# x y *l* -
"/l { lineto } _d",
# x y *r* -
"/r { rlineto } _d",
# x1 y1 x2 y2 x y *c* -
"/c { curveto } _d",
# *cl* -
"/cl { closepath } _d",
# *ce* -
"/ce { closepath eofill } _d",
# wx wy llx lly urx ury *setcachedevice* -
"/sc { setcachedevice } _d",
]
@_Backend.export
| FigureCanvasPS |
python | spack__spack | lib/spack/spack/test/error_messages.py | {
"start": 3190,
"end": 3440
} | class ____(Package):
version("2.1")
version("2.0")
variant("v1", default=True)
depends_on("t2")
depends_on("t2@:2.0", when="@:2.0")
depends_on("t3")
depends_on("t3~v1", when="@2.0")
""",
)
_pkgt3 = (
"t3",
"""\
| T4 |
python | plotly__plotly.py | plotly/graph_objs/layout/ternary/caxis/_title.py | {
"start": 235,
"end": 2875
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.ternary.caxis"
_path_str = "layout.ternary.caxis.title"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this axis' title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.ternary.caxis.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.layout.ternary.caxis.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of this axis.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this axis' title font.
text
Sets the title of this axis.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.ternary.caxis.Title`
font
Sets this axis' title font.
text
Sets the title of this axis.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.ternary.caxis.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.ternary.caxis.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | PrefectHQ__prefect | tests/cli/test_api_command.py | {
"start": 15355,
"end": 16920
} | class ____:
"""Test edge cases and special scenarios."""
def test_invalid_http_method(self, respx_mock: MockRouter) -> None:
"""Test invalid HTTP method shows helpful error."""
with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
result = invoke_and_assert(
["api", "INVALID", "/flows"],
expected_code=1,
)
assert "Invalid HTTP method" in result.output
def test_empty_response_body(self, respx_mock: MockRouter) -> None:
"""Test handling of empty response bodies."""
respx_mock.delete("http://localhost:4200/api/flows/123").mock(
return_value=httpx.Response(204)
)
with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
invoke_and_assert(
["api", "DELETE", "/flows/123"],
expected_code=0,
)
def test_non_json_response(self, respx_mock: MockRouter) -> None:
"""Test handling of non-JSON responses."""
respx_mock.get("http://localhost:4200/api/some-text-endpoint").mock(
return_value=httpx.Response(
200, text="Plain text response", headers={"content-type": "text/plain"}
)
)
with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}):
result = invoke_and_assert(
["api", "GET", "/some-text-endpoint"],
expected_code=0,
)
assert "Plain text response" in result.output
| TestEdgeCases |
python | matplotlib__matplotlib | lib/matplotlib/transforms.py | {
"start": 88413,
"end": 89616
} | class ____(Affine2DBase):
"""
`BboxTransformTo` is a transformation that linearly transforms points from
the unit bounding box to a given `Bbox`.
"""
is_separable = True
def __init__(self, boxout, **kwargs):
"""
Create a new `BboxTransformTo` that linearly transforms
points from the unit bounding box to *boxout*.
"""
_api.check_isinstance(BboxBase, boxout=boxout)
super().__init__(**kwargs)
self._boxout = boxout
self.set_children(boxout)
self._mtx = None
self._inverted = None
__str__ = _make_str_method("_boxout")
def get_matrix(self):
# docstring inherited
if self._invalid:
outl, outb, outw, outh = self._boxout.bounds
if DEBUG and (outw == 0 or outh == 0):
raise ValueError("Transforming to a singular bounding box.")
self._mtx = np.array([[outw, 0.0, outl],
[ 0.0, outh, outb],
[ 0.0, 0.0, 1.0]],
float)
self._inverted = None
self._invalid = 0
return self._mtx
| BboxTransformTo |
python | huggingface__transformers | src/transformers/models/csm/processing_csm.py | {
"start": 1169,
"end": 1271
} | class ____(AudioKwargs, total=False):
encoded_length_kwargs: Optional[dict[str, Any]]
| CsmAudioKwargs |
python | dagster-io__dagster | python_modules/dagster/dagster/_utils/concurrency.py | {
"start": 1607,
"end": 1778
} | class ____:
run_id: str
step_key: str
enqueued_timestamp: datetime
assigned_timestamp: Optional[datetime]
priority: Optional[int]
@record
| PendingStepInfo |
python | numba__numba | numba/tests/test_support.py | {
"start": 13232,
"end": 14139
} | class ____(TestCase):
def test_assertRefCount(self):
# Use objects to avoid interning
x = object()
y = object()
l = []
with self.assertRefCount(x, y):
pass
with self.assertRaises(AssertionError) as cm:
# y gains a reference
with self.assertRefCount(x, y):
l.append(y)
self.assertIn(str(y), str(cm.exception))
def test_forbid_codegen(self):
"""
Test that forbid_codegen() prevents code generation using the @jit
decorator.
"""
def f():
return 1
with forbid_codegen():
with self.assertRaises(RuntimeError) as raises:
cfunc = jit(nopython=True)(f)
cfunc()
self.assertIn("codegen forbidden by test case", str(raises.exception))
if __name__ == '__main__':
unittest.main()
| TestMisc |
python | doocs__leetcode | solution/1300-1399/1399.Count Largest Group/Solution.py | {
"start": 0,
"end": 414
} | class ____:
def countLargestGroup(self, n: int) -> int:
cnt = Counter()
ans = mx = 0
for i in range(1, n + 1):
s = 0
while i:
s += i % 10
i //= 10
cnt[s] += 1
if mx < cnt[s]:
mx = cnt[s]
ans = 1
elif mx == cnt[s]:
ans += 1
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/minimum-runes-to-add-to-cast-spell.py | {
"start": 1225,
"end": 2054
} | class ____(object):
def minRunesToAdd(self, n, crystals, flowFrom, flowTo):
"""
:type n: int
:type crystals: List[int]
:type flowFrom: List[int]
:type flowTo: List[int]
:rtype: int
"""
adj = [[] for _ in xrange(n)]
for i in xrange(len(flowFrom)):
adj[flowFrom[i]].append(flowTo[i])
lookup = [-1]*n
sccs = strongly_connected_components(adj)
for i, scc in enumerate(sccs):
for x in scc:
lookup[x] = i
result = [False]*len(sccs)
for u in xrange(n):
for v in adj[u]:
if lookup[v] != lookup[u]:
result[lookup[v]] = True
for x in crystals:
result[lookup[x]] = True
return sum(not x for x in result)
| Solution |
python | pytorch__pytorch | test/distributed/test_functional_api.py | {
"start": 6309,
"end": 9105
} | class ____(MultiThreadedTestCase):
@property
def world_size(self):
return 4
def setUp(self):
super().setUp()
self._spawn_threads()
"""
The behavior we want is as follow:
- rankset+tag will always result in the same PG.
Do we enforce this by failing creation of new PGs or returning existing ones?
Return existing one.
- default tag gives existing behavior.
This means we should create duplicates.
- _expand_group on _default-tagged pg should always resolve to it
This mean we can't depend on empty tag + rankset.
"""
def test_pg_creation_with_tag(self):
my_group, _ = new_subgroups(group_size=2, pg_tag="blu")
my_group2, _ = new_subgroups(group_size=2, pg_tag="blu")
self.assertEqual(my_group, my_group2)
my_group3, _ = new_subgroups(group_size=2, pg_tag="blu2")
self.assertNotEqual(my_group, my_group3)
my_group4, _ = new_subgroups(group_size=2)
self.assertNotEqual(my_group, my_group4)
my_group5, _ = new_subgroups(group_size=2)
self.assertNotEqual(my_group4, my_group5)
def test_pg_lookup_roundtrip(self):
pg_tag0, _ = new_subgroups(group_size=2, pg_tag="blu")
pg_tag1, _ = new_subgroups(group_size=2, pg_tag="blu2")
pg_notag0, _ = new_subgroups(group_size=2)
pg_notag1, _ = new_subgroups(group_size=2)
def roundtrip(pg):
tag, rankset, _ = ft_c._expand_group(pg)
return c10d._find_pg_by_ranks_and_tag(tag, rankset)
self.assertEqual(pg_tag0, roundtrip(pg_tag0))
self.assertEqual(pg_tag1, roundtrip(pg_tag1))
self.assertEqual(pg_notag0, roundtrip(pg_notag0))
self.assertEqual(pg_notag1, roundtrip(pg_notag1))
def test_pg_lookup_with_tag(self):
pg_tag0, _ = new_subgroups(group_size=2, pg_tag="blu")
pg_tag1, _ = new_subgroups(group_size=2, pg_tag="bla")
pg_notag0, _ = new_subgroups(group_size=2)
def roundtrip(pg, pg_tag):
tag, rankset, _ = ft_c._expand_group(pg, pg_tag)
return c10d._find_pg_by_ranks_and_tag(tag, rankset)
self.assertEqual(pg_tag0, roundtrip(pg_tag1, "blu"))
self.assertEqual(pg_tag0, roundtrip(pg_notag0, "blu"))
# Cannot erase the tag of a PG
self.assertEqual(pg_tag0, roundtrip(pg_tag0, ""))
def test_find_or_create_pg(self):
pg = c10d._find_or_create_pg_by_ranks_and_tag("blu", [0, 1, 2, 3], 2)
pg_tag0, _ = new_subgroups(group_size=2, pg_tag="blu")
self.assertEqual(pg, pg_tag0)
def test_find_root_pg(self):
pg = c10d._find_pg_by_ranks_and_tag("", [0, 1, 2, 3])
self.assertEqual(dist.group.WORLD, pg)
@instantiate_parametrized_tests
@skipIfHpu
| TestPgTag |
python | altair-viz__altair | altair/vegalite/v6/api.py | {
"start": 30073,
"end": 32407
} | class ____(_BaseWhen):
"""
Utility class for ``when-then-otherwise`` conditions.
Represents the state after calling :func:`.when()`.
This partial state requires calling :meth:`When.then()` to finish the condition.
References
----------
`polars.when <https://docs.pola.rs/py-polars/html/reference/expressions/api/polars.when.html>`__
"""
def __init__(self, condition: _Condition, /) -> None:
self._condition = condition
def __repr__(self) -> str:
return f"{type(self).__name__}({self._condition!r})"
@overload
def then(self, statement: str, /, **kwds: Any) -> Then[_Condition]: ...
@overload
def then(self, statement: _Value, /, **kwds: Any) -> Then[_Conditions]: ...
@overload
def then(
self, statement: dict[str, Any] | SchemaBase, /, **kwds: Any
) -> Then[Any]: ...
def then(self, statement: _StatementType, /, **kwds: Any) -> Then[Any]:
"""
Attach a statement to this predicate.
Parameters
----------
statement
A spec or value to use when the preceding :func:`.when()` clause is true.
.. note::
``str`` will be encoded as `shorthand`_.
**kwds
Additional keyword args are added to the resulting ``dict``.
Returns
-------
:class:`Then`
.. _shorthand:
https://altair-viz.github.io/user_guide/encodings/index.html#encoding-shorthands
Examples
--------
Simple conditions may be expressed without defining a default::
import altair as alt
from altair.datasets import data
source = data.movies()
predicate = (alt.datum["IMDB Rating"] == None) | (
alt.datum["Rotten Tomatoes Rating"] == None
)
alt.Chart(source).mark_point(invalid=None).encode(
x="IMDB Rating:Q",
y="Rotten Tomatoes Rating:Q",
color=alt.when(predicate).then(alt.value("grey")),
)
"""
condition = self._when_then(statement, kwds)
if _is_condition_extra(condition, statement, kwds=kwds):
return Then(_Conditional(condition=condition))
else:
return Then(_Conditional(condition=[condition]))
| When |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol22.py | {
"start": 1126,
"end": 1211
} | class ____(Protocol[_T1, _T2]):
def m2(self, a: _T1 | _T2) -> tuple[_T1, _T2]: ...
| P4 |
python | doocs__leetcode | solution/0300-0399/0343.Integer Break/Solution2.py | {
"start": 0,
"end": 258
} | class ____:
def integerBreak(self, n: int) -> int:
if n < 4:
return n - 1
if n % 3 == 0:
return pow(3, n // 3)
if n % 3 == 1:
return pow(3, n // 3 - 1) * 4
return pow(3, n // 3) * 2
| Solution |
python | pytest-dev__pytest-xdist | src/xdist/scheduler/loadgroup.py | {
"start": 131,
"end": 2273
} | class ____(LoadScopeScheduling):
"""Implement load scheduling across nodes, but grouping test by xdist_group mark.
This class behaves very much like LoadScopeScheduling, but it groups tests by xdist_group mark
instead of the module or class to which they belong to.
"""
def __init__(self, config: pytest.Config, log: Producer | None = None) -> None:
super().__init__(config, log)
if log is None:
self.log = Producer("loadgroupsched")
else:
self.log = log.loadgroupsched
def _split_scope(self, nodeid: str) -> str:
"""Determine the scope (grouping) of a nodeid.
There are usually 3 cases for a nodeid::
example/loadsuite/test/test_beta.py::test_beta0
example/loadsuite/test/test_delta.py::Delta1::test_delta0
example/loadsuite/epsilon/__init__.py::epsilon.epsilon
#. Function in a test module.
#. Method of a class in a test module.
#. Doctest in a function in a package.
With loadgroup, two cases are added::
example/loadsuite/test/test_beta.py::test_beta0
example/loadsuite/test/test_delta.py::Delta1::test_delta0
example/loadsuite/epsilon/__init__.py::epsilon.epsilon
example/loadsuite/test/test_gamma.py::test_beta0@gname
example/loadsuite/test/test_delta.py::Gamma1::test_gamma0@gname
This function will group tests with the scope determined by splitting the first ``@``
from the right. That is, test will be grouped in a single work unit when they have
same group name. In the above example, scopes will be::
example/loadsuite/test/test_beta.py::test_beta0
example/loadsuite/test/test_delta.py::Delta1::test_delta0
example/loadsuite/epsilon/__init__.py::epsilon.epsilon
gname
gname
"""
if nodeid.rfind("@") > nodeid.rfind("]"):
# check the index of ']' to avoid the case: parametrize mark value has '@'
return nodeid.split("@")[-1]
else:
return nodeid
| LoadGroupScheduling |
python | walkccc__LeetCode | solutions/972. Equal Rational Numbers/972.py | {
"start": 0,
"end": 825
} | class ____:
def isRationalEqual(self, s: str, t: str) -> bool:
ratios = [1, 1 / 9, 1 / 99, 1 / 999, 1 / 9999]
def valueOf(s: str) -> float:
if s.find('(') == -1:
return float(s)
# Get the indices.
leftParenIndex = s.find('(')
rightParenIndex = s.find(')')
dotIndex = s.find('.')
# integerAndNonRepeating := <IntegerPart><.><NonRepeatingPart>
integerAndNonRepeating = float(s[:leftParenIndex])
nonRepeatingLength = leftParenIndex - dotIndex - 1
# repeating := <RepeatingPart>
repeating = int(s[leftParenIndex + 1:rightParenIndex])
repeatingLength = rightParenIndex - leftParenIndex - 1
return integerAndNonRepeating + repeating * 0.1**nonRepeatingLength * ratios[repeatingLength]
return abs(valueOf(s) - valueOf(t)) < 1e-9
| Solution |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_bedrock.py | {
"start": 1652,
"end": 2959
} | class ____:
MODEL_ID = "meta.llama2-13b-chat-v1"
TEST_PROMPT = "A very important question."
GENERATED_RESPONSE = "An important answer."
@pytest.fixture
def mock_runtime_conn(self) -> Generator[BaseAwsConnection, None, None]:
with mock.patch.object(BedrockRuntimeHook, "conn") as _conn:
_conn.invoke_model.return_value["body"].read.return_value = json.dumps(
{
"generation": self.GENERATED_RESPONSE,
"prompt_token_count": len(self.TEST_PROMPT),
"generation_token_count": len(self.GENERATED_RESPONSE),
"stop_reason": "stop",
}
)
yield _conn
@pytest.fixture
def runtime_hook(self) -> Generator[BedrockRuntimeHook, None, None]:
with mock_aws():
yield BedrockRuntimeHook(aws_conn_id="aws_default")
def test_invoke_model_prompt_good_combinations(self, mock_runtime_conn):
operator = BedrockInvokeModelOperator(
task_id="test_task",
model_id=self.MODEL_ID,
input_data={"input_data": {"prompt": self.TEST_PROMPT}},
)
response = operator.execute({})
assert response["generation"] == self.GENERATED_RESPONSE
| TestBedrockInvokeModelOperator |
python | doocs__leetcode | solution/1500-1599/1548.The Most Similar Path in a Graph/Solution.py | {
"start": 0,
"end": 969
} | class ____:
def mostSimilar(
self, n: int, roads: List[List[int]], names: List[str], targetPath: List[str]
) -> List[int]:
g = [[] for _ in range(n)]
for a, b in roads:
g[a].append(b)
g[b].append(a)
m = len(targetPath)
f = [[inf] * n for _ in range(m)]
pre = [[-1] * n for _ in range(m)]
for j, s in enumerate(names):
f[0][j] = targetPath[0] != s
for i in range(1, m):
for j in range(n):
for k in g[j]:
if (t := f[i - 1][k] + (targetPath[i] != names[j])) < f[i][j]:
f[i][j] = t
pre[i][j] = k
k = 0
mi = inf
for j in range(n):
if f[-1][j] < mi:
mi = f[-1][j]
k = j
ans = [0] * m
for i in range(m - 1, -1, -1):
ans[i] = k
k = pre[i][k]
return ans
| Solution |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_wasb_delete_blob.py | {
"start": 999,
"end": 2386
} | class ____:
_config = {
"container_name": "container",
"blob_name": "blob",
}
def setup_method(self):
args = {"owner": "airflow", "start_date": datetime.datetime(2017, 1, 1)}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
def test_init(self):
operator = WasbDeleteBlobOperator(task_id="wasb_operator_1", dag=self.dag, **self._config)
assert operator.container_name == self._config["container_name"]
assert operator.blob_name == self._config["blob_name"]
assert operator.is_prefix is False
assert operator.ignore_if_missing is False
operator = WasbDeleteBlobOperator(
task_id="wasb_operator_2", dag=self.dag, is_prefix=True, ignore_if_missing=True, **self._config
)
assert operator.is_prefix is True
assert operator.ignore_if_missing is True
@mock.patch("airflow.providers.microsoft.azure.operators.wasb_delete_blob.WasbHook", autospec=True)
def test_execute(self, mock_hook):
mock_instance = mock_hook.return_value
operator = WasbDeleteBlobOperator(
task_id="wasb_operator", dag=self.dag, is_prefix=True, ignore_if_missing=True, **self._config
)
operator.execute(None)
mock_instance.delete_file.assert_called_once_with("container", "blob", True, True)
| TestWasbDeleteBlobOperator |
python | getsentry__sentry | tests/sentry/seer/explorer/test_tools.py | {
"start": 51540,
"end": 59580
} | class ____(APITestCase, SpanTestCase, SnubaTestCase):
def setUp(self):
super().setUp()
self.ten_mins_ago = before_now(minutes=10)
@patch("sentry.seer.explorer.tools._convert_profile_to_execution_tree")
@patch("sentry.seer.explorer.tools.fetch_profile_data")
def test_rpc_get_profile_flamegraph_finds_transaction_profile(
self, mock_fetch_profile, mock_convert_tree
):
"""Test finding transaction profile via profile.id with wildcard query"""
profile_id_8char = "a1b2c3d4"
full_profile_id = profile_id_8char + "e5f6789012345678901234567"
# Create span with profile_id (top-level field)
span = self.create_span(
{
"description": "test span",
"profile_id": full_profile_id,
},
start_ts=self.ten_mins_ago,
duration=100,
)
self.store_spans([span], is_eap=True)
# Mock the profile data fetch and conversion
mock_fetch_profile.return_value = {"profile": {"frames": [], "stacks": [], "samples": []}}
mock_convert_tree.return_value = ([{"function": "main", "module": "app"}], "1")
result = rpc_get_profile_flamegraph(profile_id_8char, self.organization.id)
# Should find the profile via wildcard query
assert "execution_tree" in result
assert result["metadata"]["profile_id"] == full_profile_id
assert result["metadata"]["is_continuous"] is False
@patch("sentry.seer.explorer.tools._convert_profile_to_execution_tree")
@patch("sentry.seer.explorer.tools.fetch_profile_data")
def test_rpc_get_profile_flamegraph_finds_continuous_profile(
self, mock_fetch_profile, mock_convert_tree
):
"""Test finding continuous profile via profiler.id with wildcard query"""
profiler_id_8char = "b1c2d3e4"
full_profiler_id = profiler_id_8char + "f5a6b7c8d9e0f1a2b3c4d5e6"
# Create span with profiler_id in sentry_tags (continuous profile)
# Set profile_id to None since continuous profiles use profiler_id instead
span = self.create_span(
{
"description": "continuous span",
"profile_id": None,
"sentry_tags": {
"profiler_id": full_profiler_id,
},
},
start_ts=self.ten_mins_ago,
duration=200,
)
self.store_spans([span], is_eap=True)
# Mock the profile data
mock_fetch_profile.return_value = {
"chunk": {"profile": {"frames": [], "stacks": [], "samples": []}}
}
mock_convert_tree.return_value = ([{"function": "worker", "module": "tasks"}], "2")
result = rpc_get_profile_flamegraph(profiler_id_8char, self.organization.id)
# Should find via profiler.id and identify as continuous
assert "execution_tree" in result
assert result["metadata"]["profile_id"] == full_profiler_id
assert result["metadata"]["is_continuous"] is True
@patch("sentry.seer.explorer.tools._convert_profile_to_execution_tree")
@patch("sentry.seer.explorer.tools.fetch_profile_data")
def test_rpc_get_profile_flamegraph_aggregates_timestamps_across_spans(
self, mock_fetch_profile, mock_convert_tree
):
"""Test that min/max timestamps are aggregated across multiple spans with same profile"""
profile_id_8char = "c1d2e3f4"
full_profile_id = profile_id_8char + "a5b6c7d8e9f0a1b2c3d4e5f6"
# Create multiple spans with the same profile at different times
span1_time = self.ten_mins_ago
span2_time = self.ten_mins_ago + timedelta(minutes=2)
span3_time = self.ten_mins_ago + timedelta(minutes=5)
spans = [
self.create_span(
{
"description": f"span-{i}",
"profile_id": full_profile_id,
},
start_ts=start_time,
duration=100,
)
for i, start_time in enumerate([span1_time, span2_time, span3_time])
]
self.store_spans(spans, is_eap=True)
mock_fetch_profile.return_value = {"profile": {"frames": [], "stacks": [], "samples": []}}
mock_convert_tree.return_value = ([{"function": "test", "module": "test"}], "3")
result = rpc_get_profile_flamegraph(profile_id_8char, self.organization.id)
# Verify the aggregate query worked and got min/max timestamps
assert "execution_tree" in result
metadata = result["metadata"]
assert metadata["profile_id"] == full_profile_id
# Should have aggregated start_ts and end_ts from all spans
assert metadata["start_ts"] is not None
assert metadata["end_ts"] is not None
# The min should be from span1, max from span3
assert metadata["start_ts"] <= metadata["end_ts"]
@patch("sentry.seer.explorer.tools._convert_profile_to_execution_tree")
@patch("sentry.seer.explorer.tools.fetch_profile_data")
def test_rpc_get_profile_flamegraph_sliding_window_finds_old_profile(
self, mock_fetch_profile, mock_convert_tree
):
"""Test that sliding 14-day windows can find profiles from 20 days ago"""
profile_id_8char = "d1e2f3a4"
full_profile_id = profile_id_8char + "b5c6d7e8f9a0b1c2d3e4f5a6"
twenty_days_ago = before_now(days=20)
# Create span 20 days ago
span = self.create_span(
{
"description": "old span",
"profile_id": full_profile_id,
},
start_ts=twenty_days_ago,
duration=150,
)
self.store_spans([span], is_eap=True)
mock_fetch_profile.return_value = {"profile": {"frames": [], "stacks": [], "samples": []}}
mock_convert_tree.return_value = ([{"function": "old_function", "module": "old"}], "4")
result = rpc_get_profile_flamegraph(profile_id_8char, self.organization.id)
# Should find it via sliding window (second 14-day window)
assert "execution_tree" in result
assert result["metadata"]["profile_id"] == full_profile_id
@patch("sentry.seer.explorer.tools._convert_profile_to_execution_tree")
@patch("sentry.seer.explorer.tools.fetch_profile_data")
def test_rpc_get_profile_flamegraph_full_32char_id(self, mock_fetch_profile, mock_convert_tree):
"""Test with full 32-character profile ID (no wildcard needed)"""
full_profile_id = "e1f2a3b4c5d6e7f8a9b0c1d2e3f4a5b6"
span = self.create_span(
{
"description": "test span",
"profile_id": full_profile_id,
},
start_ts=self.ten_mins_ago,
duration=100,
)
self.store_spans([span], is_eap=True)
mock_fetch_profile.return_value = {"profile": {"frames": [], "stacks": [], "samples": []}}
mock_convert_tree.return_value = ([{"function": "handler", "module": "server"}], "5")
result = rpc_get_profile_flamegraph(full_profile_id, self.organization.id)
# Should work with full ID
assert "execution_tree" in result
assert result["metadata"]["profile_id"] == full_profile_id
def test_rpc_get_profile_flamegraph_not_found_in_90_days(self):
"""Test when profile ID doesn't match any spans in 90-day window"""
# Create a span without the profile we're looking for
span = self.create_span(
{
"description": "unrelated span",
"profile_id": "different12345678901234567890123",
},
start_ts=self.ten_mins_ago,
duration=100,
)
self.store_spans([span], is_eap=True)
result = rpc_get_profile_flamegraph("notfound", self.organization.id)
# Should return error indicating not found
assert "error" in result
assert "not found in the last 90 days" in result["error"]
| TestRpcGetProfileFlamegraph |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_v2_test.py | {
"start": 56560,
"end": 94100
} | class ____(test.TestCase):
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegex(ValueError,
'feature_columns must not be empty'):
fc_old.linear_model(features={}, feature_columns=[])
def test_should_be_feature_column(self):
with self.assertRaisesRegex(ValueError, 'must be a _FeatureColumn'):
fc_old.linear_model(features={'a': [[0]]}, feature_columns='NotSupported')
def test_should_be_dense_or_categorical_column(self):
class NotSupportedColumn(
BaseFeatureColumnForTests,
feature_column_v2_types.FeatureColumn,
fc_old._FeatureColumn,
):
@property
def _is_v2_column(self):
return True
@property
def name(self):
return 'NotSupportedColumn'
def transform_feature(self, transformation_cache, state_manager):
pass
def _transform_feature(self, inputs):
pass
@property
def parse_example_spec(self):
pass
@property
def _parse_example_spec(self):
pass
with self.assertRaisesRegex(
ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
fc_old.linear_model(
features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegex(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
fc_old.linear_model(
features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')})
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegex(
ValueError, 'Duplicate feature column name found for columns'):
fc_old.linear_model(
features={'a': [[0]]},
feature_columns=[fc.numeric_column('a'),
fc.numeric_column('a')])
def test_dense_bias(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = fc_old.linear_model(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
sess.run(price_var.assign([[10.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[15.], [55.]], self.evaluate(predictions))
def test_sparse_bias(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = fc_old.linear_model(features, [wire_cast])
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
self.assertAllClose([[0.], [0.], [0.], [0.]],
self.evaluate(wire_cast_var))
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))
def test_dense_and_sparse_bias(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
price = fc.numeric_column('price')
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
predictions = fc_old.linear_model(features, [wire_cast, price])
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
sess.run(price_var.assign([[10.]]))
self.assertAllClose([[1015.], [10065.]], self.evaluate(predictions))
def test_dense_and_sparse_column(self):
"""When the column is both dense and sparse, uses sparse tensors."""
class _DenseAndSparseColumn(BaseFeatureColumnForTests, fc.DenseColumn,
fc.CategoricalColumn, fc_old._DenseColumn,
fc_old._CategoricalColumn):
@property
def _is_v2_column(self):
return True
@property
def name(self):
return 'dense_and_sparse_column'
@property
def parse_example_spec(self):
return {self.name: parsing_ops.VarLenFeature(self.dtype)}
@property
def _parse_example_spec(self):
return self.parse_example_spec
def transform_feature(self, transformation_cache, state_manager):
raise ValueError('Should not use this method.')
def _transform_feature(self, inputs):
return inputs.get(self.name)
@property
def variable_shape(self):
return self.variable_shape
@property
def _variable_shape(self):
return self.variable_shape
def get_dense_tensor(self, transformation_cache, state_manager):
raise ValueError('Should not use this method.')
def _get_dense_tensor(self, inputs):
raise ValueError('Should not use this method.')
@property
def num_buckets(self):
return 4
@property
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
raise ValueError('Should not use this method.')
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
sp_tensor = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 0], [1, 1]],
values=[2, 0, 3],
dense_shape=[2, 2])
return fc.CategoricalColumn.IdWeightPair(sp_tensor, None)
dense_and_sparse_column = _DenseAndSparseColumn()
with ops.Graph().as_default():
sp_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {dense_and_sparse_column.name: sp_tensor}
predictions = fc_old.linear_model(features, [dense_and_sparse_column])
bias = get_linear_model_bias()
dense_and_sparse_column_var = get_linear_model_column_var(
dense_and_sparse_column)
with _initialized_session() as sess:
sess.run(
dense_and_sparse_column_var.assign([[10.], [100.], [1000.],
[10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))
def test_dense_multi_output(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = fc_old.linear_model(features, [price], units=3)
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
self.assertAllClose(np.zeros((1, 3)), self.evaluate(price_var))
sess.run(price_var.assign([[10., 100., 1000.]]))
sess.run(bias.assign([5., 6., 7.]))
self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]],
self.evaluate(predictions))
def test_sparse_multi_output(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = fc_old.linear_model(features, [wire_cast], units=3)
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
self.assertAllClose(np.zeros((4, 3)), self.evaluate(wire_cast_var))
sess.run(
wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.],
[1000., 1100., 1200.],
[10000., 11000., 12000.]]))
sess.run(bias.assign([5., 6., 7.]))
self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]],
self.evaluate(predictions))
def test_dense_multi_dimension(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
predictions = fc_old.linear_model(features, [price])
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([[0.], [0.]], self.evaluate(price_var))
sess.run(price_var.assign([[10.], [100.]]))
self.assertAllClose([[210.], [650.]], self.evaluate(predictions))
def test_sparse_multi_rank(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = array_ops.sparse_placeholder(dtypes.string)
wire_value = sparse_tensor.SparseTensorValue(
values=['omar', 'stringer', 'marlo', 'omar'], # hashed = [2, 0, 3, 2]
indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]],
dense_shape=[2, 2, 2])
features = {'wire_cast': wire_tensor}
predictions = fc_old.linear_model(features, [wire_cast])
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((4, 1)), self.evaluate(wire_cast_var))
self.assertAllClose(
np.zeros((2, 1)),
predictions.eval(feed_dict={wire_tensor: wire_value}))
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
self.assertAllClose(
[[1010.], [11000.]],
predictions.eval(feed_dict={wire_tensor: wire_value}))
def test_sparse_combiner(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = fc_old.linear_model(
features, [wire_cast], sparse_combiner='mean')
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [5010.]], self.evaluate(predictions))
def test_sparse_combiner_with_negative_weights(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
wire_cast_weights = fc.weighted_categorical_column(wire_cast, 'weights')
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {
'wire_cast': wire_tensor,
'weights': constant_op.constant([[1., 1., -1.0]])
}
predictions = fc_old.linear_model(
features, [wire_cast_weights], sparse_combiner='sum')
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [-9985.]], self.evaluate(predictions))
def test_dense_multi_dimension_multi_output(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
predictions = fc_old.linear_model(features, [price], units=3)
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
self.assertAllClose(np.zeros((2, 3)), self.evaluate(price_var))
sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]]))
sess.run(bias.assign([2., 3., 4.]))
self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]],
self.evaluate(predictions))
def test_raises_if_shape_mismatch(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
with self.assertRaisesRegex(
Exception,
r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
fc_old.linear_model(features, [price])
def test_dense_reshaping(self):
price = fc.numeric_column('price', shape=[1, 2])
with ops.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
predictions = fc_old.linear_model(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
self.assertAllClose([[0.], [0.]], self.evaluate(price_var))
self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
sess.run(price_var.assign([[10.], [100.]]))
self.assertAllClose([[210.], [650.]], self.evaluate(predictions))
def test_dense_multi_column(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
predictions = fc_old.linear_model(features, [price1, price2])
bias = get_linear_model_bias()
price1_var = get_linear_model_column_var(price1)
price2_var = get_linear_model_column_var(price2)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
self.assertAllClose([[0.], [0.]], self.evaluate(price1_var))
self.assertAllClose([[0.]], self.evaluate(price2_var))
self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
sess.run(price1_var.assign([[10.], [100.]]))
sess.run(price2_var.assign([[1000.]]))
sess.run(bias.assign([7.]))
self.assertAllClose([[3217.], [4657.]], self.evaluate(predictions))
def test_fills_cols_to_vars(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
cols_to_vars = {}
fc_old.linear_model(features, [price1, price2], cols_to_vars=cols_to_vars)
bias = get_linear_model_bias()
price1_var = get_linear_model_column_var(price1)
price2_var = get_linear_model_column_var(price2)
self.assertEqual(cols_to_vars['bias'], [bias])
self.assertEqual(cols_to_vars[price1], [price1_var])
self.assertEqual(cols_to_vars[price2], [price2_var])
def test_fills_cols_to_vars_partitioned_variables(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2', shape=3)
with ops.Graph().as_default():
features = {
'price1': [[1., 2.], [6., 7.]],
'price2': [[3., 4., 5.], [8., 9., 10.]]
}
cols_to_vars = {}
with variable_scope.variable_scope(
'linear',
partitioner=partitioned_variables.fixed_size_partitioner(2, axis=0)):
fc_old.linear_model(
features, [price1, price2], cols_to_vars=cols_to_vars)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertEqual([0.], self.evaluate(cols_to_vars['bias'][0]))
# Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables.
self.assertAllEqual([[0.]], self.evaluate(cols_to_vars[price1][0]))
self.assertAllEqual([[0.]], self.evaluate(cols_to_vars[price1][1]))
# Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and
# a [1, 1] Variable.
self.assertAllEqual([[0.], [0.]], self.evaluate(cols_to_vars[price2][0]))
self.assertAllEqual([[0.]], self.evaluate(cols_to_vars[price2][1]))
def test_fills_cols_to_output_tensors(self):
# Provide three _DenseColumn's to input_layer: a _NumericColumn, a
# _BucketizedColumn, and an _EmbeddingColumn. Only the _EmbeddingColumn
# creates a Variable.
apple_numeric_column = fc.numeric_column('apple_numeric_column')
banana_dense_feature = fc.numeric_column('banana_dense_feature')
banana_dense_feature_bucketized = fc.bucketized_column(
banana_dense_feature, boundaries=[0.])
cherry_sparse_column = fc.categorical_column_with_hash_bucket(
'cherry_sparse_feature', hash_bucket_size=5)
dragonfruit_embedding_column = fc.embedding_column(
cherry_sparse_column, dimension=10)
with ops.Graph().as_default():
features = {
'apple_numeric_column': [[3.], [4.]],
'banana_dense_feature': [[-1.], [4.]],
'cherry_sparse_feature': [['a'], ['x']],
}
cols_to_output_tensors = {}
all_cols = [
apple_numeric_column, banana_dense_feature_bucketized,
dragonfruit_embedding_column
]
input_layer = fc_old.input_layer(
features, all_cols, cols_to_output_tensors=cols_to_output_tensors)
# We check the mapping by checking that we have the right keys,
# and that the values (output_tensors) were indeed the ones used to
# form the input layer.
self.assertCountEqual(all_cols, cols_to_output_tensors.keys())
input_layer_inputs = [tensor for tensor in input_layer.op.inputs[:-1]]
output_tensors = [tensor for tensor in cols_to_output_tensors.values()]
self.assertCountEqual(input_layer_inputs, output_tensors)
def test_dense_collection(self):
price = fc.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
fc_old.linear_model(features, [price], weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
self.assertIn(bias, my_vars)
self.assertIn(price_var, my_vars)
def test_sparse_collection(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
fc_old.linear_model(features, [wire_cast], weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
self.assertIn(bias, my_vars)
self.assertIn(wire_cast_var, my_vars)
def test_dense_trainable_default(self):
price = fc.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
fc_old.linear_model(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertIn(bias, trainable_vars)
self.assertIn(price_var, trainable_vars)
def test_sparse_trainable_default(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
fc_old.linear_model(features, [wire_cast])
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
self.assertIn(bias, trainable_vars)
self.assertIn(wire_cast_var, trainable_vars)
def test_dense_trainable_false(self):
price = fc.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
fc_old.linear_model(features, [price], trainable=False)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual([], trainable_vars)
def test_sparse_trainable_false(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
fc_old.linear_model(features, [wire_cast], trainable=False)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual([], trainable_vars)
def test_column_order(self):
price_a = fc.numeric_column('price_a')
price_b = fc.numeric_column('price_b')
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
features = {
'price_a': [[1.]],
'price_b': [[3.]],
'wire_cast':
sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
}
fc_old.linear_model(
features, [price_a, wire_cast, price_b],
weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
self.assertIn('price_a', my_vars[0].name)
self.assertIn('price_b', my_vars[1].name)
self.assertIn('wire_cast', my_vars[2].name)
with ops.Graph().as_default() as g:
features = {
'price_a': [[1.]],
'price_b': [[3.]],
'wire_cast':
sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
}
fc_old.linear_model(
features, [wire_cast, price_b, price_a],
weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
self.assertIn('price_a', my_vars[0].name)
self.assertIn('price_b', my_vars[1].name)
self.assertIn('wire_cast', my_vars[2].name)
def test_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1.], [5.], [7.]], # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
with self.assertRaisesRegex(
ValueError,
r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc_old.linear_model(features, [price1, price2])
def test_subset_of_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
price3 = fc.numeric_column('price3')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]], # batchsize = 2
'price3': [[3.], [4.], [5.]] # batchsize = 3
}
with self.assertRaisesRegex(
ValueError,
r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc_old.linear_model(features, [price1, price2, price3])
def test_runtime_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
predictions = fc_old.linear_model(features, [price1, price2])
with _initialized_session() as sess:
with self.assertRaisesRegex(errors.OpError,
'must have the same size and shape'):
sess.run(
predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
def test_runtime_batch_size_matches(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
}
predictions = fc_old.linear_model(features, [price1, price2])
with _initialized_session() as sess:
sess.run(
predictions,
feed_dict={
features['price1']: [[1.], [5.]],
features['price2']: [[1.], [5.]],
})
def test_with_1d_sparse_tensor(self):
# This test does direct variable scope access + manipulations
# that require entering a legacy graph
with ops.Graph().as_default():
price = fc.numeric_column('price')
price_buckets = fc.bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
# Provides 1-dim tensor and dense tensor.
features = {
'price':
constant_op.constant([
-1.,
12.,
]),
'body-style':
sparse_tensor.SparseTensor(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,)),
}
self.assertEqual(1, features['price'].shape.ndims)
self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
net = fc_old.linear_model(features, [price_buckets, body_style])
with _initialized_session() as sess:
bias = get_linear_model_bias()
price_buckets_var = get_linear_model_column_var(price_buckets)
body_style_var = get_linear_model_column_var(body_style)
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
self.evaluate(net))
def test_with_1d_unknown_shape_sparse_tensor(self):
# This test needs to access variables bia variable scope & needs to be
# run inside of a legacy graph
with ops.Graph().as_default():
price = fc.numeric_column('price')
price_buckets = fc.bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
country = fc.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
# Provides 1-dim tensor and dense tensor.
features = {
'price': array_ops.placeholder(dtypes.float32),
'body-style': array_ops.sparse_placeholder(dtypes.string),
'country': array_ops.placeholder(dtypes.string),
}
self.assertIsNone(features['price'].shape.ndims)
self.assertIsNone(features['body-style'].get_shape().ndims)
price_data = np.array([-1., 12.])
body_style_data = sparse_tensor.SparseTensorValue(
indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
country_data = np.array(['US', 'CA'])
net = fc_old.linear_model(features, [price_buckets, body_style, country])
bias = get_linear_model_bias()
price_buckets_var = get_linear_model_column_var(price_buckets)
body_style_var = get_linear_model_column_var(body_style)
with _initialized_session() as sess:
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
sess.run(
net,
feed_dict={
features['price']: price_data,
features['body-style']: body_style_data,
features['country']: country_data
}))
def test_with_rank_0_feature(self):
price = fc.numeric_column('price')
features = {
'price': constant_op.constant(0),
}
self.assertEqual(0, features['price'].shape.ndims)
# Static rank 0 should fail
with self.assertRaisesRegex(ValueError, 'Feature .* cannot have rank 0'):
fc_old.linear_model(features, [price])
# This test needs to construct graph placeholders
# w/ dynamic rank 0, so we enter a graph
with ops.Graph().as_default():
# Dynamic rank 0 should fail
features = {
'price': array_ops.placeholder(dtypes.float32),
}
net = fc_old.linear_model(features, [price])
self.assertEqual(1, net.shape[1])
with _initialized_session() as sess:
with self.assertRaisesOpError('Feature .* cannot have rank 0'):
sess.run(net, feed_dict={features['price']: np.array(1)})
def test_multiple_linear_models(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features1 = {'price': [[1.], [5.]]}
features2 = {'price': [[2.], [10.]]}
predictions1 = fc_old.linear_model(features1, [price])
predictions2 = fc_old.linear_model(features2, [price])
bias1 = get_linear_model_bias(name='linear_model')
bias2 = get_linear_model_bias(name='linear_model_1')
price_var1 = get_linear_model_column_var(price, name='linear_model')
price_var2 = get_linear_model_column_var(price, name='linear_model_1')
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias1))
sess.run(price_var1.assign([[10.]]))
sess.run(bias1.assign([5.]))
self.assertAllClose([[15.], [55.]], self.evaluate(predictions1))
self.assertAllClose([0.], self.evaluate(bias2))
sess.run(price_var2.assign([[10.]]))
sess.run(bias2.assign([5.]))
self.assertAllClose([[25.], [105.]], self.evaluate(predictions2))
def test_linear_model_v1_shared_embedding_all_other_v2(self):
# SharedEmbeddingColumns are graph-only
with ops.Graph().as_default():
price = fc.numeric_column('price') # v2
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5) # v2
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10) # v2
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3) # v2
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3) # v2
shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2) # v1
all_cols = [
price, some_embedding_column, shared_embedding_a, shared_embedding_b
]
features = {
'price': [[3.], [4.]],
'sparse_feature': [['a'], ['x']],
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
fc_old.linear_model(features, all_cols)
bias = get_linear_model_bias()
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([0.], self.evaluate(bias))
def test_linear_model_v1_shared_embedding_with_v2_cat_all_other_v2(self):
# SharedEmbeddingColumns are graph-only
with ops.Graph().as_default():
price = fc.numeric_column('price') # v2
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5) # v2
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10) # v2
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3) # v2
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3) # v2
shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2) # v1
all_cols = [
price, some_embedding_column, shared_embedding_a, shared_embedding_b
]
features = {
'price': [[3.], [4.]],
'sparse_feature': [['a'], ['x']],
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
fc_old.linear_model(features, all_cols)
bias = get_linear_model_bias()
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([0.], self.evaluate(bias))
def test_linear_model_v1_v2_mix(self):
# SharedEmbeddingColumns are graph-only
with ops.Graph().as_default():
price = fc.numeric_column('price') # v2
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5) # v1
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10) # v1
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3) # v2
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3) # v2
shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2) # v1
all_cols = [
price, some_embedding_column, shared_embedding_a, shared_embedding_b
]
features = {
'price': [[3.], [4.]],
'sparse_feature': [['a'], ['x']],
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
fc_old.linear_model(features, all_cols)
bias = get_linear_model_bias()
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([0.], self.evaluate(bias))
def test_linear_model_v2_shared_embedding_all_other_v1(self):
# SharedEmbeddingColumns are graph-only
with ops.Graph().as_default():
price = fc.numeric_column('price') # v1
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5) # v1
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10) # v1
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3) # v2
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3) # v2
shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b], dimension=2) # v2
all_cols = [
price, some_embedding_column, shared_embedding_a, shared_embedding_b
]
features = {
'price': [[3.], [4.]],
'sparse_feature': [['a'], ['x']],
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
with self.assertRaisesRegex(ValueError,
'SharedEmbeddingColumns are not supported'):
fc_old.linear_model(features, all_cols)
| OldLinearModelTest |
python | yaml__pyyaml | lib/yaml/tokens.py | {
"start": 1359,
"end": 1410
} | class ____(Token):
id = '['
| FlowSequenceStartToken |
python | python-attrs__attrs | typing-examples/baseline.py | {
"start": 1160,
"end": 1556
} | class ____:
num: int | str = attrs.field(
validator=attrs.validators.or_(
# Various types of validators.
attrs.validators.ge(0),
attrs.validators.instance_of(str),
)
)
attrs.validators.set_disabled(True)
attrs.validators.set_disabled(False)
with attrs.validators.disabled():
Validated(num=-1)
@attrs.define
| ValidatedInconsistentOr |
python | getsentry__sentry | tests/sentry/projects/project_rules/test_creator.py | {
"start": 614,
"end": 3598
} | class ____(TestCase):
def setUp(self) -> None:
self.user = self.create_user()
self.org = self.create_organization(name="bloop", owner=self.user)
self.project = self.create_project(
teams=[self.create_team()], name="foo", fire_project_created=True
)
self.creator = ProjectRuleCreator(
name="New Cool Rule",
owner=Actor.from_id(user_id=self.user.id),
project=self.project,
action_match="any",
filter_match="all",
conditions=[
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"key": "foo",
"match": "eq",
"value": "bar",
},
{
"id": "sentry.rules.filters.tagged_event.TaggedEventFilter",
"key": "foo",
"match": "is",
},
],
actions=[
{
"id": "sentry.rules.actions.notify_event.NotifyEventAction",
"name": "Send a notification (for all legacy integrations)",
}
],
environment=self.environment.id,
frequency=5,
source=RuleSource.ISSUE,
)
def test_create_rule_and_workflow(self) -> None:
rule = self.creator.run()
rule_id = rule.id
alert_rule_detector = AlertRuleDetector.objects.get(rule_id=rule_id)
alert_rule_workflow = AlertRuleWorkflow.objects.get(rule_id=rule_id)
detector = alert_rule_detector.detector
assert detector.project_id == self.project.id
assert detector.type == ErrorGroupType.slug
assert Detector.objects.get(project=self.project, type=IssueStreamGroupType.slug)
workflow = alert_rule_workflow.workflow
assert workflow.config["frequency"] == 5
assert workflow.owner_user_id == self.user.id
assert workflow.owner_team_id is None
assert workflow.environment_id == self.environment.id
when_dcg = workflow.when_condition_group
assert when_dcg
assert when_dcg.logic_type == "any-short"
assert len(when_dcg.conditions.all()) == 1
data_condition = list(when_dcg.conditions.all())[0]
assert data_condition.type == Condition.FIRST_SEEN_EVENT
action_filter = WorkflowDataConditionGroup.objects.get(workflow=workflow).condition_group
assert action_filter.logic_type == "all"
assert len(action_filter.conditions.all()) == 1
data_condition = list(action_filter.conditions.all())[0]
assert data_condition.type == Condition.TAGGED_EVENT
assert data_condition.comparison == {"key": "foo", "match": "is"}
action = DataConditionGroupAction.objects.get(condition_group=action_filter).action
assert action.type == Action.Type.PLUGIN
| TestProjectRuleCreator |
python | ray-project__ray | python/ray/serve/tests/test_healthcheck.py | {
"start": 655,
"end": 8831
} | class ____:
def __init__(self):
self.healthy = True
self.should_hang = False
def check_health(self):
if self.should_hang:
import time
time.sleep(10000)
elif not self.healthy:
raise Exception("intended to fail")
def __call__(self, *args):
return ray.get_runtime_context().current_actor
def set_should_fail(self):
self.healthy = False
return ray.get_runtime_context().current_actor
def set_should_hang(self):
self.should_hang = True
return ray.get_runtime_context().current_actor
async def check_new_actor_started(handle, original_actors):
if not isinstance(original_actors, set):
original_actors = {original_actors._actor_id}
try:
return (await handle.remote())._actor_id not in original_actors
except RayError:
return False
@pytest.mark.parametrize("use_class", [True, False])
def test_no_user_defined_method(serve_instance, use_class):
"""Check the default behavior when an actor crashes."""
if use_class:
@serve.deployment
class A:
def __call__(self, *args):
return ray.get_runtime_context().current_actor
else:
@serve.deployment
def A(*args):
return ray.get_runtime_context().current_actor
h = serve.run(A.bind())
actor = h.remote().result()
ray.kill(actor)
# This would time out if we wait for multiple health check failures.
wait_for_condition(check_new_actor_started, handle=h, original_actors=actor)
@pytest.mark.asyncio
async def test_user_defined_method_fails(serve_instance):
h = serve.run(Patient.bind())
actor = await h.remote()
await h.set_should_fail.remote()
await async_wait_for_condition(
check_new_actor_started, handle=h, original_actors=actor
)
await asyncio.gather(*[h.remote() for _ in range(100)])
@pytest.mark.asyncio
async def test_user_defined_method_hangs(serve_instance):
h = serve.run(Patient.options(graceful_shutdown_timeout_s=0).bind())
actor = await h.remote()
await h.set_should_hang.remote()
await async_wait_for_condition(
check_new_actor_started, handle=h, original_actors=actor
)
await asyncio.gather(*[h.remote() for _ in range(100)])
@pytest.mark.asyncio
async def test_multiple_replicas(serve_instance):
h = serve.run(Patient.options(num_replicas=2).bind())
actors = {
a._actor_id for a in await asyncio.gather(*[h.remote() for _ in range(100)])
}
assert len(actors) == 2
await h.set_should_fail.remote()
await async_wait_for_condition(
check_new_actor_started, handle=h, original_actors=actors
)
new_actors = {
a._actor_id for a in await asyncio.gather(*[h.remote() for _ in range(100)])
}
assert len(new_actors) == 2
assert len(new_actors.intersection(actors)) == 1
def test_inherit_healthcheck(serve_instance):
class Parent:
def __init__(self):
self.should_fail = False
def check_health(self):
if self.should_fail:
raise Exception("intended to fail")
def set_should_fail(self):
self.should_fail = True
@serve.deployment(health_check_period_s=1)
class Child(Parent):
def __call__(self, *args):
return ray.get_runtime_context().current_actor
h = serve.run(Child.bind())
actors = {h.remote().result()._actor_id for _ in range(100)}
assert len(actors) == 1
h.set_should_fail.remote().result()
wait_for_condition(check_new_actor_started, handle=h, original_actors=actors)
def test_nonconsecutive_failures(serve_instance):
counter = ray.remote(Counter).remote()
# Test that a health check failing every other call isn't marked unhealthy.
@serve.deployment(health_check_period_s=0.1)
class FlakyHealthCheck:
def check_health(self):
curr_count = ray.get(counter.inc.remote())
if curr_count % 2 == 0:
raise Exception("Ah! I had evens!")
def __call__(self, *args):
return ray.get_runtime_context().current_actor
h = serve.run(FlakyHealthCheck.bind())
a1 = h.remote().result()
# Wait for 10 health check periods, should never get marked unhealthy.
wait_for_condition(lambda: ray.get(counter.get.remote()) > 10)
assert h.remote().result()._actor_id == a1._actor_id
def test_consecutive_failures(serve_instance):
# Test that the health check must fail N times before being restarted.
counter = ray.remote(Counter).remote()
@serve.deployment(health_check_period_s=1)
class ChronicallyUnhealthy:
def __init__(self):
self._actor_id = ray.get_runtime_context().current_actor._actor_id
self._should_fail = False
def check_health(self):
if self._should_fail:
ray.get(counter.inc.remote())
raise Exception("intended to fail")
def set_should_fail(self):
self._should_fail = True
return self._actor_id
def __call__(self, *args):
return self._actor_id
h = serve.run(ChronicallyUnhealthy.bind())
def check_fails_3_times():
original_actor_id = h.set_should_fail.remote().result()
# Wait until a new actor is started.
wait_for_condition(lambda: h.remote().result() != original_actor_id)
# Check that the health check failed N times before replica was killed.
assert ray.get(counter.get.remote()) == REPLICA_HEALTH_CHECK_UNHEALTHY_THRESHOLD
# Run the check twice to see that the counter gets reset after a
# replica is killed.
check_fails_3_times()
ray.get(counter.reset.remote())
check_fails_3_times()
def test_health_check_failure_cause_deploy_failure(serve_instance):
"""If a deployment always fails health check, the deployment should be unhealthy."""
@serve.deployment
class AlwaysUnhealthy:
def check_health(self):
raise Exception("intended to fail")
def __call__(self, *args):
return ray.get_runtime_context().current_actor
with pytest.raises(RuntimeError):
serve.run(AlwaysUnhealthy.bind())
app_status = serve.status().applications[SERVE_DEFAULT_APP_NAME]
assert (
app_status.deployments["AlwaysUnhealthy"].status
== DeploymentStatus.DEPLOY_FAILED
)
def test_health_check_failure_makes_deployment_unhealthy_transition(serve_instance):
"""
If a deployment transitions to unhealthy, then continues to fail health check after
being restarted, the deployment should be unhealthy.
"""
class Toggle:
def __init__(self):
self._should_fail = False
def set_should_fail(self):
self._should_fail = True
def should_fail(self):
return self._should_fail
@serve.deployment(health_check_period_s=1, health_check_timeout_s=1)
class WillBeUnhealthy:
def __init__(self, toggle):
self._toggle = toggle
def check_health(self):
if ray.get(self._toggle.should_fail.remote()):
raise Exception("intended to fail")
def __call__(self, *args):
return ray.get_runtime_context().current_actor
def check_status(expected_status: DeploymentStatus):
app_status = serve.status().applications[SERVE_DEFAULT_APP_NAME]
assert app_status.deployments["WillBeUnhealthy"].status == expected_status
return True
toggle = ray.remote(Toggle).remote()
serve.run(WillBeUnhealthy.bind(toggle))
# Check that deployment is healthy initially
assert check_status(DeploymentStatus.HEALTHY)
ray.get(toggle.set_should_fail.remote())
# Check that deployment is now unhealthy
wait_for_condition(check_status, expected_status=DeploymentStatus.UNHEALTHY)
# Check that deployment stays unhealthy
for _ in range(5):
assert check_status(DeploymentStatus.UNHEALTHY)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| Patient |
python | getsentry__sentry | src/sentry/preprod/size_analysis/models.py | {
"start": 1141,
"end": 1300
} | class ____(BaseModel):
size_diff: int
head_size: int | None
base_size: int | None
path: str
item_type: str | None
type: DiffType
| DiffItem |
python | django__django | django/contrib/auth/migrations/0012_alter_user_first_name_max_length.py | {
"start": 43,
"end": 411
} | class ____(migrations.Migration):
dependencies = [
("auth", "0011_update_proxy_permissions"),
]
operations = [
migrations.AlterField(
model_name="user",
name="first_name",
field=models.CharField(
blank=True, max_length=150, verbose_name="first name"
),
),
]
| Migration |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_config_migrations.py | {
"start": 13960,
"end": 15577
} | class ____:
OLD_TEST_CONFIG = _config_path(f"{_MIGRATE_DEFAULT_ACTION_BREAKDOWNS_CONFIGS_PATH}/test_old_config.json")
NEW_TEST_CONFIG = _config_path(f"{_MIGRATE_DEFAULT_ACTION_BREAKDOWNS_CONFIGS_PATH}/test_new_config.json")
@staticmethod
def revert_migration(config_path: str) -> None:
with open(config_path, "r") as test_config:
config = json.load(test_config)
config.pop("default_action_breakdowns", None)
with open(config_path, "w") as updated_config:
config = json.dumps(config)
updated_config.write(config)
def test_migrate_client_config(self):
migration_instance = MigrateDefaultActionBreakdowns()
original_config = load_config(self.OLD_TEST_CONFIG)
# migrate the test_config
migration_instance.migrate([CMD, "--config", self.OLD_TEST_CONFIG], SOURCE)
# load the updated config
test_migrated_config = load_config(self.OLD_TEST_CONFIG)
# check migrated property
assert "default_ads_insights_action_breakdowns" in test_migrated_config
assert test_migrated_config["default_ads_insights_action_breakdowns"] == ["action_type", "action_target_id", "action_destination"]
# revert the test_config to the starting point
self.revert_migration(self.OLD_TEST_CONFIG)
def test_should_not_migrate_new_client_config(self):
new_config = load_config(self.NEW_TEST_CONFIG)
migration_instance = MigrateDefaultActionBreakdowns()
assert not migration_instance.should_migrate(new_config)
| TestMigrateDefaultActionBreakdowns |
python | mlflow__mlflow | mlflow/genai/optimize/types.py | {
"start": 3468,
"end": 4127
} | class ____:
"""
The output type of `eval_fn` in the
:py:func:`mlflow.genai.optimize.BasePromptOptimizer.optimize()` API.
Args:
inputs: The inputs of the evaluation.
outputs: The outputs of the prediction function.
expectations: The expected outputs.
score: The score of the evaluation result.
trace: The trace of the evaluation execution.
rationales: The rationales of the evaluation result.
"""
inputs: dict[str, Any]
outputs: Any
expectations: Any
score: float
trace: Trace
rationales: dict[str, str]
@experimental(version="3.5.0")
@dataclass
| EvaluationResultRecord |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/array_ops_test.py | {
"start": 39302,
"end": 40784
} | class ____(object):
"""Tests that we can compute a gradient for var^2."""
def __init__(self, test, var, varnp, use_tape):
self.test = test
self.var = var
self.varnp = varnp
self.use_tape = use_tape
def __getitem__(self, spec):
with test_util.AbstractGradientTape(
use_tape=self.use_tape, persistent=True) as tape:
tape.watch(self.var)
val = self.var * self.var
slice_var = self.var[spec]
slice_val = val[spec]
# compute analytic 2nd derivative
analytic_grad2 = 2 * slice_val
dy = variables.Variable(
array_ops.ones_like(slice_var, dtype=dtypes.float32))
assign = dy.assign(slice_var)
slice_val_grad = tape.gradient(slice_val, self.var, [dy])
slice_val_grad2 = tape.gradient(slice_val_grad, dy, [self.var])
self.test.evaluate(assign)
slice_val_grad_evaled, slice_val_grad2_evaled = (
self.test.evaluate([slice_val_grad, slice_val_grad2]))
analytic_grad2_evaled = self.test.evaluate(analytic_grad2)
self.test.assertAllEqual(slice_val_grad2_evaled, analytic_grad2_evaled)
# compute analytic gradient for slice
np_val_grad = (2 * self.varnp * self.varnp)
np_sliceval_grad = np.zeros(self.var.get_shape())
if isinstance(spec, tensor_lib.Tensor):
spec = self.test.evaluate(spec)
np_sliceval_grad[spec] = np_val_grad[spec]
# verify gradient
self.test.assertAllEqual(slice_val_grad_evaled, np_sliceval_grad)
| GradSliceChecker |
python | pytorch__pytorch | test/test_fx_experimental.py | {
"start": 67483,
"end": 71575
} | class ____(JitTestCase):
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_normalize_operator_exhaustive(self, device, dtype, op):
# These ops currently don't trace in FX for various reasons (i.e. they take a list of tensors)
fx_fail = {"cat", "stack", "hstack", "vstack", "dstack", "linalg.multi_dot", "_upsample_bilinear2d_aa", "_chunk_cat"}
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
if isinstance(op.op, torch._ops.OpOverload):
self.skipTest("normalize operator doesn't work on torch.ops")
for sample_input in sample_inputs_itr:
unsupported_arg_type = False
arg_values = [sample_input.input] + list(sample_input.args)
kwarg_values = sample_input.kwargs
arg_types = []
kwarg_types = {}
def jit_infer_type(v):
inferred_arg_type = torch._C._jit_try_infer_type(v)
assert inferred_arg_type.success()
t = _torchscript_type_to_python_type(inferred_arg_type.type())
return t
for v in arg_values:
if isinstance(v, torch.Tensor):
arg_types.append(type(v))
else:
if isinstance(v, complex):
# Complex type not supported in FX
unsupported_arg_type = True
arg_types.append(jit_infer_type(v))
for k, v in kwarg_values.items():
if isinstance(v, torch.Tensor):
kwarg_types[k] = type(v)
else:
if isinstance(v, complex):
# Complex type not supported in FX
unsupported_arg_type = True
kwarg_types[k] = jit_infer_type(v)
if unsupported_arg_type:
continue
# Test normalize_function by itself
ref_out = op.op(*arg_values, **kwarg_values)
norm_args_and_kwargs = normalize_function(
op.op, arg_values, kwarg_values, arg_types, kwarg_types
)
if norm_args_and_kwargs is None:
raise RuntimeError(
"""
FX failed to normalize op - add the op to the op_skip list.
A common reason is if your OpInfo was implemented with a lambda
- otherwise, file an issue
"""
)
test_out = op.op(*norm_args_and_kwargs.args, **norm_args_and_kwargs.kwargs)
self.assertEqual(test_out, ref_out)
# Test normalized_arguments as part of FX
if op.name in fx_fail:
continue
param_names = []
param_values = []
fx_args = []
idx = 0
def process_arg(arg, name):
if isinstance(arg, torch.Tensor):
param_names.append(name)
param_values.append(arg)
return name
else:
return f"{repr(arg)}"
def process_arg_with_idx(arg):
nonlocal idx
res = process_arg(arg, f"arg_{idx}")
idx = idx + 1
return res
def str_arg(arg):
if isinstance(arg, tuple):
args = [f"{str_arg(v)}, " for v in arg]
return f"({' '.join(args)})"
elif isinstance(arg, list):
args = [f"{str_arg(v)}" for v in arg]
return f"[{', '.join(args)}]"
else:
return arg
for v in arg_values:
arg = pytree.tree_map(process_arg_with_idx, v)
fx_args.append(str_arg(arg))
for k, v in kwarg_values.items():
arg = pytree.tree_map(functools.partial(process_arg, name=k), v)
fx_args.append(f"{k} = {str_arg(arg)}")
code = f"""
| TestNormalizeOperators |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 108359,
"end": 109111
} | class ____(TypeDecorator):
# previous workaround for array of enum
impl = postgresql.ARRAY
cache_ok = True
# note expanding logic is checking _is_array here so that has to
# translate through the TypeDecorator
def bind_expression(self, bindvalue):
return sa.cast(bindvalue, self)
def result_processor(self, dialect, coltype):
super_rp = super().result_processor(dialect, coltype)
def handle_raw_string(value):
inner = re.match(r"^{(.*)}$", value).group(1)
return inner.split(",") if inner else []
def process(value):
if value is None:
return None
return super_rp(handle_raw_string(value))
return process
| _ArrayOfEnum |
python | readthedocs__readthedocs.org | readthedocs/config/models.py | {
"start": 2828,
"end": 3022
} | class ____(ConfigBaseModel):
ranking: dict[str, int] = {}
ignore: list[str] = [
"search.html",
"search/index.html",
"404.html",
"404/index.html",
]
| Search |
python | huggingface__transformers | src/transformers/models/efficientloftr/modeling_efficientloftr.py | {
"start": 19975,
"end": 22203
} | class ____(nn.Module):
def __init__(self, config: EfficientLoFTRConfig, layer_idx: int):
super().__init__()
self.q_aggregation_kernel_size = config.q_aggregation_kernel_size
self.aggregation = EfficientLoFTRAggregationLayer(config)
self.attention = EfficientLoFTRAttention(config, layer_idx)
self.mlp = EfficientLoFTRMLP(config)
def forward(
self,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
batch_size, embed_dim, _, _ = hidden_states.shape
# Aggregate features
aggregated_hidden_states, aggregated_encoder_hidden_states = self.aggregation(
hidden_states, encoder_hidden_states
)
_, aggregated_h, aggregated_w, _ = aggregated_hidden_states.shape
# Multi-head attention
aggregated_hidden_states = aggregated_hidden_states.reshape(batch_size, -1, embed_dim)
aggregated_encoder_hidden_states = aggregated_encoder_hidden_states.reshape(batch_size, -1, embed_dim)
attn_output, _ = self.attention(
aggregated_hidden_states,
aggregated_encoder_hidden_states,
position_embeddings=position_embeddings,
**kwargs,
)
# Upsample features
# (batch_size, seq_len, embed_dim) -> (batch_size, embed_dim, h, w) with seq_len = h * w
attn_output = attn_output.permute(0, 2, 1)
attn_output = attn_output.reshape(batch_size, embed_dim, aggregated_h, aggregated_w)
attn_output = torch.nn.functional.interpolate(
attn_output, scale_factor=self.q_aggregation_kernel_size, mode="bilinear", align_corners=False
)
intermediate_states = torch.cat([hidden_states, attn_output], dim=1)
intermediate_states = intermediate_states.permute(0, 2, 3, 1)
output_states = self.mlp(intermediate_states)
output_states = output_states.permute(0, 3, 1, 2)
hidden_states = hidden_states + output_states
return hidden_states
| EfficientLoFTRAggregatedAttention |
python | ansible__ansible | .azure-pipelines/scripts/publish-codecov.py | {
"start": 539,
"end": 657
} | class ____:
name: str
path: pathlib.Path
flags: t.List[str]
@dataclasses.dataclass(frozen=True)
| CoverageFile |
python | getsentry__sentry | src/sentry/models/organizationmember.py | {
"start": 3488,
"end": 6947
} | class ____(BaseManager["OrganizationMember"]):
def get_contactable_members_for_org(self, organization_id: int) -> QuerySet:
"""Get a list of members we can contact for an organization through email."""
# TODO(Steve): check member-limit:restricted
return self.filter(
organization_id=organization_id,
invite_status=InviteStatus.APPROVED.value,
user_id__isnull=False,
)
def delete_expired(self, threshold: datetime.datetime) -> None:
"""Delete un-accepted member invitations that expired `threshold` days ago."""
from sentry.auth.services.auth import auth_service
orgs_with_scim = auth_service.get_org_ids_with_scim()
for member in (
self.filter(
token_expires_at__lt=threshold,
user_id__exact=None,
)
.exclude(email__exact=None)
.exclude(organization_id__in=orgs_with_scim)
):
member.delete()
def get_for_integration(
self, integration: RpcIntegration | int, user: RpcUser, organization_id: int | None = None
) -> QuerySet[OrganizationMember]:
# This can be moved into the integration service once OrgMemberMapping is completed.
# We are forced to do an ORM -> service -> ORM call to reduce query size while avoiding
# cross silo queries until we have a control silo side to map users through.
from sentry.integrations.services.integration import integration_service
if organization_id is not None:
if (
integration_service.get_organization_integration(
integration_id=extract_id_from(integration), organization_id=organization_id
)
is None
):
return self.filter(Q())
return self.filter(organization_id=organization_id, user_id=user.id)
org_ids = list(self.filter(user_id=user.id).values_list("organization_id", flat=True))
org_ids = [
oi.organization_id
for oi in integration_service.get_organization_integrations(
organization_ids=org_ids, integration_id=extract_id_from(integration)
)
]
return self.filter(user_id=user.id, organization_id__in=org_ids).select_related(
"organization"
)
def get_member_invite_query(self, id: int) -> QuerySet:
return self.filter(
invite_status__in=[
InviteStatus.REQUESTED_TO_BE_INVITED.value,
InviteStatus.REQUESTED_TO_JOIN.value,
],
user_id__isnull=True,
id=id,
)
def get_teams_by_user(self, organization: Organization) -> dict[int, list[int]]:
queryset = self.filter(organization_id=organization.id).values_list("user_id", "teams")
user_teams: dict[int, list[int]] = defaultdict(list)
for user_id, team_id in queryset:
if user_id is not None:
user_teams[user_id].append(team_id)
return user_teams
def get_members_by_email_and_role(self, email: str, role: str) -> QuerySet:
users_by_email = user_service.get_many(
filter=dict(
emails=[email],
is_active=True,
)
)
return self.filter(role=role, user_id__in=[u.id for u in users_by_email])
@region_silo_model
| OrganizationMemberManager |
python | sqlalchemy__sqlalchemy | test/sql/test_external_traversal.py | {
"start": 10817,
"end": 13154
} | class ____(fixtures.TestBase):
"""test the special binary product visit"""
def _assert_traversal(self, expr, expected):
canary = []
def visit(binary, l, r):
canary.append((binary.operator, l, r))
print(binary.operator, l, r)
sql_util.visit_binary_product(visit, expr)
eq_(canary, expected)
def test_basic(self):
a, b = column("a"), column("b")
self._assert_traversal(a == b, [(operators.eq, a, b)])
def test_with_tuples(self):
a, b, c, d, b1, b1a, b1b, e, f = (
column("a"),
column("b"),
column("c"),
column("d"),
column("b1"),
column("b1a"),
column("b1b"),
column("e"),
column("f"),
)
expr = tuple_(a, b, b1 == tuple_(b1a, b1b == d), c) > tuple_(
func.go(e + f)
)
self._assert_traversal(
expr,
[
(operators.gt, a, e),
(operators.gt, a, f),
(operators.gt, b, e),
(operators.gt, b, f),
(operators.eq, b1, b1a),
(operators.eq, b1b, d),
(operators.gt, c, e),
(operators.gt, c, f),
],
)
def test_composed(self):
a, b, e, f, q, j, r = (
column("a"),
column("b"),
column("e"),
column("f"),
column("q"),
column("j"),
column("r"),
)
expr = and_((a + b) == q + func.sum(e + f), and_(j == r, f == q))
self._assert_traversal(
expr,
[
(operators.eq, a, q),
(operators.eq, a, e),
(operators.eq, a, f),
(operators.eq, b, q),
(operators.eq, b, e),
(operators.eq, b, f),
(operators.eq, j, r),
(operators.eq, f, q),
],
)
def test_subquery(self):
a, b, c = column("a"), column("b"), column("c")
subq = select(c).where(c == a).scalar_subquery()
expr = and_(a == b, b == subq)
self._assert_traversal(
expr, [(operators.eq, a, b), (operators.eq, b, subq)]
)
| BinaryEndpointTraversalTest |
python | python-openxml__python-docx | src/docx/enum/text.py | {
"start": 5370,
"end": 6394
} | class ____(BaseXmlEnum):
"""Specifies the tab stop alignment to apply.
MS API name: `WdTabAlignment`
URL: https://msdn.microsoft.com/EN-US/library/office/ff195609.aspx
"""
LEFT = (0, "left", "Left-aligned.")
"""Left-aligned."""
CENTER = (1, "center", "Center-aligned.")
"""Center-aligned."""
RIGHT = (2, "right", "Right-aligned.")
"""Right-aligned."""
DECIMAL = (3, "decimal", "Decimal-aligned.")
"""Decimal-aligned."""
BAR = (4, "bar", "Bar-aligned.")
"""Bar-aligned."""
LIST = (6, "list", "List-aligned. (deprecated)")
"""List-aligned. (deprecated)"""
CLEAR = (101, "clear", "Clear an inherited tab stop.")
"""Clear an inherited tab stop."""
END = (102, "end", "Right-aligned. (deprecated)")
"""Right-aligned. (deprecated)"""
NUM = (103, "num", "Left-aligned. (deprecated)")
"""Left-aligned. (deprecated)"""
START = (104, "start", "Left-aligned. (deprecated)")
"""Left-aligned. (deprecated)"""
| WD_TAB_ALIGNMENT |
python | getsentry__sentry | src/sentry/issues/grouptype.py | {
"start": 15160,
"end": 15571
} | class ____(GroupType):
type_id = 1911
slug = "performance_m_n_plus_one_db_queries_experimental"
description = "MN+1 Query (Experimental)"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.DB_QUERY.value
noise_config = NoiseConfig()
default_priority = PriorityLevel.LOW
released = False
@dataclass(frozen=True)
| PerformanceMNPlusOneDBQueriesExperimentalGroupType |
python | getsentry__sentry | src/sentry/integrations/discord/webhooks/command.py | {
"start": 2712,
"end": 5966
} | class ____(MessagingIntegrationCommandDispatcher[str]):
request: DiscordRequest
@property
def integration_spec(self) -> MessagingIntegrationSpec:
return DiscordMessagingSpec()
def help_handler(self, input: CommandInput) -> IntegrationResponse[str]:
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.SUCCESS,
response=HELP_MESSAGE,
)
def link_user_handler(self, _: CommandInput) -> IntegrationResponse[str]:
if self.request.has_identity():
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.SUCCESS,
response=ALREADY_LINKED_MESSAGE.format(email=self.request.get_identity_str()),
outcome_reason=str(MessageCommandHaltReason.ALREADY_LINKED),
context_data={
"email": self.request.get_identity_str(),
},
)
if not self.request.integration or not self.request.user_id:
# TODO: remove this logger
logger.warning(
"discord.interaction.command.missing.integration",
extra={
"hasIntegration": bool(self.request.integration),
"hasUserId": self.request.user_id,
},
)
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.FAILURE,
response=MISSING_DATA_MESSAGE,
outcome_reason=str(MessageCommandFailureReason.MISSING_DATA),
context_data={
"has_integration": bool(self.request.integration),
"has_user_id": bool(self.request.user_id),
},
)
link_url = build_linking_url(
integration=self.request.integration,
discord_id=self.request.user_id,
)
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.SUCCESS,
response=LINK_USER_MESSAGE.format(url=link_url),
)
def unlink_user_handler(self, input: CommandInput) -> IntegrationResponse[str]:
if not self.request.has_identity():
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.SUCCESS,
response=NOT_LINKED_MESSAGE,
outcome_reason=str(MessageCommandHaltReason.NOT_LINKED),
)
# if self.request.has_identity() then these must not be None
assert self.request.integration is not None
assert self.request.user_id is not None
unlink_url = build_unlinking_url(
integration=self.request.integration,
discord_id=self.request.user_id,
)
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.SUCCESS,
response=UNLINK_USER_MESSAGE.format(url=unlink_url),
)
@property
def command_handlers(
self,
) -> Iterable[tuple[MessagingIntegrationCommand, CommandHandler[str]]]:
yield commands.HELP, self.help_handler
yield commands.LINK_IDENTITY, self.link_user_handler
yield commands.UNLINK_IDENTITY, self.unlink_user_handler
| DiscordCommandDispatcher |
python | viewflow__viewflow | viewflow/workflow/flow/views/actions.py | {
"start": 445,
"end": 1087
} | class ____(
mixins.SuccessMessageMixin,
mixins.TaskSuccessUrlMixin,
mixins.TaskViewTemplateNames,
generic.FormView,
):
"""
Default assign view for flow task.
Get confirmation from user, assigns task and redirects to task pages
"""
form_class = forms.Form
template_filename = "task_assign.html"
success_message = _("Task {task} has been assigned.")
def form_valid(self, *args, **kwargs):
"""If the form is valid, save the associated model and finish the task."""
self.request.activation.assign(self.request.user)
return super().form_valid(*args, **kwargs)
| AssignTaskView |
python | Delgan__loguru | loguru/_string_parsers.py | {
"start": 63,
"end": 9324
} | class ____:
"""Provide static methods to compute the next occurrence of various time frequencies.
Includes hourly, daily, weekly, monthly, and yearly frequencies
based on a given datetime object.
"""
@staticmethod
def hourly(t: datetime.datetime) -> datetime.datetime:
"""Compute the next hour occurrence.
Parameters
----------
t : datetime.datetime
The reference datetime.
Returns
-------
datetime.datetime
Next hour with minutes, seconds, microseconds set to zero.
"""
dt = t + datetime.timedelta(hours=1)
return dt.replace(minute=0, second=0, microsecond=0)
@staticmethod
def daily(t: datetime.datetime) -> datetime.datetime:
"""Compute the next day occurrence.
Parameters
----------
t : datetime.datetime
The reference datetime.
Returns
-------
datetime.datetime
Next day with hour, minutes, seconds, microseconds set to zero.
"""
dt = t + datetime.timedelta(days=1)
return dt.replace(hour=0, minute=0, second=0, microsecond=0)
@staticmethod
def weekly(t: datetime.datetime) -> datetime.datetime:
"""Compute the next week occurrence.
Parameters
----------
t : datetime.datetime
The reference datetime.
Returns
-------
datetime.datetime
Next Monday with hour, minutes, seconds, microseconds set to zero.
"""
dt = t + datetime.timedelta(days=7 - t.weekday())
return dt.replace(hour=0, minute=0, second=0, microsecond=0)
@staticmethod
def monthly(t: datetime.datetime) -> datetime.datetime:
"""Compute the next month occurrence.
Parameters
----------
t : datetime.datetime
The reference datetime.
Returns
-------
datetime.datetime
First day of next month with hour, minutes, seconds, microseconds set to zero.
"""
if t.month == 12:
y, m = t.year + 1, 1
else:
y, m = t.year, t.month + 1
return t.replace(year=y, month=m, day=1, hour=0, minute=0, second=0, microsecond=0)
@staticmethod
def yearly(t: datetime.datetime) -> datetime.datetime:
"""Compute the next year occurrence.
Parameters
----------
t : datetime.datetime
The reference datetime.
Returns
-------
datetime.datetime
First day of next year with hour, minutes, seconds, microseconds set to zero.
"""
y = t.year + 1
return t.replace(year=y, month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
def parse_size(size: str) -> Optional[float]:
"""Parse a size string with optional units into bits.
Supports formats like '100MB', '2GiB', '1.5TB'. Case insensitive.
Parameters
----------
size : str
Size string to parse (e.g., '100MB', '2GiB').
Returns
-------
float | None
Size in bits or None if invalid format.
Raises
------
ValueError
If numeric value or unit is invalid.
"""
size = size.strip()
reg = re.compile(r"([e\+\-\.\d]+)\s*([kmgtpezy])?(i)?(b)", flags=re.I)
match = reg.fullmatch(size)
if not match:
return None
s, u, i, b = match.groups()
try:
s = float(s)
except ValueError as err:
raise ValueError("Invalid float value while parsing size: '%s'" % s) from err
u = "kmgtpezy".index(u.lower()) + 1 if u else 0
i = 1024 if i else 1000
b = {"b": 8, "B": 1}[b] if b else 1
return s * i**u / b
def parse_duration(duration: str) -> Optional[datetime.timedelta]:
"""Parse a duration string and return a corresponding timedelta object.
The string can include multiple units (years, months, weeks, days, hours, minutes, seconds).
Example: "1h 30min", "2 days, 3h", "1.5y 2months".
Parameters
----------
duration : str
The duration string to parse.
Returns
-------
datetime.timedelta | None
The parsed duration or None if input is invalid.
Raises
------
ValueError
If a value cannot be converted to float or if an invalid unit is encountered.
"""
duration = duration.strip()
reg = r"(?:([e\+\-\.\d]+)\s*([a-z]+)[\s\,]*)"
units = [
("y|years?", 31536000),
("months?", 2628000),
("w|weeks?", 604800),
("d|days?", 86400),
("h|hours?", 3600),
("min(?:ute)?s?", 60),
("s|sec(?:ond)?s?", 1), # spellchecker: disable-line
("ms|milliseconds?", 0.001),
("us|microseconds?", 0.000001),
]
if not re.fullmatch(reg + "+", duration, flags=re.I):
return None
seconds = 0
for value, unit in re.findall(reg, duration, flags=re.I):
try:
value = float(value)
except ValueError as e:
raise ValueError("Invalid float value while parsing duration: '%s'" % value) from e
try:
unit = next(u for r, u in units if re.fullmatch(r, unit, flags=re.I))
except StopIteration:
raise ValueError("Invalid unit value while parsing duration: '%s'" % unit) from None
seconds += value * unit
return datetime.timedelta(seconds=seconds)
def parse_frequency(frequency: str):
"""Parse a frequency string and return the corresponding Frequencies method.
Supported frequencies: hourly, daily, weekly, monthly, yearly.
Parameters
----------
frequency : str
The frequency string.
Returns
-------
Callable | None
Corresponding Frequencies method or None if unrecognized.
"""
frequencies = {
"hourly": Frequencies.hourly,
"daily": Frequencies.daily,
"weekly": Frequencies.weekly,
"monthly": Frequencies.monthly,
"yearly": Frequencies.yearly,
}
frequency = frequency.strip().lower()
return frequencies.get(frequency, None)
def parse_day(day: str) -> Optional[int]:
"""Parse a weekday string and return its integer value.
Accepts full day names or "w0" to "w6".
Parameters
----------
day : str
The day to parse.
Returns
-------
int | None
Integer value (Monday=0 ... Sunday=6), or None if invalid.
Raises
------
ValueError
If the digit in 'wX' is not in range [0-6].
"""
days = {
"monday": 0,
"tuesday": 1,
"wednesday": 2,
"thursday": 3,
"friday": 4,
"saturday": 5,
"sunday": 6,
}
day = day.strip().lower()
if day in days:
return days[day]
if day.startswith("w") and day[1:].isdigit():
day = int(day[1:])
if not 0 <= day < 7:
raise ValueError("Invalid weekday value while parsing day (expected [0-6]): '%d'" % day)
else:
day = None
return day
def parse_time(time: str) -> datetime.time:
"""Parse a time string and return a `datetime.time` object.
Supports formats: HH, HH:MM, HH:MM:SS, HH AM/PM, etc.
Parameters
----------
time : str
The time string.
Returns
-------
datetime.time
The parsed time.
Raises
------
ValueError
If input doesn't match any supported format.
"""
time = time.strip()
reg = re.compile(r"^[\d\.\:]+\s*(?:[ap]m)?$", flags=re.I)
if not reg.match(time):
return None
formats = [
"%H",
"%H:%M",
"%H:%M:%S",
"%H:%M:%S.%f",
"%I %p",
"%I:%M %S",
"%I:%M:%S %p",
"%I:%M:%S.%f %p",
]
for format_ in formats:
try:
dt = datetime.datetime.strptime(time, format_)
except ValueError:
pass
else:
return dt.time()
raise ValueError("Unrecognized format while parsing time: '%s'" % time)
def parse_daytime(daytime: str) -> Optional[Tuple[int, datetime.time]]:
"""Parse a string representing a day and time separated by 'at'.
Parameters
----------
daytime : str
The day and time string.
Returns
-------
tuple[int, datetime.time] | None
Parsed (day, time) or None.
Raises
------
ValueError
If the day or time cannot be parsed.
"""
daytime = daytime.strip()
reg = re.compile(r"^(.*?)\s+at\s+(.*)$", flags=re.I)
match = reg.match(daytime)
if match:
day, time = match.groups()
else:
day = time = daytime
try:
parsed_day = parse_day(day)
if match and parsed_day is None:
raise ValueError("Unparsable day")
except ValueError as e:
raise ValueError("Invalid day while parsing daytime: '%s'" % day) from e
try:
parsed_time = parse_time(time)
if match and parsed_time is None:
raise ValueError("Unparsable time")
except ValueError as e:
raise ValueError("Invalid time while parsing daytime: '%s'" % time) from e
if parsed_day is None and parsed_time is None:
return None
return parsed_day, parsed_time
| Frequencies |
python | jupyterlab__jupyterlab | jupyterlab/labextensions.py | {
"start": 5851,
"end": 7413
} | class ____(BaseExtensionApp):
description = """Install labextension(s)
Usage
jupyter labextension install [--pin-version-as <alias,...>] <package...>
This installs JupyterLab extensions similar to yarn add or npm install.
Pass a list of comma separate names to the --pin-version-as flag
to use as aliases for the packages providers. This is useful to
install multiple versions of the same extension.
These can be uninstalled with the alias you provided
to the flag, similar to the "alias" feature of yarn add.
"""
aliases = install_aliases
pin = Unicode("", config=True, help="Pin this version with a certain alias")
def run_task(self):
self.deprecation_warning(
"Installing extensions with the jupyter labextension install command is now deprecated and will be removed in a future major version of JupyterLab."
)
pinned_versions = self.pin.split(",")
self.extra_args = self.extra_args or [os.getcwd()]
return any(
install_extension(
arg,
# Pass in pinned alias if we have it
pin=pinned_versions[i] if i < len(pinned_versions) else None,
app_options=AppOptions(
app_dir=self.app_dir,
logger=self.log,
core_config=self.core_config,
labextensions_path=self.labextensions_path,
),
)
for i, arg in enumerate(self.extra_args)
)
| InstallLabExtensionApp |
python | kubernetes-client__python | kubernetes/client/models/v1_container_image.py | {
"start": 383,
"end": 4498
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'names': 'list[str]',
'size_bytes': 'int'
}
attribute_map = {
'names': 'names',
'size_bytes': 'sizeBytes'
}
def __init__(self, names=None, size_bytes=None, local_vars_configuration=None): # noqa: E501
"""V1ContainerImage - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._names = None
self._size_bytes = None
self.discriminator = None
if names is not None:
self.names = names
if size_bytes is not None:
self.size_bytes = size_bytes
@property
def names(self):
"""Gets the names of this V1ContainerImage. # noqa: E501
Names by which this image is known. e.g. [\"kubernetes.example/hyperkube:v1.0.7\", \"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\"] # noqa: E501
:return: The names of this V1ContainerImage. # noqa: E501
:rtype: list[str]
"""
return self._names
@names.setter
def names(self, names):
"""Sets the names of this V1ContainerImage.
Names by which this image is known. e.g. [\"kubernetes.example/hyperkube:v1.0.7\", \"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\"] # noqa: E501
:param names: The names of this V1ContainerImage. # noqa: E501
:type: list[str]
"""
self._names = names
@property
def size_bytes(self):
"""Gets the size_bytes of this V1ContainerImage. # noqa: E501
The size of the image in bytes. # noqa: E501
:return: The size_bytes of this V1ContainerImage. # noqa: E501
:rtype: int
"""
return self._size_bytes
@size_bytes.setter
def size_bytes(self, size_bytes):
"""Sets the size_bytes of this V1ContainerImage.
The size of the image in bytes. # noqa: E501
:param size_bytes: The size_bytes of this V1ContainerImage. # noqa: E501
:type: int
"""
self._size_bytes = size_bytes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ContainerImage):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ContainerImage):
return True
return self.to_dict() != other.to_dict()
| V1ContainerImage |
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 58878,
"end": 68791
} | class ____(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
''' Returns a copy of self. '''
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output(header=''))
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
return self._status_line
@property
def status_code(self):
''' The HTTP status code as an integer (e.g. 404).'''
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999: raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. '''
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name): return _hkey(name) in self._headers
def __delitem__(self, name): del self._headers[_hkey(name)]
def __getitem__(self, name): return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value): self._headers[_hkey(name)] = [_hval(value)]
def get_header(self, name, default=None):
''' Return the value of a previously defined header. If there is no
header with that name, return a default value. '''
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
''' Create a new response header, replacing any previously defined
headers with the same name. '''
self._headers[_hkey(name)] = [_hval(value)]
def add_header(self, name, value):
''' Add an additional response header, not removing duplicates. '''
self._headers.setdefault(_hkey(name), []).append(_hval(value))
def iter_headers(self):
''' Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. '''
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', _hval(c.OutputString())))
if py3k:
out = [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty('Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
''' Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side javascript to read this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
save, not to store secret information at client side.
'''
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
if len(value) > 4096: raise ValueError('Cookie value to long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def local_property(name=None):
if name: depr('local_property() is deprecated and will be removed.') #0.12
ls = threading.local()
def fget(self):
try: return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(self, value): ls.var = value
def fdel(self): del ls.var
return property(fget, fset, fdel, 'Thread-local property')
| BaseResponse |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/ddl.py | {
"start": 14746,
"end": 14941
} | class ____(_CreateDropBase[_SI]):
def __init__(self, element: _SI, if_not_exists: bool = False) -> None:
super().__init__(element)
self.if_not_exists = if_not_exists
| _CreateBase |
python | plotly__plotly.py | plotly/graph_objs/sunburst/marker/colorbar/title/_font.py | {
"start": 233,
"end": 9954
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "sunburst.marker.colorbar.title"
_path_str = "sunburst.marker.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.sunburst.marke
r.colorbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.sunburst.marker.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sunburst.marker.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | jazzband__django-formtools | tests/wizard/test_forms.py | {
"start": 1300,
"end": 1522
} | class ____(forms.ModelForm):
class Meta:
model = TestModel
fields = '__all__'
TestModelFormSet = forms.models.modelformset_factory(TestModel, form=TestModelForm, extra=2, fields='__all__')
| TestModelForm |
python | kamyu104__LeetCode-Solutions | Python/detect-cycles-in-2d-grid.py | {
"start": 56,
"end": 521
} | class ____(object):
def __init__(self, n):
self.set = range(n)
self.count = n
def find_set(self, x):
if self.set[x] != x:
self.set[x] = self.find_set(self.set[x]) # path compression.
return self.set[x]
def union_set(self, x, y):
x_root, y_root = map(self.find_set, (x, y))
if x_root != y_root:
self.set[min(x_root, y_root)] = max(x_root, y_root)
self.count -= 1
| UnionFind |
python | getsentry__sentry | tests/sentry/api/bases/test_organization.py | {
"start": 13783,
"end": 14835
} | class ____(TestCase):
@cached_property
def endpoint(self):
return OrganizationEndpoint()
@cached_property
def user(self):
return self.create_user("tester@test.com")
@cached_property
def member(self):
return self.create_user("member@test.com")
@cached_property
def owner(self):
return self.create_user("owner@test.com")
@cached_property
def org(self):
org = self.create_organization("test", self.owner)
org.flags.allow_joinleave = False
org.save()
return org
def build_request(self, user=None, active_superuser=False, **params):
request = RequestFactory().get("/", params)
request.session = SessionBase()
if active_superuser:
request.superuser = MockSuperUser()
if user is None:
user = self.user
request.user = user
request.auth = None
request.access = from_request(drf_request_from_request(request), self.org)
return request
| BaseOrganizationEndpointTest |
python | jazzband__django-waffle | waffle/tests/test_models.py | {
"start": 188,
"end": 1461
} | class ____(TestCase):
def test_natural_keys(self):
flag = get_waffle_flag_model().objects.create(name='test-flag')
switch = get_waffle_switch_model().objects.create(name='test-switch')
sample = get_waffle_sample_model().objects.create(name='test-sample', percent=0)
self.assertEqual(flag.natural_key(), ('test-flag',))
self.assertEqual(switch.natural_key(), ('test-switch',))
self.assertEqual(sample.natural_key(), ('test-sample',))
self.assertEqual(
get_waffle_flag_model().objects.get_by_natural_key("test-flag"), flag
)
self.assertEqual(
get_waffle_switch_model().objects.get_by_natural_key("test-switch"), switch
)
self.assertEqual(
get_waffle_sample_model().objects.get_by_natural_key("test-sample"), sample
)
def test_flag_is_not_active_for_none_requests(self):
flag = get_waffle_flag_model().objects.create(name='test-flag')
self.assertEqual(flag.is_active(None), False)
def test_is_active_for_user_when_everyone_is_active(self):
flag = get_waffle_flag_model().objects.create(name='test-flag')
flag.everyone = True
self.assertEqual(flag.is_active_for_user(User()), True)
| ModelsTests |
python | dagster-io__dagster | python_modules/libraries/dagster-gcp/dagster_gcp/bigquery/types.py | {
"start": 3265,
"end": 3686
} | class ____(ConfigScalar):
def __init__(self):
super().__init__(
key=type(self).__name__,
given_name=type(self).__name__,
scalar_kind=ConfigScalarKind.STRING,
)
def post_process(self, value):
if not _is_valid_dataset(value):
raise PostProcessingError('Datasets must be of the form "project.dataset" or "dataset"')
return value
| _Dataset |
python | apache__airflow | providers/google/tests/unit/google/common/hooks/test_base_google.py | {
"start": 3723,
"end": 5013
} | class ____:
@pytest.mark.parametrize(
("exc", "retryable"),
[
(RefreshError("Other error", "test body"), False),
(RefreshError("Unable to acquire impersonated credentials", "test body"), True),
(ValueError(), False),
],
)
def test_is_refresh_credentials_exception(self, exc, retryable):
assert is_refresh_credentials_exception(exc) is retryable
def test_do_nothing_on_non_error(self):
@hook.GoogleBaseHook.refresh_credentials_retry()
def func():
return 42
assert func() == 42
def test_raise_non_refresh_error(self):
@hook.GoogleBaseHook.refresh_credentials_retry()
def func():
raise ValueError("This is a test ValueError.")
with pytest.raises(ValueError, match="This is a test ValueError."):
func()
@mock.patch("tenacity.nap.time.sleep", mock.MagicMock())
def test_retry_on_refresh_error(self):
func_return = mock.Mock(
side_effect=[RefreshError("Unable to acquire impersonated credentials", "test body"), 42]
)
@hook.GoogleBaseHook.refresh_credentials_retry()
def func():
return func_return()
assert func() == 42
| TestRefreshCredentialsRetry |
python | vyperlang__vyper | vyper/abi_types.py | {
"start": 4505,
"end": 5133
} | class ____(ABIType):
def __init__(self, bytes_bound):
if not bytes_bound >= 0:
raise InvalidABIType("Negative bytes_bound provided to ABI_Bytes")
self.bytes_bound = bytes_bound
def is_dynamic(self):
return True
# note that static_size for dynamic types is always 0
# (and embedded_static_size is always 32)
def static_size(self):
return 0
def dynamic_size_bound(self):
# length word + data
return 32 + ceil32(self.bytes_bound)
def selector_name(self):
return "bytes"
def is_complex_type(self):
return False
| ABI_Bytes |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/callbacks.py | {
"start": 1181,
"end": 1295
} | class ____(str, Enum):
"""Enum class for execution mode."""
SYNC = "sync"
ASYNC = "async"
| ExecutionMode |
python | google__pytype | pytype/overlays/special_builtins.py | {
"start": 26165,
"end": 26778
} | class ____(BuiltinClass):
"""Static method decorator."""
# Minimal signature, only used for constructing exceptions.
_SIGNATURE = function.Signature.from_param_names("staticmethod", ("func",))
_NAME = "staticmethod"
def call(self, node, func, args, alias_map=None):
if len(args.posargs) != 1:
raise error_types.WrongArgCount(self._SIGNATURE, args, self.ctx)
arg = args.posargs[0]
if not _check_method_decorator_arg(arg, "staticmethod", self.ctx):
return node, self.ctx.new_unsolvable(node)
return node, StaticMethodInstance(self.ctx, self, arg).to_variable(node)
| StaticMethod |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 11281,
"end": 11559
} | class ____(str, Enum):
"""
Class with TriggeredBy types for DagRun.
"""
CLI = "cli"
OPERATOR = "operator"
REST_API = "rest_api"
UI = "ui"
TEST = "test"
TIMETABLE = "timetable"
ASSET = "asset"
BACKFILL = "backfill"
| DagRunTriggeredByType |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/io_management/input_managers.py | {
"start": 5470,
"end": 5983
} | class ____(MyIOManager):
def load_input(self, context: dg.InputContext):
if context.upstream_output is None:
# load input from table since there is no upstream output
return read_dataframe_from_table(name="table_1")
else:
return super().load_input(context)
# end_better_load_input_subset
# start_execute_subselection
my_subselection_job.execute_in_process(
op_selection=["op2"],
)
# end_execute_subselection
# start_per_input_config
| MyNewInputLoader |
python | huggingface__transformers | src/transformers/models/splinter/modeling_splinter.py | {
"start": 25340,
"end": 26459
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when start and end positions are provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`):
Span-end scores (before SoftMax).
"""
loss: Optional[torch.FloatTensor] = None
start_logits: Optional[torch.FloatTensor] = None
end_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@auto_docstring(
custom_intro="""
Splinter Model for the recurring span selection task as done during the pretraining. The difference to the QA task
is that we do not have a question, but multiple question tokens that replace the occurrences of recurring spans
instead.
"""
)
| SplinterForPreTrainingOutput |
python | getsentry__sentry | src/sentry/auth/services/auth/model.py | {
"start": 6125,
"end": 7313
} | class ____(RpcModel):
id: int = -1
organization_id: int = -1
provider: str = ""
flags: RpcAuthProviderFlags = Field(default_factory=lambda: RpcAuthProviderFlags())
config: dict[str, Any]
default_role: int = -1
default_global_access: bool = False
def __hash__(self) -> int:
return hash((self.id, self.organization_id, self.provider))
def get_audit_log_data(self) -> dict[str, Any]:
provider = self.provider
# NOTE(isabella): for both standard fly SSO and fly-non-partner SSO, we should record the
# provider as "fly" in the audit log entry data; the only difference between the two is
# that the latter can be disabled by customers
if "fly" in self.provider:
provider = "fly"
return {"provider": provider, "config": self.config}
def get_provider(self) -> "Provider":
from sentry.auth import manager
return manager.get(self.provider, **self.config)
def get_scim_token(self) -> str | None:
from sentry.models.authprovider import get_scim_token
return get_scim_token(self.flags.scim_enabled, self.organization_id, self.provider)
| RpcAuthProvider |
python | kamyu104__LeetCode-Solutions | Python/rearrange-words-in-a-sentence.py | {
"start": 33,
"end": 327
} | class ____(object):
def arrangeWords(self, text):
"""
:type text: str
:rtype: str
"""
result = text.split()
result[0] = result[0].lower()
result.sort(key=len)
result[0] = result[0].title()
return " ".join(result)
| Solution |
python | run-llama__llama_index | llama-index-core/llama_index/core/callbacks/base_handler.py | {
"start": 317,
"end": 1664
} | class ____(ABC):
"""Base callback handler that can be used to track event starts and ends."""
def __init__(
self,
event_starts_to_ignore: List[CBEventType],
event_ends_to_ignore: List[CBEventType],
) -> None:
"""Initialize the base callback handler."""
self.event_starts_to_ignore = tuple(event_starts_to_ignore)
self.event_ends_to_ignore = tuple(event_ends_to_ignore)
@abstractmethod
def on_event_start(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
parent_id: str = "",
**kwargs: Any,
) -> str:
"""Run when an event starts and return id of event."""
@abstractmethod
def on_event_end(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
**kwargs: Any,
) -> None:
"""Run when an event ends."""
@abstractmethod
def start_trace(self, trace_id: Optional[str] = None) -> None:
"""Run when an overall trace is launched."""
@abstractmethod
def end_trace(
self,
trace_id: Optional[str] = None,
trace_map: Optional[Dict[str, List[str]]] = None,
) -> None:
"""Run when an overall trace is exited."""
| BaseCallbackHandler |
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_member_details.py | {
"start": 1303,
"end": 1497
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-member-details"
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
| OrganizationMemberTestBase |
python | pandas-dev__pandas | pandas/tests/frame/indexing/test_indexing.py | {
"start": 638,
"end": 52559
} | class ____:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
tm.assert_index_equal(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem_numeric_should_not_fallback_to_positional(self, any_numeric_dtype):
# GH51053
dtype = any_numeric_dtype
idx = Index([1, 0, 1], dtype=dtype)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=idx)
result = df[1]
expected = DataFrame([[1, 3], [4, 6]], columns=Index([1, 1], dtype=dtype))
tm.assert_frame_equal(result, expected, check_exact=True)
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.default_rng(2).standard_normal(len(df))
ad = np.random.default_rng(2).standard_normal(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_numeric_should_not_fallback_to_positional(self, any_numeric_dtype):
# GH51053
dtype = any_numeric_dtype
idx = Index([1, 0, 1], dtype=dtype)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=idx)
df[1] = 10
expected = DataFrame([[10, 2, 10], [10, 5, 10]], columns=idx)
tm.assert_frame_equal(df, expected, check_exact=True)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=int)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=int, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[len(datetime_frame) // 2]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(TypeError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning, match="will be reindexed"):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"lst", [[True, False, True], [True, True, True], [False, False, False]]
)
def test_getitem_boolean_list(self, lst):
df = DataFrame(np.arange(12).reshape(3, 4))
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
def test_getitem_boolean_iadd(self):
arr = np.random.default_rng(2).standard_normal((5, 5))
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((4, 3)),
index=[1, 10, "C", "E"],
columns=[1, 2, 3],
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame, using_infer_string):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.default_rng(2).standard_normal(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
smaller["col10"] = ["1", "2"]
if using_infer_string:
assert smaller["col10"].dtype == "str"
else:
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values.copy()
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
tm.assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
df_orig = df.copy()
mask = df > np.abs(df)
df[df > np.abs(df)] = np.nan
values = df_orig.values.copy()
values[mask.values] = np.nan
expected = DataFrame(values, index=df_orig.index, columns=df_orig.columns)
tm.assert_frame_equal(df, expected)
# set from DataFrame
df[df > np.abs(df)] = df * 2
np.putmask(values, mask.values, df.values * 2)
expected = DataFrame(values, index=df_orig.index, columns=df_orig.columns)
tm.assert_frame_equal(df, expected)
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
float_frame["B"] = 0
assert float_frame["B"].dtype == np.int64
# cast if pass array of course
float_frame["B"] = np.arange(len(float_frame))
assert issubclass(float_frame["B"].dtype.type, np.integer)
float_frame["foo"] = "bar"
float_frame["foo"] = 0
assert float_frame["foo"].dtype == np.int64
float_frame["foo"] = "bar"
float_frame["foo"] = 2.5
assert float_frame["foo"].dtype == np.float64
float_frame["something"] = 0
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2.5
assert float_frame["something"].dtype == np.float64
def test_setitem_corner(self, float_frame, using_infer_string):
# corner case
df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
del df["B"]
df["B"] = [1.0, 2.0, 3.0]
assert "B" in df
assert len(df.columns) == 2
df["A"] = "beginning"
df["E"] = "foo"
df["D"] = "bar"
df[datetime.now()] = "date"
df[datetime.now()] = 5.0
# what to do when empty frame with index
dm = DataFrame(index=float_frame.index)
dm["A"] = "foo"
dm["B"] = "bar"
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm["C"] = 1
assert dm["C"].dtype == np.int64
dm["E"] = 1.0
assert dm["E"].dtype == np.float64
# set existing column
dm["A"] = "bar"
assert "bar" == dm["A"].iloc[0]
dm = DataFrame(index=np.arange(3))
dm["A"] = 1
dm["foo"] = "bar"
del dm["foo"]
dm["foo"] = "bar"
if using_infer_string:
assert dm["foo"].dtype == "str"
else:
assert dm["foo"].dtype == np.object_
dm["coercible"] = ["1", "2", "3"]
if using_infer_string:
assert dm["coercible"].dtype == "str"
else:
assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.default_rng(2).random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self, using_infer_string):
# Difficulties with mixed-type data
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
if using_infer_string:
assert dm[2].dtype == "str"
else:
assert dm[2].dtype == np.object_
def test_setitem_None(self, float_frame):
# GH #766
float_frame[None] = float_frame["A"]
tm.assert_series_equal(
float_frame.iloc[:, -1], float_frame["A"], check_names=False
)
tm.assert_series_equal(
float_frame.loc[:, None], float_frame["A"], check_names=False
)
tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
def test_loc_setitem_boolean_mask_allfalse(self):
# GH 9596
df = DataFrame(
{"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
)
result = df.copy()
result.loc[result.b.isna(), "a"] = result.a.copy()
tm.assert_frame_equal(result, df)
def test_getitem_slice_empty(self):
df = DataFrame([[1]], columns=MultiIndex.from_product([["A"], ["a"]]))
result = df[:]
expected = DataFrame([[1]], columns=MultiIndex.from_product([["A"], ["a"]]))
tm.assert_frame_equal(result, expected)
# Ensure df[:] returns a view of df, not the same object
assert result is not df
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)))
# this is OK
df.iloc[:8:2]
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 5)), index=range(0, 20, 2)
)
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
original = float_frame.copy()
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
sliced.loc[:, "C"] = 4.0
tm.assert_frame_equal(float_frame, original)
def test_getitem_setitem_non_ix_labels(self):
df = DataFrame(range(20), index=date_range("2020-01-01", periods=20))
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.default_rng(2).standard_normal((3, 2)))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
# GH#1321
df = DataFrame(np.random.default_rng(2).standard_normal((3, 2)))
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
def test_setitem_fancy_scalar(self, float_frame):
f = float_frame
expected = float_frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
f[col]
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = np.random.default_rng(2).standard_normal()
expected.iloc[i, j] = val
ix[idx, col] = val
tm.assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self, float_frame):
f = float_frame
ix = f.loc
expected = f.reindex(columns=["B", "D"])
result = ix[:, [False, True, False, True]]
tm.assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=["B", "D"])
result = ix[f.index[5:10], [False, True, False, True]]
tm.assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, :]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec], columns=["C", "D"])
tm.assert_frame_equal(result, expected)
def test_setitem_fancy_boolean(self, float_frame):
# from 2d, set with booleans
frame = float_frame.copy()
expected = float_frame.copy()
values = expected.values.copy()
mask = frame["A"] > 0
frame.loc[mask] = 0.0
values[mask.values] = 0.0
expected = DataFrame(values, index=expected.index, columns=expected.columns)
tm.assert_frame_equal(frame, expected)
frame = float_frame.copy()
expected = float_frame.copy()
values = expected.values.copy()
frame.loc[mask, ["A", "B"]] = 0.0
values[mask.values, :2] = 0.0
expected = DataFrame(values, index=expected.index, columns=expected.columns)
tm.assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self, float_frame):
result = float_frame.iloc[[1, 4, 7]]
expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
tm.assert_frame_equal(result, expected)
result = float_frame.iloc[:, [2, 0, 1]]
expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_boolean_misaligned(self, float_frame):
# boolean index misaligned labels
mask = float_frame["A"][::-1] > 1
result = float_frame.loc[mask]
expected = float_frame.loc[mask[::-1]]
tm.assert_frame_equal(result, expected)
cp = float_frame.copy()
expected = float_frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
tm.assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.default_rng(2).standard_normal((3, 2)))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
tm.assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]), np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
tm.assert_frame_equal(df, expected)
def test_getitem_float_label_positional(self):
# GH 53338
index = Index([1.5, 2])
df = DataFrame(range(2), index=index)
result = df[1:2]
expected = DataFrame([1], index=[2.0])
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)), index=index)
result = df.loc[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4:5]
expected = df.reindex([4, 5]) # reindex with int
tm.assert_frame_equal(result, expected, check_index_type=False)
assert len(result) == 2
result = df.loc[4:5]
expected = df.reindex([4.0, 5.0]) # reindex with float
tm.assert_frame_equal(result, expected)
assert len(result) == 2
# loc_float changes this to work properly
result = df.loc[1:2]
expected = df.iloc[0:2]
tm.assert_frame_equal(result, expected)
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)), index=index)
# positional slicing only via iloc!
msg = (
"cannot do positional indexing on Index with "
r"these indexers \[1.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
result = df.iloc[4:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
cp = df.copy()
with pytest.raises(TypeError, match=_slice_msg):
cp.iloc[1.0:5] = 0
with pytest.raises(TypeError, match=msg):
result = cp.iloc[1.0:5] == 0
assert result.values.all()
assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
assert (cp.iloc[4:5] == 0).values.all()
assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
expected = df
tm.assert_frame_equal(result, expected)
assert len(result) == 5
result = df.loc[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4.51:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
result = df.loc[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 5
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
assert (result == 0).values.all()
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 3)),
index=["a", "b", "c", "d", "e"],
columns=["foo", "bar", "baz"],
)
df["timestamp"] = Timestamp("20010102").as_unit("s")
# check our dtypes
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 3 + [np.dtype("datetime64[s]")],
index=["foo", "bar", "baz", "timestamp"],
)
tm.assert_series_equal(result, expected)
# GH#16674 iNaT is treated as an integer when given by the user
with pytest.raises(TypeError, match="Invalid value"):
df.loc["b", "timestamp"] = iNaT
# allow this syntax (as of GH#3216)
df.loc["c", "timestamp"] = np.nan
assert isna(df.loc["c", "timestamp"])
# allow this syntax
df.loc["d", :] = np.nan
assert not isna(df.loc["c", :]).all()
def test_setitem_mixed_datetime(self):
# GH 9336
df = DataFrame(0, columns=list("ab"), index=range(6))
df["b"] = pd.NaT
df.loc[0, "b"] = datetime(2012, 1, 1)
with pytest.raises(TypeError, match="Invalid value"):
df.loc[1, "b"] = 1
def test_setitem_frame_float(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
float_frame.loc[float_frame.index[-2] :, ["A", "B"]] = piece.values
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_setitem_frame_mixed(self, float_string_frame):
# GH 3216
# already aligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=f.index[0:2], columns=["A", "B"]
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame):
# GH#3216 rows unaligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
index=list(f.index[0:2]) + ["foo", "bar"],
columns=["A", "B"],
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(
f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2]
)
def test_setitem_frame_mixed_key_unaligned(self, float_string_frame):
# GH#3216 key is unaligned with values
f = float_string_frame.copy()
piece = f.loc[f.index[:2], ["A"]]
piece.index = f.index[-2:]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece
piece["B"] = np.nan
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_ndarray(self, float_string_frame):
# GH#3216 ndarray
f = float_string_frame.copy()
piece = float_string_frame.loc[f.index[:2], ["A", "B"]]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece.values
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_upcast(self):
# needs upcasting
df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])
df2 = df.copy()
with pytest.raises(TypeError, match="Invalid value"):
df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
# Manually upcast so we can add .5
df = df.astype({"A": "float64", "B": "float64"})
df2 = df2.astype({"A": "float64", "B": "float64"})
df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
expected = df.reindex(columns=["A", "B"])
expected += 0.5
expected["C"] = df["C"]
tm.assert_frame_equal(df2, expected)
def test_setitem_frame_align(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
piece.index = float_frame.index[-2:]
piece.columns = ["A", "B"]
float_frame.loc[float_frame.index[-2:], ["A", "B"]] = piece
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 3)),
index=["foo", "foo", "bar", "baz", "bar"],
)
result = df.loc["foo"]
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.loc["bar"]
expected = df.iloc[[2, 4]]
tm.assert_frame_equal(result, expected)
result = df.loc["baz"]
expected = df.iloc[3]
tm.assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 3)),
index=["foo", "foo", "bar", "baz", "bar"],
)
result = df.loc[["bar"]]
exp = df.iloc[[2, 4]]
tm.assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
tm.assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("bool_value", [True, False])
def test_getitem_setitem_ix_bool_keyerror(self, bool_value):
# #2199
df = DataFrame({"a": [1, 2, 3]})
message = f"{bool_value}: boolean label can not be used without a boolean index"
with pytest.raises(KeyError, match=message):
df.loc[bool_value]
msg = "cannot use a single bool to index into setitem"
with pytest.raises(KeyError, match=msg):
df.loc[bool_value] = 0
# TODO: rename? remove?
def test_single_element_ix_dont_upcast(self, float_frame):
float_frame["E"] = 1
assert issubclass(float_frame["E"].dtype.type, (int, np.integer))
result = float_frame.loc[float_frame.index[5], "E"]
assert is_integer(result)
# GH 11617
df = DataFrame({"a": [1.23]})
df["b"] = 666
result = df.loc[0, "b"]
assert is_integer(result)
expected = Series([666], index=range(1), name="b")
result = df.loc[[0], "b"]
tm.assert_series_equal(result, expected)
def test_iloc_callable_tuple_return_value_raises(self):
# GH53769: Enforced pandas 3.0
df = DataFrame(np.arange(40).reshape(10, 4), index=range(0, 20, 2))
msg = "Returning a tuple from"
with pytest.raises(ValueError, match=msg):
df.iloc[lambda _: (0,)]
with pytest.raises(ValueError, match=msg):
df.iloc[lambda _: (0,)] = 1
def test_iloc_row(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)), index=range(0, 20, 2)
)
result = df.iloc[1]
exp = df.loc[2]
tm.assert_series_equal(result, exp)
result = df.iloc[2]
exp = df.loc[4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[slice(4, 8)]
expected = df.loc[8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[1, 2, 4, 6]]
expected = df.reindex(df.index[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_row_slice_view(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)), index=range(0, 20, 2)
)
original = df.copy()
# verify slice is view
# setting it makes it raise/warn
subset = df.iloc[slice(4, 8)]
assert np.shares_memory(df[2], subset[2])
exp_col = original[2].copy()
subset.loc[:, 2] = 0.0
tm.assert_series_equal(df[2], exp_col)
def test_iloc_col(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((4, 10)), columns=range(0, 20, 2)
)
result = df.iloc[:, 1]
exp = df.loc[:, 2]
tm.assert_series_equal(result, exp)
result = df.iloc[:, 2]
exp = df.loc[:, 4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[:, slice(4, 8)]
expected = df.loc[:, 8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[:, [1, 2, 4, 6]]
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_col_slice_view(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((4, 10)), columns=range(0, 20, 2)
)
original = df.copy()
subset = df.iloc[:, slice(4, 8)]
# verify slice is view
assert np.shares_memory(df[8]._values, subset[8]._values)
subset[8] = 0.0
# subset changed
assert (subset[8] == 0).all()
# but df itself did not change (setitem replaces full column)
tm.assert_frame_equal(df, original)
def test_loc_duplicates(self):
# gh-17105
# insert a duplicate element to the index
trange = date_range(
start=Timestamp(year=2017, month=1, day=1),
end=Timestamp(year=2017, month=1, day=5),
)
trange = trange.insert(loc=5, item=Timestamp(year=2017, month=1, day=5))
df = DataFrame(0, index=trange, columns=["A", "B"])
bool_idx = np.array([False, False, False, False, False, True])
# assignment
df.loc[trange[bool_idx], "A"] = 6
expected = DataFrame(
{"A": [0, 0, 0, 0, 6, 6], "B": [0, 0, 0, 0, 0, 0]}, index=trange
)
tm.assert_frame_equal(df, expected)
# in-place
df = DataFrame(0, index=trange, columns=["A", "B"])
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
def test_setitem_with_unaligned_tz_aware_datetime_column(self):
# GH 12981
# Assignment of unaligned offset-aware datetime series.
# Make sure timezone isn't lost
column = Series(date_range("2015-01-01", periods=3, tz="utc"), name="dates")
df = DataFrame({"dates": column})
df["dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
df = DataFrame({"dates": column})
df.loc[[0, 1, 2], "dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
def test_loc_setitem_datetimelike_with_inference(self):
# GH 7592
# assignment of timedeltas with NaT
one_hour = timedelta(hours=1)
df = DataFrame(index=date_range("20130101", periods=4, unit="ns"))
df["A"] = np.array([1 * one_hour] * 4, dtype="m8[ns]")
df.loc[:, "B"] = np.array([2 * one_hour] * 4, dtype="m8[ns]")
df.loc[df.index[:3], "C"] = np.array([3 * one_hour] * 3, dtype="m8[ns]")
df.loc[:, "D"] = np.array([4 * one_hour] * 4, dtype="m8[ns]")
df.loc[df.index[:3], "E"] = np.array([5 * one_hour] * 3, dtype="m8[ns]")
df["F"] = np.timedelta64("NaT")
df.loc[df.index[:-1], "F"] = np.array([6 * one_hour] * 3, dtype="m8[ns]")
df.loc[df.index[-3] :, "G"] = date_range("20130101", periods=3, unit="ns")
df["H"] = np.datetime64("NaT")
result = df.dtypes
expected = Series(
[np.dtype("timedelta64[ns]")] * 6 + [np.dtype("datetime64[ns]")] * 2,
index=list("ABCDEFGH"),
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_indexing_mixed(self):
df = DataFrame(
{
0: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
1: {
35: np.nan,
40: 0.32632316859446198,
43: np.nan,
49: 0.32632316859446198,
50: 0.39114724480578139,
},
2: {
35: np.nan,
40: np.nan,
43: 0.29012581014105987,
49: np.nan,
50: np.nan,
},
3: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
4: {
35: 0.34215328467153283,
40: np.nan,
43: np.nan,
49: np.nan,
50: np.nan,
},
"y": {35: 0, 40: 0, 43: 0, 49: 0, 50: 1},
}
)
# mixed int/float ok
df2 = df.copy()
df2[df2 > 0.3] = 1
expected = df.copy()
expected.loc[40, 1] = 1
expected.loc[49, 1] = 1
expected.loc[50, 1] = 1
expected.loc[35, 4] = 1
tm.assert_frame_equal(df2, expected)
df["foo"] = "test"
msg = "not supported between instances|unorderable types|Invalid comparison"
with pytest.raises(TypeError, match=msg):
df[df > 0.3] = 1
def test_type_error_multiindex(self):
# See gh-12218
mi = MultiIndex.from_product([["x", "y"], [0, 1]], names=[None, "c"])
dg = DataFrame(
[[1, 1, 2, 2], [3, 3, 4, 4]], columns=mi, index=Index(range(2), name="i")
)
with pytest.raises(InvalidIndexError, match="slice"):
dg[:, 0]
index = Index(range(2), name="i")
columns = MultiIndex(
levels=[["x", "y"], [0, 1]], codes=[[0, 1], [0, 0]], names=[None, "c"]
)
expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index)
result = dg.loc[:, (slice(None), 0)]
tm.assert_frame_equal(result, expected)
name = ("x", 0)
index = Index(range(2), name="i")
expected = Series([1, 3], index=index, name=name)
result = dg["x", 0]
tm.assert_series_equal(result, expected)
def test_getitem_interval_index_partial_indexing(self):
# GH#36490
df = DataFrame(
np.ones((3, 4)), columns=pd.IntervalIndex.from_breaks(np.arange(5))
)
expected = df.iloc[:, 0]
res = df[0.5]
tm.assert_series_equal(res, expected)
res = df.loc[:, 0.5]
tm.assert_series_equal(res, expected)
def test_setitem_array_as_cell_value(self):
# GH#43422
df = DataFrame(columns=["a", "b"], dtype=object)
df.loc[0] = {"a": np.zeros((2,)), "b": np.zeros((2, 2))}
expected = DataFrame({"a": [np.zeros((2,))], "b": [np.zeros((2, 2))]})
tm.assert_frame_equal(df, expected)
def test_iloc_setitem_nullable_2d_values(self):
df = DataFrame({"A": [1, 2, 3]}, dtype="Int64")
orig = df.copy()
df.loc[:] = df.values[:, ::-1]
tm.assert_frame_equal(df, orig)
df.loc[:] = pd.core.arrays.NumpyExtensionArray(df.values[:, ::-1])
tm.assert_frame_equal(df, orig)
df.iloc[:] = df.iloc[:, :].copy()
tm.assert_frame_equal(df, orig)
def test_getitem_segfault_with_empty_like_object(self):
# GH#46848
df = DataFrame(np.empty((1, 1), dtype=object))
df[0] = np.empty_like(df[0])
# this produces the segfault
df[[0]]
@pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")
@pytest.mark.parametrize(
"null", [pd.NaT, pd.NaT.to_numpy("M8[ns]"), pd.NaT.to_numpy("m8[ns]")]
)
def test_setting_mismatched_na_into_nullable_fails(
self, null, any_numeric_ea_dtype
):
# GH#44514 don't cast mismatched nulls to pd.NA
df = DataFrame({"A": [1, 2, 3]}, dtype=any_numeric_ea_dtype)
ser = df["A"].copy()
arr = ser._values
msg = "|".join(
[
r"timedelta64\[ns\] cannot be converted to (Floating|Integer)Dtype",
r"datetime64\[ns\] cannot be converted to (Floating|Integer)Dtype",
"'values' contains non-numeric NA",
r"Invalid value '.*' for dtype '(U?Int|Float)\d{1,2}'",
]
)
with pytest.raises(TypeError, match=msg):
arr[0] = null
with pytest.raises(TypeError, match=msg):
arr[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
ser[0] = null
with pytest.raises(TypeError, match=msg):
ser[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
ser.iloc[0] = null
with pytest.raises(TypeError, match=msg):
ser.iloc[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
df.iloc[0, 0] = null
with pytest.raises(TypeError, match=msg):
df.iloc[:2, 0] = [null, null]
# Multi-Block
df2 = df.copy()
df2["B"] = ser.copy()
with pytest.raises(TypeError, match=msg):
df2.iloc[0, 0] = null
with pytest.raises(TypeError, match=msg):
df2.iloc[:2, 0] = [null, null]
def test_loc_expand_empty_frame_keep_index_name(self):
# GH#45621
df = DataFrame(columns=["b"], index=Index([], name="a"))
df.loc[0] = 1
expected = DataFrame({"b": [1]}, index=Index([0], name="a"))
tm.assert_frame_equal(df, expected)
def test_loc_expand_empty_frame_keep_midx_names(self):
# GH#46317
df = DataFrame(
columns=["d"], index=MultiIndex.from_tuples([], names=["a", "b", "c"])
)
df.loc[(1, 2, 3)] = "foo"
expected = DataFrame(
{"d": ["foo"]},
index=MultiIndex.from_tuples([(1, 2, 3)], names=["a", "b", "c"]),
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"val, idxr",
[
("x", "a"),
("x", ["a"]),
(1, "a"),
(1, ["a"]),
],
)
def test_loc_setitem_rhs_frame(self, idxr, val):
# GH#47578
df = DataFrame({"a": [1, 2]})
with pytest.raises(TypeError, match="Invalid value"):
df.loc[:, idxr] = DataFrame({"a": [val, 11]}, index=[1, 2])
def test_iloc_setitem_enlarge_no_warning(self):
# GH#47381
df = DataFrame(columns=["a", "b"])
expected = df.copy()
view = df[:]
df.iloc[:, 0] = np.array([1, 2], dtype=np.float64)
tm.assert_frame_equal(view, expected)
def test_loc_internals_not_updated_correctly(self):
# GH#47867 all steps are necessary to reproduce the initial bug
df = DataFrame(
{"bool_col": True, "a": 1, "b": 2.5},
index=MultiIndex.from_arrays([[1, 2], [1, 2]], names=["idx1", "idx2"]),
)
idx = [(1, 1)]
df["c"] = 3
df.loc[idx, "c"] = 0
df.loc[idx, "c"]
df.loc[idx, ["a", "b"]]
df.loc[idx, "c"] = 15
result = df.loc[idx, "c"]
expected = df = Series(
15,
index=MultiIndex.from_arrays([[1], [1]], names=["idx1", "idx2"]),
name="c",
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("val", [None, [None], pd.NA, [pd.NA]])
def test_iloc_setitem_string_list_na(self, val):
# GH#45469
df = DataFrame({"a": ["a", "b", "c"]}, dtype="string")
df.iloc[[0], :] = val
expected = DataFrame({"a": [pd.NA, "b", "c"]}, dtype="string")
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("val", [None, pd.NA])
def test_iloc_setitem_string_na(self, val):
# GH#45469
df = DataFrame({"a": ["a", "b", "c"]}, dtype="string")
df.iloc[0, :] = val
expected = DataFrame({"a": [pd.NA, "b", "c"]}, dtype="string")
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("func", [list, Series, np.array])
def test_iloc_setitem_ea_null_slice_length_one_list(self, func):
# GH#48016
df = DataFrame({"a": [1, 2, 3]}, dtype="Int64")
df.iloc[:, func([0])] = 5
expected = DataFrame({"a": [5, 5, 5]}, dtype="Int64")
tm.assert_frame_equal(df, expected)
def test_loc_named_tuple_for_midx(self):
# GH#48124
df = DataFrame(
index=MultiIndex.from_product(
[["A", "B"], ["a", "b", "c"]], names=["first", "second"]
)
)
indexer_tuple = namedtuple("Indexer", df.index.names)
idxr = indexer_tuple(first="A", second=["a", "b"])
result = df.loc[idxr, :]
expected = DataFrame(
index=MultiIndex.from_tuples(
[("A", "a"), ("A", "b")], names=["first", "second"]
)
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("indexer", [["a"], "a"])
@pytest.mark.parametrize("col", [{}, {"b": 1}])
def test_set_2d_casting_date_to_int(self, col, indexer):
# GH#49159
df = DataFrame(
{"a": [Timestamp("2022-12-29"), Timestamp("2022-12-30")], **col},
)
df.loc[[1], indexer] = df["a"] + pd.Timedelta(days=1)
expected = DataFrame(
{"a": [Timestamp("2022-12-29"), Timestamp("2022-12-31")], **col},
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("has_ref", [True, False])
@pytest.mark.parametrize("col", [{}, {"name": "a"}])
def test_loc_setitem_reordering_with_all_true_indexer(self, col, has_ref):
# GH#48701
n = 17
df = DataFrame({**col, "x": range(n), "y": range(n)})
value = df[["x", "y"]].copy()
expected = df.copy()
if has_ref:
view = df[:] # noqa: F841
df.loc[n * [True], ["x", "y"]] = value
tm.assert_frame_equal(df, expected)
def test_loc_rhs_empty_warning(self):
# GH48480
df = DataFrame(columns=["a", "b"])
expected = df.copy()
rhs = DataFrame(columns=["a"])
with tm.assert_produces_warning(None):
df.loc[:, "a"] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_ea_series_indexer(self):
# GH#49521
df = DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
indexer = Series([0, 1], dtype="Int64")
row_indexer = Series([1], dtype="Int64")
result = df.iloc[row_indexer, indexer]
expected = DataFrame([[5, 6]], index=range(1, 2))
tm.assert_frame_equal(result, expected)
result = df.iloc[row_indexer.values, indexer.values]
tm.assert_frame_equal(result, expected)
def test_iloc_ea_series_indexer_with_na(self):
# GH#49521
df = DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
indexer = Series([0, pd.NA], dtype="Int64")
msg = "cannot convert"
with pytest.raises(ValueError, match=msg):
df.iloc[:, indexer]
with pytest.raises(ValueError, match=msg):
df.iloc[:, indexer.values]
@pytest.mark.parametrize("indexer", [True, (True,)])
@pytest.mark.parametrize("dtype", [bool, "boolean"])
def test_loc_bool_multiindex(self, performance_warning, dtype, indexer):
# GH#47687
midx = MultiIndex.from_arrays(
[
Series([True, True, False, False], dtype=dtype),
Series([True, False, True, False], dtype=dtype),
],
names=["a", "b"],
)
df = DataFrame({"c": [1, 2, 3, 4]}, index=midx)
with tm.maybe_produces_warning(performance_warning, isinstance(indexer, tuple)):
result = df.loc[indexer]
expected = DataFrame(
{"c": [1, 2]}, index=Index([True, False], name="b", dtype=dtype)
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("utc", [False, True])
@pytest.mark.parametrize("indexer", ["date", ["date"]])
def test_loc_datetime_assignment_dtype_does_not_change(self, utc, indexer):
# GH#49837
df = DataFrame(
{
"date": to_datetime(
[datetime(2022, 1, 20), datetime(2022, 1, 22)], utc=utc
),
"update": [True, False],
}
)
expected = df.copy(deep=True)
update_df = df[df["update"]]
df.loc[df["update"], indexer] = update_df["date"]
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer, idx", [(tm.loc, 1), (tm.iloc, 2)])
def test_setitem_value_coercing_dtypes(self, indexer, idx):
# GH#50467
df = DataFrame([["1", np.nan], ["2", np.nan], ["3", np.nan]], dtype=object)
rhs = DataFrame([[1, np.nan], [2, np.nan]])
indexer(df)[:idx, :] = rhs
expected = DataFrame([[1, np.nan], [2, np.nan], ["3", np.nan]], dtype=object)
tm.assert_frame_equal(df, expected)
def test_big_endian_support_selecting_columns(self):
# GH#57457
columns = ["a"]
data = [np.array([1, 2], dtype=">f8")]
df = DataFrame(dict(zip(columns, data)))
result = df[df.columns]
dfexp = DataFrame({"a": [1, 2]}, dtype=">f8")
expected = dfexp[dfexp.columns]
tm.assert_frame_equal(result, expected)
| TestDataFrameIndexing |
python | fastapi__sqlmodel | tests/test_enums_models.py | {
"start": 64,
"end": 121
} | class ____(str, enum.Enum):
A = "A"
B = "B"
| MyEnum1 |
python | getsentry__sentry | tests/sentry/deletions/test_file.py | {
"start": 325,
"end": 5494
} | class ____(TestCase):
def test_get_query_filter_orphaned_release_file(self) -> None:
"""Test that orphaned release.file type Files are selected for deletion"""
project = self.create_project()
self.create_release(project=project)
# Create an orphaned release.file (no ReleaseFile pointing to it)
old_timestamp = timezone.now() - timedelta(days=91)
orphaned_file = File.objects.create(
name="orphaned.js",
type="release.file",
timestamp=old_timestamp,
)
# Get the deletion task and query filter
task = FileDeletionTask(
manager=None, # type: ignore[arg-type]
model=File,
query={},
)
query_filter = task.get_query_filter()
# Apply the filter to get Files that should be deleted
files_to_delete = File.objects.filter(query_filter)
assert orphaned_file in files_to_delete
def test_get_query_filter_does_not_select_referenced_file(self) -> None:
"""Test that Files referenced by ReleaseFile are NOT selected for deletion"""
project = self.create_project()
release = self.create_release(project=project)
# Create a File and ReleaseFile pointing to it
old_timestamp = timezone.now() - timedelta(days=91)
referenced_file = File.objects.create(
name="referenced.js",
type="release.file",
timestamp=old_timestamp,
)
ReleaseFile.objects.create(
organization_id=project.organization_id,
release_id=release.id,
file=referenced_file,
name="referenced.js",
ident="abc123",
)
# Get the deletion task and query filter
task = FileDeletionTask(
manager=None, # type: ignore[arg-type]
model=File,
query={},
)
query_filter = task.get_query_filter()
# Apply the filter
files_to_delete = File.objects.filter(query_filter)
assert referenced_file not in files_to_delete
def test_get_query_filter_does_not_select_recent_files(self) -> None:
"""Test that recent Files are NOT selected even if orphaned"""
# Create an orphaned file but with recent timestamp
recent_file = File.objects.create(
name="recent.js",
type="release.file",
timestamp=timezone.now() - timedelta(days=30), # Only 30 days old
)
# Get the deletion task and query filter
task = FileDeletionTask(
manager=None, # type: ignore[arg-type]
model=File,
query={},
)
query_filter = task.get_query_filter()
# Apply the filter
files_to_delete = File.objects.filter(query_filter)
assert recent_file not in files_to_delete
def test_get_query_filter_artifact_index_files(self) -> None:
"""Test that orphaned release.artifact-index Files are selected"""
old_timestamp = timezone.now() - timedelta(days=91)
orphaned_index = File.objects.create(
name="artifact-index.json",
type="release.artifact-index",
timestamp=old_timestamp,
)
task = FileDeletionTask(
manager=None, # type: ignore[arg-type]
model=File,
query={},
)
query_filter = task.get_query_filter()
files_to_delete = File.objects.filter(query_filter)
assert orphaned_index in files_to_delete
def test_get_query_filter_does_not_select_other_file_types(self) -> None:
"""Test that non-release file types are NOT selected"""
old_timestamp = timezone.now() - timedelta(days=91)
# Create files with different types
artifact_bundle_file = File.objects.create(
name="bundle.zip",
type="artifact.bundle",
timestamp=old_timestamp,
)
debug_file = File.objects.create(
name="debug.sym",
type="debug.file",
timestamp=old_timestamp,
)
task = FileDeletionTask(
manager=None, # type: ignore[arg-type]
model=File,
query={},
)
query_filter = task.get_query_filter()
files_to_delete = File.objects.filter(query_filter)
assert artifact_bundle_file not in files_to_delete
assert debug_file not in files_to_delete
def test_get_child_relations(self) -> None:
"""Test that FileBlobIndex records are returned as child relations"""
file = File.objects.create(
name="test.js",
type="release.file",
)
task = FileDeletionTask(
manager=None, # type: ignore[arg-type]
model=File,
query={},
)
child_relations = task.get_child_relations(file)
# Should have one relation for FileBlobIndex
assert len(child_relations) == 1
assert child_relations[0].params["model"] == FileBlobIndex
assert child_relations[0].params["query"] == {"file_id": file.id}
| FileDeletionTaskTest |
python | davidhalter__jedi | test/refactor/extract_function.py | {
"start": 8278,
"end": 8629
} | class ____:
# comment
def ab(self, b):
#foo
local1 = 3
local2 = 4
return local1 * glob1 * b
# bar
def f(self, b, c):
#? 11 text {'new_name': 'ab', 'until_line': 11, 'until_column': 10}
return self.ab(b)
# -------------------------------------------------- in-method-range-3
glob1 = 1
| X |
python | walkccc__LeetCode | solutions/2145. Count the Hidden Sequences/2145-2.py | {
"start": 0,
"end": 352
} | class ____:
def numberOfArrays(
self,
differences: list[int],
lower: int,
upper: int,
) -> int:
prefix = 0
mn = 0 # Starts from 0.
mx = 0 # Starts from 0.
for d in differences:
prefix += d
mn = min(mn, prefix)
mx = max(mx, prefix)
return max(0, (upper - lower) - (mx - mn) + 1)
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 68537,
"end": 72410
} | class ____(Request):
"""
Scroll through task events, sorted by timestamp
:param task: Task ID
:type task: str
:param order: 'asc' (default) or 'desc'.
:type order: str
:param scroll_id: Pass this value on next call to get next page
:type scroll_id: str
:param batch_size: Number of events to return each time (default 500)
:type batch_size: int
:param event_type: Return only events of this type
:type event_type: str
"""
_service = "events"
_action = "get_task_events"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"batch_size": {
"description": "Number of events to return each time (default 500)",
"type": "integer",
},
"event_type": {
"description": "Return only events of this type",
"type": "string",
},
"order": {
"description": "'asc' (default) or 'desc'.",
"enum": ["asc", "desc"],
"type": "string",
},
"scroll_id": {
"description": "Pass this value on next call to get next page",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
order: Optional[str] = None,
scroll_id: Optional[str] = None,
batch_size: Optional[int] = None,
event_type: Optional[str] = None,
**kwargs: Any
) -> None:
super(GetTaskEventsRequest, self).__init__(**kwargs)
self.task = task
self.order = order
self.scroll_id = scroll_id
self.batch_size = batch_size
self.event_type = event_type
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("order")
def order(self) -> Optional[str]:
return self._property_order
@order.setter
def order(self, value: Optional[str]) -> None:
if value is None:
self._property_order = None
return
self.assert_isinstance(value, "order", six.string_types)
self._property_order = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
@schema_property("batch_size")
def batch_size(self) -> Optional[int]:
return self._property_batch_size
@batch_size.setter
def batch_size(self, value: Optional[int]) -> None:
if value is None:
self._property_batch_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "batch_size", six.integer_types)
self._property_batch_size = value
@schema_property("event_type")
def event_type(self) -> Optional[str]:
return self._property_event_type
@event_type.setter
def event_type(self, value: Optional[str]) -> None:
if value is None:
self._property_event_type = None
return
self.assert_isinstance(value, "event_type", six.string_types)
self._property_event_type = value
| GetTaskEventsRequest |
python | pennersr__django-allauth | tests/apps/account/test_logout.py | {
"start": 947,
"end": 2245
} | class ____(TestCase):
@override_settings(ACCOUNT_LOGOUT_ON_GET=True)
def test_logout_view_on_get(self):
c, resp = self._logout_view("get")
self.assertTemplateUsed(resp, "account/messages/logged_out.txt")
@override_settings(ACCOUNT_LOGOUT_ON_GET=False)
def test_logout_view_on_post(self):
c, resp = self._logout_view("get")
self.assertTemplateUsed(
resp, "account/logout.%s" % app_settings.TEMPLATE_EXTENSION
)
receiver_mock = Mock()
user_logged_out.connect(receiver_mock)
resp = c.post(reverse("account_logout"))
self.assertTemplateUsed(resp, "account/messages/logged_out.txt")
receiver_mock.assert_called_once_with(
sender=get_user_model(),
request=resp.wsgi_request,
user=get_user_model().objects.get(username="john"),
signal=user_logged_out,
)
user_logged_out.disconnect(receiver_mock)
def _logout_view(self, method):
c = Client()
user = get_user_model().objects.create(username="john", is_active=True)
user.set_password("doe")
user.save()
c = Client()
c.login(username="john", password="doe")
return c, getattr(c, method)(reverse("account_logout"))
| LogoutTests |
python | getsentry__sentry | tests/sentry/identity/test_oauth2.py | {
"start": 930,
"end": 6297
} | class ____(TestCase):
def setUp(self) -> None:
sentry.identity.register(DummyProvider)
super().setUp()
self.request = RequestFactory().get("/")
self.request.subdomain = None
def tearDown(self) -> None:
super().tearDown()
sentry.identity.unregister(DummyProvider)
@cached_property
def view(self):
return OAuth2CallbackView(
access_token_url="https://example.org/oauth/token",
client_id=123456,
client_secret="secret-value",
)
@responses.activate
def test_exchange_token_success(self, mock_record: MagicMock) -> None:
responses.add(
responses.POST, "https://example.org/oauth/token", json={"token": "a-fake-token"}
)
pipeline = IdentityPipeline(request=self.request, provider_key="dummy")
code = "auth-code"
result = self.view.exchange_token(self.request, pipeline, code)
assert "token" in result
assert "a-fake-token" == result["token"]
assert len(responses.calls) == 1
assert responses.calls[0].request.url == "https://example.org/oauth/token"
data = dict(parse_qsl(responses.calls[0].request.body))
assert data == {
"client_id": "123456",
"client_secret": "secret-value",
"code": "auth-code",
"grant_type": "authorization_code",
"redirect_uri": "http://testserver/extensions/default/setup/",
}
assert_slo_metric(mock_record, EventLifecycleOutcome.SUCCESS)
@responses.activate
def test_exchange_token_success_customer_domains(self, mock_record: MagicMock) -> None:
responses.add(
responses.POST, "https://example.org/oauth/token", json={"token": "a-fake-token"}
)
self.request.subdomain = "albertos-apples"
pipeline = IdentityPipeline(request=self.request, provider_key="dummy")
code = "auth-code"
result = self.view.exchange_token(self.request, pipeline, code)
assert "token" in result
assert "a-fake-token" == result["token"]
assert len(responses.calls) == 1
assert responses.calls[0].request.url == "https://example.org/oauth/token"
data = dict(parse_qsl(responses.calls[0].request.body))
assert data == {
"client_id": "123456",
"client_secret": "secret-value",
"code": "auth-code",
"grant_type": "authorization_code",
"redirect_uri": "http://testserver/extensions/default/setup/",
}
assert_slo_metric(mock_record, EventLifecycleOutcome.SUCCESS)
@responses.activate
def test_exchange_token_ssl_error(self, mock_record: MagicMock) -> None:
def ssl_error(request):
raise SSLError("Could not build connection")
responses.add_callback(
responses.POST, "https://example.org/oauth/token", callback=ssl_error
)
pipeline = IdentityPipeline(request=self.request, provider_key="dummy")
code = "auth-code"
result = self.view.exchange_token(self.request, pipeline, code)
assert "token" not in result
assert "error" in result
assert "error_description" in result
assert "SSL" in result["error_description"]
assert_failure_metric(mock_record, "ssl_error")
@responses.activate
def test_connection_error(self, mock_record: MagicMock) -> None:
def connection_error(request):
raise ConnectionError("Name or service not known")
responses.add_callback(
responses.POST, "https://example.org/oauth/token", callback=connection_error
)
pipeline = IdentityPipeline(request=self.request, provider_key="dummy")
code = "auth-code"
result = self.view.exchange_token(self.request, pipeline, code)
assert "token" not in result
assert "error" in result
assert "connect" in result["error"]
assert "error_description" in result
assert_failure_metric(mock_record, "connection_error")
@responses.activate
def test_exchange_token_no_json(self, mock_record: MagicMock) -> None:
responses.add(responses.POST, "https://example.org/oauth/token", body="")
pipeline = IdentityPipeline(request=self.request, provider_key="dummy")
code = "auth-code"
result = self.view.exchange_token(self.request, pipeline, code)
assert "token" not in result
assert "error" in result
assert "error_description" in result
assert "JSON" in result["error_description"]
assert_failure_metric(mock_record, "json_error")
@responses.activate
def test_api_error(self, mock_record: MagicMock) -> None:
responses.add(
responses.POST,
"https://example.org/oauth/token",
json={"token": "a-fake-token"},
status=401,
)
pipeline = IdentityPipeline(request=self.request, provider_key="dummy")
code = "auth-code"
result = self.view.exchange_token(self.request, pipeline, code)
assert "token" not in result
assert "error" in result
assert "401" in result["error"]
assert_failure_metric(mock_record, ApiUnauthorized('{"token": "a-fake-token"}'))
@control_silo_test
| OAuth2CallbackViewTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_django/DJ008.py | {
"start": 3576,
"end": 3827
} | class ____(models.Model):
"""Model with type-annotated abstract = True - should not trigger DJ008"""
new_field = models.CharField(max_length=10)
class Meta(TypedModelMeta):
abstract: ClassVar[bool] = True
| TypeAnnotatedAbstractModel1 |
python | walkccc__LeetCode | solutions/2939. Maximum Xor Product/2939.py | {
"start": 0,
"end": 289
} | class ____:
def maximumXorProduct(self, a: int, b: int, n: int) -> int:
MOD = 1_000_000_007
for bit in (2**i for i in range(n)):
# Pick a bit if it makes min(a, b) larger.
if a * b < (a ^ bit) * (b ^ bit):
a ^= bit
b ^= bit
return a * b % MOD
| Solution |
python | gevent__gevent | src/gevent/tests/test__backdoor.py | {
"start": 755,
"end": 1697
} | class ____(socket.socket):
__slots__ = ('banner',)
def __init__(self, *args, **kwargs):
self.banner = None
super(SocketWithBanner, self).__init__(*args, **kwargs)
def __enter__(self):
return socket.socket.__enter__(self)
def __exit__(self, t, v, tb):
return socket.socket.__exit__(self, t, v, tb)
@greentest.skipOnAppVeyor(
"With the update to libev 4.31 and potentially closing sockets in the background, "
"alternate tests started hanging on appveyor. Something like .E.E.E. "
"See https://ci.appveyor.com/project/denik/gevent/build/job/n9fynkoyt2bvk8b5 "
"It's not clear why, but presumably a socket isn't getting closed and a watcher is tied "
"to the wrong file descriptor. I haven't been able to reproduce. If it were a systemic "
"problem I'd expect to see more failures, so it is probably specific to resource management "
"in this test."
)
| SocketWithBanner |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.