language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | zarr-developers__zarr-python | src/zarr/registry.py | {
"start": 1098,
"end": 11490
} | class ____(dict[str, type[T]], Generic[T]):
def __init__(self) -> None:
super().__init__()
self.lazy_load_list: list[EntryPoint] = []
def lazy_load(self, use_entrypoint_name: bool = False) -> None:
for e in self.lazy_load_list:
self.register(e.load(), qualname=e.name if use_entrypoint_name else None)
self.lazy_load_list.clear()
def register(self, cls: type[T], qualname: str | None = None) -> None:
if qualname is None:
qualname = fully_qualified_name(cls)
self[qualname] = cls
__codec_registries: dict[str, Registry[Codec]] = defaultdict(Registry)
__pipeline_registry: Registry[CodecPipeline] = Registry()
__buffer_registry: Registry[Buffer] = Registry()
__ndbuffer_registry: Registry[NDBuffer] = Registry()
__chunk_key_encoding_registry: Registry[ChunkKeyEncoding] = Registry()
"""
The registry module is responsible for managing implementations of codecs,
pipelines, buffers, ndbuffers, and chunk key encodings and collecting them from entrypoints.
The implementation used is determined by the config.
The registry module is also responsible for managing dtypes.
"""
def _collect_entrypoints() -> list[Registry[Any]]:
"""
Collects codecs, pipelines, dtypes, buffers and ndbuffers from entrypoints.
Entry points can either be single items or groups of items.
Allowed syntax for entry_points.txt is e.g.
[zarr.codecs]
gzip = package:EntrypointGzipCodec1
[zarr.codecs.gzip]
some_name = package:EntrypointGzipCodec2
another = package:EntrypointGzipCodec3
[zarr]
buffer = package:TestBuffer1
[zarr.buffer]
xyz = package:TestBuffer2
abc = package:TestBuffer3
...
"""
entry_points = get_entry_points()
__buffer_registry.lazy_load_list.extend(entry_points.select(group="zarr.buffer"))
__buffer_registry.lazy_load_list.extend(entry_points.select(group="zarr", name="buffer"))
__ndbuffer_registry.lazy_load_list.extend(entry_points.select(group="zarr.ndbuffer"))
__ndbuffer_registry.lazy_load_list.extend(entry_points.select(group="zarr", name="ndbuffer"))
data_type_registry._lazy_load_list.extend(entry_points.select(group="zarr.data_type"))
data_type_registry._lazy_load_list.extend(entry_points.select(group="zarr", name="data_type"))
__chunk_key_encoding_registry.lazy_load_list.extend(
entry_points.select(group="zarr.chunk_key_encoding")
)
__chunk_key_encoding_registry.lazy_load_list.extend(
entry_points.select(group="zarr", name="chunk_key_encoding")
)
__pipeline_registry.lazy_load_list.extend(entry_points.select(group="zarr.codec_pipeline"))
__pipeline_registry.lazy_load_list.extend(
entry_points.select(group="zarr", name="codec_pipeline")
)
for e in entry_points.select(group="zarr.codecs"):
__codec_registries[e.name].lazy_load_list.append(e)
for group in entry_points.groups:
if group.startswith("zarr.codecs."):
codec_name = group.split(".")[2]
__codec_registries[codec_name].lazy_load_list.extend(entry_points.select(group=group))
return [
*__codec_registries.values(),
__pipeline_registry,
__buffer_registry,
__ndbuffer_registry,
__chunk_key_encoding_registry,
]
def _reload_config() -> None:
config.refresh()
def fully_qualified_name(cls: type) -> str:
module = cls.__module__
return module + "." + cls.__qualname__
def register_codec(key: str, codec_cls: type[Codec], *, qualname: str | None = None) -> None:
if key not in __codec_registries:
__codec_registries[key] = Registry()
__codec_registries[key].register(codec_cls, qualname=qualname)
def register_pipeline(pipe_cls: type[CodecPipeline]) -> None:
__pipeline_registry.register(pipe_cls)
def register_ndbuffer(cls: type[NDBuffer], qualname: str | None = None) -> None:
__ndbuffer_registry.register(cls, qualname)
def register_buffer(cls: type[Buffer], qualname: str | None = None) -> None:
__buffer_registry.register(cls, qualname)
def register_chunk_key_encoding(key: str, cls: type) -> None:
__chunk_key_encoding_registry.register(cls, key)
def get_codec_class(key: str, reload_config: bool = False) -> type[Codec]:
if reload_config:
_reload_config()
if key in __codec_registries:
# logger.debug("Auto loading codec '%s' from entrypoint", codec_id)
__codec_registries[key].lazy_load()
codec_classes = __codec_registries[key]
if not codec_classes:
raise KeyError(key)
config_entry = config.get("codecs", {}).get(key)
if config_entry is None:
if len(codec_classes) == 1:
return next(iter(codec_classes.values()))
warnings.warn(
f"Codec '{key}' not configured in config. Selecting any implementation.",
stacklevel=2,
category=ZarrUserWarning,
)
return list(codec_classes.values())[-1]
selected_codec_cls = codec_classes[config_entry]
if selected_codec_cls:
return selected_codec_cls
raise KeyError(key)
def _resolve_codec(data: dict[str, JSON]) -> Codec:
"""
Get a codec instance from a dict representation of that codec.
"""
# TODO: narrow the type of the input to only those dicts that map on to codec class instances.
return get_codec_class(data["name"]).from_dict(data) # type: ignore[arg-type]
def _parse_bytes_bytes_codec(data: dict[str, JSON] | Codec) -> BytesBytesCodec:
"""
Normalize the input to a ``BytesBytesCodec`` instance.
If the input is already a ``BytesBytesCodec``, it is returned as is. If the input is a dict, it
is converted to a ``BytesBytesCodec`` instance via the ``_resolve_codec`` function.
"""
from zarr.abc.codec import BytesBytesCodec
if isinstance(data, dict):
result = _resolve_codec(data)
if not isinstance(result, BytesBytesCodec):
msg = f"Expected a dict representation of a BytesBytesCodec; got a dict representation of a {type(result)} instead."
raise TypeError(msg)
else:
if not isinstance(data, BytesBytesCodec):
raise TypeError(f"Expected a BytesBytesCodec. Got {type(data)} instead.")
result = data
return result
def _parse_array_bytes_codec(data: dict[str, JSON] | Codec) -> ArrayBytesCodec:
"""
Normalize the input to a ``ArrayBytesCodec`` instance.
If the input is already a ``ArrayBytesCodec``, it is returned as is. If the input is a dict, it
is converted to a ``ArrayBytesCodec`` instance via the ``_resolve_codec`` function.
"""
from zarr.abc.codec import ArrayBytesCodec
if isinstance(data, dict):
result = _resolve_codec(data)
if not isinstance(result, ArrayBytesCodec):
msg = f"Expected a dict representation of an ArrayBytesCodec; got a dict representation of a {type(result)} instead."
raise TypeError(msg)
else:
if not isinstance(data, ArrayBytesCodec):
raise TypeError(f"Expected an ArrayBytesCodec. Got {type(data)} instead.")
result = data
return result
def _parse_array_array_codec(data: dict[str, JSON] | Codec) -> ArrayArrayCodec:
"""
Normalize the input to a ``ArrayArrayCodec`` instance.
If the input is already a ``ArrayArrayCodec``, it is returned as is. If the input is a dict, it
is converted to a ``ArrayArrayCodec`` instance via the ``_resolve_codec`` function.
"""
from zarr.abc.codec import ArrayArrayCodec
if isinstance(data, dict):
result = _resolve_codec(data)
if not isinstance(result, ArrayArrayCodec):
msg = f"Expected a dict representation of an ArrayArrayCodec; got a dict representation of a {type(result)} instead."
raise TypeError(msg)
else:
if not isinstance(data, ArrayArrayCodec):
raise TypeError(f"Expected an ArrayArrayCodec. Got {type(data)} instead.")
result = data
return result
def get_pipeline_class(reload_config: bool = False) -> type[CodecPipeline]:
if reload_config:
_reload_config()
__pipeline_registry.lazy_load()
path = config.get("codec_pipeline.path")
pipeline_class = __pipeline_registry.get(path)
if pipeline_class:
return pipeline_class
raise BadConfigError(
f"Pipeline class '{path}' not found in registered pipelines: {list(__pipeline_registry)}."
)
def get_buffer_class(reload_config: bool = False) -> type[Buffer]:
if reload_config:
_reload_config()
__buffer_registry.lazy_load()
path = config.get("buffer")
buffer_class = __buffer_registry.get(path)
if buffer_class:
return buffer_class
raise BadConfigError(
f"Buffer class '{path}' not found in registered buffers: {list(__buffer_registry)}."
)
def get_ndbuffer_class(reload_config: bool = False) -> type[NDBuffer]:
if reload_config:
_reload_config()
__ndbuffer_registry.lazy_load()
path = config.get("ndbuffer")
ndbuffer_class = __ndbuffer_registry.get(path)
if ndbuffer_class:
return ndbuffer_class
raise BadConfigError(
f"NDBuffer class '{path}' not found in registered buffers: {list(__ndbuffer_registry)}."
)
def get_chunk_key_encoding_class(key: str) -> type[ChunkKeyEncoding]:
__chunk_key_encoding_registry.lazy_load(use_entrypoint_name=True)
if key not in __chunk_key_encoding_registry:
raise KeyError(
f"Chunk key encoding '{key}' not found in registered chunk key encodings: {list(__chunk_key_encoding_registry)}."
)
return __chunk_key_encoding_registry[key]
_collect_entrypoints()
def get_numcodec(data: CodecJSON_V2[str]) -> Numcodec:
"""
Resolve a numcodec codec from the numcodecs registry.
This requires the Numcodecs package to be installed.
Parameters
----------
data : CodecJSON_V2
The JSON metadata for the codec.
Returns
-------
codec : Numcodec
Examples
--------
```python
from zarr.registry import get_numcodec
codec = get_numcodec({'id': 'zlib', 'level': 1})
codec
# Zlib(level=1)
```
"""
from numcodecs.registry import get_codec
return get_codec(data) # type: ignore[no-any-return]
| Registry |
python | PyCQA__pyflakes | pyflakes/test/test_api.py | {
"start": 6356,
"end": 9375
} | class ____(TestCase):
"""
Tests for L{Reporter}.
"""
def test_syntaxError(self):
"""
C{syntaxError} reports that there was a syntax error in the source
file. It reports to the error stream and includes the filename, line
number, error message, actual line of source and a caret pointing to
where the error is.
"""
err = io.StringIO()
reporter = Reporter(None, err)
reporter.syntaxError('foo.py', 'a problem', 3, 8, 'bad line of source')
self.assertEqual(
("foo.py:3:8: a problem\n"
"bad line of source\n"
" ^\n"),
err.getvalue())
def test_syntaxErrorNoOffset(self):
"""
C{syntaxError} doesn't include a caret pointing to the error if
C{offset} is passed as C{None}.
"""
err = io.StringIO()
reporter = Reporter(None, err)
reporter.syntaxError('foo.py', 'a problem', 3, None,
'bad line of source')
self.assertEqual(
("foo.py:3: a problem\n"
"bad line of source\n"),
err.getvalue())
def test_syntaxErrorNoText(self):
"""
C{syntaxError} doesn't include text or nonsensical offsets if C{text} is C{None}.
This typically happens when reporting syntax errors from stdin.
"""
err = io.StringIO()
reporter = Reporter(None, err)
reporter.syntaxError('<stdin>', 'a problem', 0, 0, None)
self.assertEqual(("<stdin>:1:1: a problem\n"), err.getvalue())
def test_multiLineSyntaxError(self):
"""
If there's a multi-line syntax error, then we only report the last
line. The offset is adjusted so that it is relative to the start of
the last line.
"""
err = io.StringIO()
lines = [
'bad line of source',
'more bad lines of source',
]
reporter = Reporter(None, err)
reporter.syntaxError('foo.py', 'a problem', 3, len(lines[0]) + 7,
'\n'.join(lines))
self.assertEqual(
("foo.py:3:25: a problem\n" +
lines[-1] + "\n" +
" " * 24 + "^\n"),
err.getvalue())
def test_unexpectedError(self):
"""
C{unexpectedError} reports an error processing a source file.
"""
err = io.StringIO()
reporter = Reporter(None, err)
reporter.unexpectedError('source.py', 'error message')
self.assertEqual('source.py: error message\n', err.getvalue())
def test_flake(self):
"""
C{flake} reports a code warning from Pyflakes. It is exactly the
str() of a L{pyflakes.messages.Message}.
"""
out = io.StringIO()
reporter = Reporter(out, None)
message = UnusedImport('foo.py', Node(42), 'bar')
reporter.flake(message)
self.assertEqual(out.getvalue(), f"{message}\n")
| TestReporter |
python | coleifer__peewee | tests/postgres.py | {
"start": 27064,
"end": 28729
} | class ____(ModelTestCase):
database = db
requires = [IDAlways, IDByDefault]
def test_identity_field_always(self):
iq = IDAlways.insert_many([(d,) for d in ('d1', 'd2', 'd3')])
curs = iq.execute()
self.assertEqual(list(curs), [(1,), (2,), (3,)])
# Cannot specify id when generate always is true.
with self.assertRaises(ProgrammingError):
with self.database.atomic():
IDAlways.create(id=10, data='d10')
query = IDAlways.select().order_by(IDAlways.id)
self.assertEqual(list(query.tuples()), [
(1, 'd1'), (2, 'd2'), (3, 'd3')])
def test_identity_field_by_default(self):
iq = IDByDefault.insert_many([(d,) for d in ('d1', 'd2', 'd3')])
curs = iq.execute()
self.assertEqual(list(curs), [(1,), (2,), (3,)])
# Cannot specify id when generate always is true.
IDByDefault.create(id=10, data='d10')
query = IDByDefault.select().order_by(IDByDefault.id)
self.assertEqual(list(query.tuples()), [
(1, 'd1'), (2, 'd2'), (3, 'd3'), (10, 'd10')])
def test_schema(self):
sql, params = IDAlways._schema._create_table(False).query()
self.assertEqual(sql, (
'CREATE TABLE "id_always" ("id" INT GENERATED ALWAYS AS IDENTITY '
'NOT NULL PRIMARY KEY, "data" VARCHAR(255) NOT NULL)'))
sql, params = IDByDefault._schema._create_table(False).query()
self.assertEqual(sql, (
'CREATE TABLE "id_by_default" ("id" INT GENERATED BY DEFAULT AS '
'IDENTITY NOT NULL PRIMARY KEY, "data" VARCHAR(255) NOT NULL)'))
| TestIdentityField |
python | django-guardian__django-guardian | guardian/testapp/tests/test_admin.py | {
"start": 868,
"end": 1086
} | class ____(GuardedInlineAdminMixin, admin.StackedInline):
"""Test inline for UserProfile model using GuardedInlineAdminMixin."""
model = UserProfile
extra = 0
# Test admin class with inline
| UserProfileInline |
python | encode__django-rest-framework | tests/test_validation_error.py | {
"start": 774,
"end": 2161
} | class ____(TestCase):
def setUp(self):
self.DEFAULT_HANDLER = api_settings.EXCEPTION_HANDLER
def exception_handler(exc, request):
data = exc.get_full_details()
return Response(data, status=status.HTTP_400_BAD_REQUEST)
api_settings.EXCEPTION_HANDLER = exception_handler
self.expected_response_data = {
'char': [{
'message': 'This field is required.',
'code': 'required',
}],
'integer': [{
'message': 'This field is required.',
'code': 'required'
}],
}
def tearDown(self):
api_settings.EXCEPTION_HANDLER = self.DEFAULT_HANDLER
def test_class_based_view_exception_handler(self):
view = ErrorView.as_view()
request = factory.get('/', content_type='application/json')
response = view(request)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data == self.expected_response_data
def test_function_based_view_exception_handler(self):
view = error_view
request = factory.get('/', content_type='application/json')
response = view(request)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data == self.expected_response_data
| TestValidationErrorWithFullDetails |
python | weaviate__weaviate-python-client | weaviate/users/users.py | {
"start": 519,
"end": 587
} | class ____(UserBase):
user_type: UserTypes = UserTypes.OIDC
| UserOIDC |
python | Lightning-AI__lightning | src/lightning/pytorch/callbacks/gradient_accumulation_scheduler.py | {
"start": 1139,
"end": 6114
} | class ____(Callback):
r"""Change gradient accumulation factor according to scheduling.
Args:
scheduling: scheduling in format {epoch: accumulation_factor}
Note:
The argument scheduling is a dictionary. Each key represent an epoch and
its associated accumulation factor value.
Warning: Epoch are zero-indexed c.f it means if you want to change
the accumulation factor after 4 epochs, set ``Trainer(accumulate_grad_batches={4: factor})``
or ``GradientAccumulationScheduler(scheduling={4: factor})``.
For more info check the example below.
Raises:
TypeError:
If ``scheduling`` is an empty ``dict``,
or not all keys and values of ``scheduling`` are integers.
IndexError:
If ``minimal_epoch`` is less than 0.
Example::
>>> from lightning.pytorch import Trainer
>>> from lightning.pytorch.callbacks import GradientAccumulationScheduler
# from epoch 5, it starts accumulating every 2 batches. Here we have 4 instead of 5
# because epoch (key) should be zero-indexed.
>>> accumulator = GradientAccumulationScheduler(scheduling={4: 2})
>>> trainer = Trainer(callbacks=[accumulator])
"""
def __init__(self, scheduling: dict[int, int]):
super().__init__()
if not scheduling: # empty dict error
raise TypeError("Empty dict cannot be interpreted correct")
if any(not isinstance(key, int) or key < 0 for key in scheduling):
raise MisconfigurationException(
f"Epoch should be an int greater than or equal to 0. Got {list(scheduling.keys())}."
)
if any(not isinstance(value, int) or value < 1 for value in scheduling.values()):
raise MisconfigurationException(
f"Accumulation factor should be an int greater than 0. Got {list(scheduling.values())}."
)
minimal_epoch = min(scheduling.keys())
if minimal_epoch < 0:
raise IndexError(f"Epochs indexing from 1, epoch {minimal_epoch} cannot be interpreted correct")
if minimal_epoch != 0: # if user didn't define first epoch accumulation factor
scheduling.update({0: 1})
self.scheduling = scheduling
self.epochs = sorted(scheduling.keys())
def going_to_accumulate_grad_batches(self) -> bool:
return any(v > 1 for v in self.scheduling.values())
def get_accumulate_grad_batches(self, epoch: int) -> int:
accumulate_grad_batches = 1
for iter_epoch in reversed(self.epochs):
if epoch >= iter_epoch:
accumulate_grad_batches = self.scheduling[iter_epoch]
break
return accumulate_grad_batches
@override
def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Performns a configuration validation before training starts and raises errors for incompatible settings."""
if not pl_module.automatic_optimization:
raise RuntimeError(
"""Automatic gradient accumulation and the `GradientAccumulationScheduler` is not supported for
manual optimization. Please remove the callback or switch to automatic optimization."""
)
overridden_optimizer_step = is_overridden("optimizer_step", pl_module)
overridden_optimizer_zero_grad = is_overridden("optimizer_zero_grad", pl_module)
going_to_accumulate_grad_batches = self.going_to_accumulate_grad_batches()
has_overridden_optimization_functions = overridden_optimizer_step or overridden_optimizer_zero_grad
if has_overridden_optimization_functions and going_to_accumulate_grad_batches:
rank_zero_warn(
"When using `Trainer(accumulate_grad_batches != 1)` and overriding"
" `LightningModule.optimizer_{step,zero_grad}`, the hooks will not be called on every batch"
" (rather, they are called on every optimization step)."
)
# local import to avoid circular import
from lightning.pytorch.strategies import DeepSpeedStrategy
if isinstance(trainer.strategy, DeepSpeedStrategy):
raise RuntimeError(
f"The `{type(trainer.strategy).__name__}` does not support `accumulate_grad_batches` changing"
" between epochs."
)
if trainer.accumulate_grad_batches != 1:
raise ValueError(
"You have set `accumulate_grad_batches` and are using the `GradientAccumulationScheduler`"
" callback. Either remove `accumulate_grad_batches` from the Trainer or remove the callback."
)
@override
def on_train_epoch_start(self, trainer: "pl.Trainer", *_: Any) -> None:
trainer.accumulate_grad_batches = self.get_accumulate_grad_batches(trainer.current_epoch)
| GradientAccumulationScheduler |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/override1.py | {
"start": 305,
"end": 534
} | class ____:
def method3(self) -> None:
pass
@overload
def method5(self, x: int) -> int: ...
@overload
def method5(self, x: str) -> str: ...
def method5(self, x: int | str) -> int | str: ...
| ClassB |
python | numba__numba | numba/cuda/cudamath.py | {
"start": 3788,
"end": 3978
} | class ____(ConcreteTemplate):
cases = [
signature(types.UniTuple(types.float64, 2), types.float64),
signature(types.UniTuple(types.float32, 2), types.float32)
]
| Math_modf |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_collections.py | {
"start": 59949,
"end": 60331
} | class ____(MutableSet):
def __init__(self, it=()):
self.data = set(it)
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def __contains__(self, item):
return item in self.data
def add(self, item):
self.data.add(item)
def discard(self, item):
self.data.discard(item)
| WithSet |
python | ray-project__ray | python/ray/tests/autoscaler/test_providers.py | {
"start": 153,
"end": 1063
} | class ____(unittest.TestCase):
def test_node_providers(self):
for provider_name, provider_cls in _NODE_PROVIDERS.items():
config = {"module": "ray.autoscaler._private"}
try:
provider_cls(config)
except ImportError as e:
if f"ray.autoscaler.{provider_name}" in str(e):
self.fail(
f"Unexpected import error for provider {provider_name}: {e}"
)
def test_provider_pretty_names(self):
self.assertEqual(
set(_NODE_PROVIDERS.keys()), set(_PROVIDER_PRETTY_NAMES.keys())
)
def test_default_configs(self):
for config_loader in _DEFAULT_CONFIGS.values():
config_path = config_loader()
with open(config_path) as f:
yaml.safe_load(f)
if __name__ == "__main__":
unittest.main()
| TestProviders |
python | nmslib__hnswlib | tests/python/bindings_test_stress_mt_replace.py | {
"start": 54,
"end": 2831
} | class ____(unittest.TestCase):
def testRandomSelf(self):
dim = 16
num_elements = 1_000
max_num_elements = 2 * num_elements
# Generating sample data
# batch 1
first_id = 0
last_id = num_elements
labels1 = np.arange(first_id, last_id)
data1 = np.float32(np.random.random((num_elements, dim)))
# batch 2
first_id += num_elements
last_id += num_elements
labels2 = np.arange(first_id, last_id)
data2 = np.float32(np.random.random((num_elements, dim)))
# batch 3
first_id += num_elements
last_id += num_elements
labels3 = np.arange(first_id, last_id)
data3 = np.float32(np.random.random((num_elements, dim)))
for _ in range(100):
# Declaring index
hnsw_index = hnswlib.Index(space='l2', dim=dim)
hnsw_index.init_index(max_elements=max_num_elements, ef_construction=200, M=16, allow_replace_deleted=True)
hnsw_index.set_ef(100)
hnsw_index.set_num_threads(50)
# Add batch 1 and 2
hnsw_index.add_items(data1, labels1)
hnsw_index.add_items(data2, labels2) # maximum number of elements is reached
# Delete nearest neighbors of batch 2
labels2_deleted, _ = hnsw_index.knn_query(data2, k=1)
labels2_deleted_flat = labels2_deleted.flatten()
# delete probable duplicates from nearest neighbors
labels2_deleted_no_dup = set(labels2_deleted_flat)
for l in labels2_deleted_no_dup:
hnsw_index.mark_deleted(l)
labels1_found, _ = hnsw_index.knn_query(data1, k=1)
items = hnsw_index.get_items(labels1_found)
diff_with_gt_labels = np.mean(np.abs(data1 - items))
self.assertAlmostEqual(diff_with_gt_labels, 0, delta=1e-3)
labels2_after, _ = hnsw_index.knn_query(data2, k=1)
labels2_after_flat = labels2_after.flatten()
common = np.intersect1d(labels2_after_flat, labels2_deleted_flat)
self.assertTrue(common.size == 0)
# Replace deleted elements
# Maximum number of elements is reached therefore we cannot add new items
# but we can replace the deleted ones
# Note: there may be less than num_elements elements.
# As we could delete less than num_elements because of duplicates
num_duplicates = len(labels2_deleted) - len(labels2_deleted_no_dup)
labels3_tr = labels3[0:labels3.shape[0] - num_duplicates]
data3_tr = data3[0:data3.shape[0] - num_duplicates]
hnsw_index.add_items(data3_tr, labels3_tr, replace_deleted=True)
| RandomSelfTestCase |
python | getsentry__sentry | tests/sentry/notifications/test_notifications.py | {
"start": 2894,
"end": 25152
} | class ____(APITestCase):
"""
Enable Slack AND email notification settings for a user
"""
def setUp(self) -> None:
self.integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
provider="slack",
name="Team A",
external_id="TXXXXXXX1",
metadata={
"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
"installation_type": "born_as_bot",
},
)
self.idp = self.create_identity_provider(type="slack", external_id="TXXXXXXX1")
self.identity = Identity.objects.create(
external_id="UXXXXXXX1",
idp=self.idp,
user=self.user,
status=IdentityStatus.VALID,
scopes=[],
)
UserOption.objects.create(user=self.user, key="self_notifications", value="1")
self.login_as(self.user)
# modify settings
for type in ["workflow", "deploy", "alerts"]:
NotificationSettingOption.objects.create(
user_id=self.user.id,
scope_type="user",
scope_identifier=self.user.id,
type=type,
value="always",
)
responses.add_passthru(
settings.SENTRY_SNUBA + "/tests/entities/generic_metrics_counters/insert",
)
self.name = self.user.get_display_name()
self.short_id = self.group.qualified_short_id
def test_sends_note_notification(self, mock_post: MagicMock) -> None:
"""
Test that an email AND Slack notification are sent with
the expected values when a comment is created on an issue.
"""
# leave a comment
url = f"/api/0/issues/{self.group.id}/comments/"
with assume_test_silo_mode(SiloMode.REGION):
with self.tasks():
response = self.client.post(url, format="json", data={"text": "blah blah"})
assert response.status_code == 201, response.content
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
# check the txt version
assert "blah blah" in msg.body
# check the html version
assert isinstance(msg.alternatives[0][0], str)
assert "blah blah</p></div>" in msg.alternatives[0][0]
blocks = orjson.loads(mock_post.call_args.kwargs["blocks"])
block = blocks[1]["text"]["text"]
footer = blocks[2]["elements"][0]["text"]
text = mock_post.call_args.kwargs["text"]
# check the Slack version
assert text == f"New comment by {self.name}"
assert self.group.title in block
title_link = block # removes emoji and <>
notification_uuid = get_notification_uuid(block)
assert (
f"http://testserver/organizations/{self.organization.slug}/issues/{self.group.id}/?referrer=note_activity-slack¬ification_uuid={notification_uuid}"
) in title_link
assert title_link.split("\n")[-1] == "blah blah"
assert (
footer
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=note_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
def test_sends_unassignment_notification(self, mock_post: MagicMock) -> None:
"""
Test that an email AND Slack notification are sent with
the expected values when an issue is unassigned.
"""
url = f"/api/0/issues/{self.group.id}/"
with assume_test_silo_mode(SiloMode.REGION):
GroupAssignee.objects.create(
group=self.group,
project=self.project,
user_id=self.user.id,
date_added=timezone.now(),
)
with self.tasks():
response = self.client.put(url, format="json", data={"assignedTo": ""})
assert response.status_code == 200, response.content
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
# check the txt version
assert f"Unassigned\n\n{self.user.username} unassigned {self.short_id}" in msg.body
# check the html version
assert isinstance(msg.alternatives[0][0], str)
assert f"{self.user.username}</strong> unassigned" in msg.alternatives[0][0]
blocks = orjson.loads(mock_post.call_args.kwargs["blocks"])
block = blocks[1]["text"]["text"]
footer = blocks[3]["elements"][0]["text"]
text = mock_post.call_args.kwargs["text"]
assert text == f"Issue unassigned by {self.name}"
assert self.group.title in block
title_link = block[13:][1:-1] # removes emoji and <>
notification_uuid = get_notification_uuid(title_link)
assert (
footer
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=unassigned_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
def test_html_escape(self, mock_post: MagicMock) -> None:
other_user = self.create_user(name="<b>test</b>", is_staff=False, is_superuser=False)
activity = Activity(
project=self.project, data={"assignee": other_user.id}, group=self.group
)
notification = AssignedActivityNotification(activity)
html = notification.get_context()["html_description"]
assert "<b>test</b>" in html
assert "<b>test</b>" not in html
def test_regression_html_link(self, mock_post: MagicMock) -> None:
notification = RegressionActivityNotification(
Activity(
project=self.project,
group=self.group,
user_id=self.user.id,
type=ActivityType.SET_REGRESSION,
data={"version": "777"},
)
)
context = notification.get_context()
assert "as a regression in 777" in context["text_description"]
assert "as a regression in <a href=" in context["html_description"]
@patch("sentry.analytics.record")
def test_sends_resolution_notification(
self, record_analytics: MagicMock, mock_post: MagicMock
) -> None:
"""
Test that an email AND Slack notification are sent with
the expected values when an issue is resolved.
"""
url = f"/api/0/issues/{self.group.id}/"
with assume_test_silo_mode(SiloMode.REGION):
with self.tasks():
response = self.client.put(url, format="json", data={"status": "resolved"})
assert response.status_code == 200, response.content
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
# check the txt version
assert f"{self.user.username} marked {self.short_id} as resolved" in msg.body
# check the html version
assert isinstance(msg.alternatives[0][0], str)
assert f"{self.short_id}</a> as resolved</p>" in msg.alternatives[0][0]
blocks = orjson.loads(mock_post.call_args.kwargs["blocks"])
block = blocks[1]["text"]["text"]
footer = blocks[3]["elements"][0]["text"]
text = mock_post.call_args.kwargs["text"]
assert self.group.title in block
title_link = block[13:][1:-1] # removes emoji and <>
notification_uuid = get_notification_uuid(title_link)
assert (
text
== f"{self.name} marked <http://testserver/organizations/{self.organization.slug}/issues/{self.group.id}/?referrer=activity_notification¬ification_uuid={notification_uuid}|{self.short_id}> as resolved"
)
assert (
footer
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=resolved_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
assert_any_analytics_event(
record_analytics,
EmailNotificationSent(
user_id=self.user.id,
organization_id=self.organization.id,
group_id=self.group.id,
notification_uuid=notification_uuid,
category="",
id=0,
actor_type="User",
),
exclude_fields=["category", "id", "project_id", "actor_id", "actor_type"],
)
assert_any_analytics_event(
record_analytics,
SlackIntegrationNotificationSent(
user_id=self.user.id,
organization_id=self.organization.id,
group_id=self.group.id,
notification_uuid=notification_uuid,
actor_type="User",
category="",
),
exclude_fields=["category", "id", "project_id", "actor_id", "actor_type"],
)
@patch("sentry.analytics.record")
def test_sends_deployment_notification(
self, record_analytics: MagicMock, mock_post: MagicMock
) -> None:
"""
Test that an email AND Slack notification are sent with
the expected values when a release is deployed.
"""
release = self.create_release()
version_parsed = self.version_parsed = parse_release(release.version)["description"]
with assume_test_silo_mode(SiloMode.REGION):
url = (
f"/api/0/organizations/{self.organization.slug}/releases/{release.version}/deploys/"
)
with self.tasks():
response = self.client.post(
url, format="json", data={"environment": self.environment.name}
)
assert response.status_code == 201, response.content
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
# check the txt version
assert f"Version {version_parsed} was deployed to {self.environment.name} on" in msg.body
# check the html version
assert isinstance(msg.alternatives[0][0], str)
assert (
f"Version {version_parsed} was deployed to {self.environment.name}\n </h2>\n"
in msg.alternatives[0][0]
)
blocks = orjson.loads(mock_post.call_args.kwargs["blocks"])
footer = blocks[1]["elements"][0]["text"]
url = blocks[2]["elements"][0]["url"]
text = mock_post.call_args.kwargs["text"]
assert (
text
== f"Release {version_parsed} was deployed to {self.environment.name} for this project"
)
notification_uuid = get_notification_uuid(url)
assert url == (
f"http://testserver/organizations/{self.organization.slug}/releases/{release.version}/?project={self.project.id}&unselectedSeries=Healthy&referrer=release_activity¬ification_uuid={notification_uuid}"
)
assert (
footer
== f"{self.project.slug} | <http://testserver/settings/account/notifications/deploy/?referrer=release_activity-slack-user¬ification_uuid={notification_uuid}|Notification Settings>"
)
assert_any_analytics_event(
record_analytics,
EmailNotificationSent(
user_id=self.user.id,
organization_id=self.organization.id,
group_id=None,
notification_uuid=notification_uuid,
category="",
id=0,
actor_type="User",
),
exclude_fields=["category", "id", "project_id", "actor_id", "actor_type"],
)
assert_any_analytics_event(
record_analytics,
SlackIntegrationNotificationSent(
user_id=self.user.id,
organization_id=self.organization.id,
group_id=None,
notification_uuid=notification_uuid,
actor_type="User",
category="",
),
exclude_fields=["category", "id", "project_id", "actor_id", "actor_type"],
)
@patch("sentry.analytics.record")
def test_sends_regression_notification(
self, record_analytics: MagicMock, mock_post: MagicMock
) -> None:
"""
Test that an email AND Slack notification are sent with
the expected values when an issue regresses.
"""
# resolve and unresolve the issue
ts = time() - 300
with assume_test_silo_mode(SiloMode.REGION):
manager = EventManager(make_event(event_id="a" * 32, checksum="a" * 32, timestamp=ts))
with self.tasks():
event = manager.save(self.project.id)
assert event.group_id is not None
group = Group.objects.get(id=event.group_id)
group.status = GroupStatus.RESOLVED
group.substatus = None
group.save()
assert group.is_resolved()
manager = EventManager(
make_event(event_id="b" * 32, checksum="a" * 32, timestamp=ts + 50)
)
with self.tasks():
event2 = manager.save(self.project.id)
assert event.group_id == event2.group_id
group = Group.objects.get(id=group.id)
assert not group.is_resolved()
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
# check the txt version
assert f"Sentry marked {group.qualified_short_id} as a regression" in msg.body
# check the html version
assert isinstance(msg.alternatives[0][0], str)
assert f"{group.qualified_short_id}</a> as a regression</p>" in msg.alternatives[0][0]
blocks = orjson.loads(mock_post.call_args.kwargs["blocks"])
block = blocks[1]["text"]["text"]
footer = blocks[3]["elements"][0]["text"]
text = mock_post.call_args.kwargs["text"]
assert text == "Issue marked as regression"
title_link = block[13:][1:-1] # removes emoji and <>
notification_uuid = get_notification_uuid(title_link)
assert (
footer
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=regression_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
assert_any_analytics_event(
record_analytics,
EmailNotificationSent(
user_id=self.user.id,
organization_id=self.organization.id,
group_id=group.id,
notification_uuid=notification_uuid,
category="",
id=0,
actor_type="User",
),
exclude_fields=["category", "id", "project_id", "actor_id", "actor_type"],
)
assert_any_analytics_event(
record_analytics,
SlackIntegrationNotificationSent(
user_id=self.user.id,
organization_id=self.organization.id,
group_id=group.id,
notification_uuid=notification_uuid,
actor_type="User",
category="",
),
exclude_fields=["category", "id", "project_id", "actor_id", "actor_type"],
)
@patch("sentry.analytics.record")
def test_sends_resolved_in_release_notification(
self, record_analytics: MagicMock, mock_post: MagicMock
) -> None:
"""
Test that an email AND Slack notification are sent with
the expected values when an issue is resolved by a release.
"""
release = self.create_release()
with assume_test_silo_mode(SiloMode.REGION):
url = f"/api/0/issues/{self.group.id}/"
with self.tasks():
response = self.client.put(
url,
format="json",
data={"status": "resolved", "statusDetails": {"inRelease": release.version}},
)
assert response.status_code == 200, response.content
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
parsed_version = parse_release(release.version)["description"]
# check the txt version
assert (
f"Resolved Issue\n\n{self.user.username} marked {self.short_id} as resolved in {parsed_version}"
in msg.body
)
# check the html version
assert isinstance(msg.alternatives[0][0], str)
assert (
f'text-decoration: none">{self.short_id}</a> as resolved in' in msg.alternatives[0][0]
)
blocks = orjson.loads(mock_post.call_args.kwargs["blocks"])
block = blocks[1]["text"]["text"]
footer = blocks[3]["elements"][0]["text"]
text = mock_post.call_args.kwargs["text"]
assert text == f"Issue marked as resolved in {parsed_version} by {self.name}"
assert self.group.title in block
title_link = block[13:][1:-1] # removes emoji and <>
notification_uuid = get_notification_uuid(title_link)
assert (
footer
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=resolved_in_release_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
assert_any_analytics_event(
record_analytics,
EmailNotificationSent(
user_id=self.user.id,
organization_id=self.organization.id,
group_id=self.group.id,
notification_uuid=notification_uuid,
category="",
id=0,
actor_type="User",
),
exclude_fields=["category", "id", "project_id", "actor_id", "actor_type"],
)
assert_any_analytics_event(
record_analytics,
SlackIntegrationNotificationSent(
user_id=self.user.id,
organization_id=self.organization.id,
group_id=self.group.id,
notification_uuid=notification_uuid,
actor_type="User",
category="",
),
exclude_fields=["category", "id", "project_id", "actor_id", "actor_type"],
)
def test_sends_processing_issue_notification(self, mock_post: MagicMock) -> None:
"""
Test that an email AND Slack notification are sent with
the expected values when an issue is held back for reprocessing
"""
@patch("sentry.analytics.record")
def test_sends_issue_notification(
self, record_analytics: MagicMock, mock_post: MagicMock
) -> None:
"""
Test that an email AND Slack notification are sent with
the expected values when an issue comes in that triggers an alert rule.
"""
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "Member",
"targetIdentifier": str(self.user.id),
}
with assume_test_silo_mode(SiloMode.REGION):
Rule.objects.create(
project=self.project,
label="a rule",
data={
"match": "all",
"actions": [action_data],
},
)
min_ago = before_now(minutes=1).isoformat()
event = self.store_event(
data={
"message": "Hello world",
"timestamp": min_ago,
},
project_id=self.project.id,
)
cache_key = write_event_to_cache(event)
with self.tasks():
post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
group_id=event.group_id,
cache_key=cache_key,
project_id=self.project.id,
eventstream_type=EventStreamEventType.Error.value,
)
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
# check the txt version
assert "Details\n-------\n\n" in msg.body
# check the html version
assert isinstance(msg.alternatives[0][0], str)
assert "Hello world</pre>" in msg.alternatives[0][0]
blocks = orjson.loads(mock_post.call_args_list[0].kwargs["blocks"])
block = blocks[1]["text"]["text"]
footer = blocks[4]["elements"][0]["text"]
assert "Hello world" in block
title_link = block[13:][1:-1] # removes emoji and <>
notification_uuid = get_notification_uuid(title_link)
assert (
footer
== f"{self.project.slug} | <http://testserver/settings/account/notifications/alerts/?referrer=issue_alert-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
assert_any_analytics_event(
record_analytics,
EmailNotificationSent(
user_id=self.user.id,
organization_id=self.organization.id,
group_id=event.group_id,
notification_uuid=notification_uuid,
category="",
id=0,
actor_type="User",
),
exclude_fields=["category", "id", "project_id", "actor_id", "actor_type"],
)
assert_any_analytics_event(
record_analytics,
SlackIntegrationNotificationSent(
user_id=self.user.id,
organization_id=self.organization.id,
group_id=event.group_id,
notification_uuid=notification_uuid,
actor_type="User",
category="",
),
exclude_fields=["category", "id", "project_id", "actor_id", "actor_type"],
)
| ActivityNotificationTest |
python | viewflow__viewflow | tests/components/test_field_checkbox.py | {
"start": 949,
"end": 1570
} | class ____(forms.Form):
field = forms.BooleanField()
urlpatterns = [
path(
"",
Site(
viewsets=[
Application(
title="Test Application",
urlpatterns=[
path(
"form/",
FormView.as_view(
form_class=TestForm,
template_name="tests/components.html",
),
)
],
),
]
).urls,
)
]
| TestForm |
python | django__django | tests/backends/tests.py | {
"start": 11591,
"end": 25155
} | class ____(TransactionTestCase):
available_apps = ["backends"]
def create_squares_with_executemany(self, args):
self.create_squares(args, "format", True)
def create_squares(self, args, paramstyle, multiple):
opts = Square._meta
tbl = connection.introspection.identifier_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field("root").column)
f2 = connection.ops.quote_name(opts.get_field("square").column)
if paramstyle == "format":
query = "INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % (tbl, f1, f2)
elif paramstyle == "pyformat":
query = "INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)" % (
tbl,
f1,
f2,
)
else:
raise ValueError("unsupported paramstyle in test")
with connection.cursor() as cursor:
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
# Test cursor.executemany #4896
args = [(i, i**2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 11)
for i in range(-5, 6):
square = Square.objects.get(root=i)
self.assertEqual(square.square, i**2)
def test_cursor_executemany_with_empty_params_list(self):
# Test executemany with params=[] does nothing #4765
args = []
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
# Test executemany accepts iterators #10320
args = ((i, i**2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 5)
args = ((i, i**2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 9)
@skipUnlessDBFeature("supports_paramstyle_pyformat")
def test_cursor_execute_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = {"root": 3, "square": 9}
self.create_squares(args, "pyformat", multiple=False)
self.assertEqual(Square.objects.count(), 1)
@skipUnlessDBFeature("supports_paramstyle_pyformat")
def test_cursor_executemany_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = [{"root": i, "square": i**2} for i in range(-5, 6)]
self.create_squares(args, "pyformat", multiple=True)
self.assertEqual(Square.objects.count(), 11)
for i in range(-5, 6):
square = Square.objects.get(root=i)
self.assertEqual(square.square, i**2)
@skipUnlessDBFeature("supports_paramstyle_pyformat")
def test_cursor_executemany_with_pyformat_iterator(self):
args = ({"root": i, "square": i**2} for i in range(-3, 2))
self.create_squares(args, "pyformat", multiple=True)
self.assertEqual(Square.objects.count(), 5)
args = ({"root": i, "square": i**2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, "pyformat", multiple=True)
self.assertEqual(Square.objects.count(), 9)
def test_unicode_fetches(self):
# fetchone, fetchmany, fetchall return strings as Unicode objects.
qn = connection.ops.quote_name
Person(first_name="John", last_name="Doe").save()
Person(first_name="Jane", last_name="Doe").save()
Person(first_name="Mary", last_name="Agnelline").save()
Person(first_name="Peter", last_name="Parker").save()
Person(first_name="Clark", last_name="Kent").save()
opts2 = Person._meta
f3, f4 = opts2.get_field("first_name"), opts2.get_field("last_name")
with connection.cursor() as cursor:
cursor.execute(
"SELECT %s, %s FROM %s ORDER BY %s"
% (
qn(f3.column),
qn(f4.column),
connection.introspection.identifier_converter(opts2.db_table),
qn(f3.column),
)
)
self.assertEqual(cursor.fetchone(), ("Clark", "Kent"))
self.assertEqual(
list(cursor.fetchmany(2)), [("Jane", "Doe"), ("John", "Doe")]
)
self.assertEqual(
list(cursor.fetchall()), [("Mary", "Agnelline"), ("Peter", "Parker")]
)
def test_unicode_password(self):
old_password = connection.settings_dict["PASSWORD"]
connection.settings_dict["PASSWORD"] = "françois"
try:
with connection.cursor():
pass
except DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with Unicode password: %s" % e)
finally:
connection.settings_dict["PASSWORD"] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, "ops"))
self.assertTrue(hasattr(connection.ops, "connection"))
self.assertEqual(connection, connection.ops.connection)
def test_database_operations_init(self):
"""
DatabaseOperations initialization doesn't query the database.
See #17656.
"""
with self.assertNumQueries(0):
connection.ops.__class__(connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
"""Creating an existing table returns a DatabaseError"""
query = "CREATE TABLE %s (id INTEGER);" % Article._meta.db_table
with connection.cursor() as cursor:
with self.assertRaises(DatabaseError):
cursor.execute(query)
def test_cursor_contextmanager(self):
"""
Cursors can be used as a context manager
"""
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
# Both InterfaceError and ProgrammingError seem to be used when
# accessing closed cursor (psycopg has InterfaceError, rest seem
# to use ProgrammingError).
with self.assertRaises(connection.features.closed_cursor_error_class):
# cursor should be closed, so no queries should be possible.
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
@unittest.skipUnless(
connection.vendor == "postgresql",
"Psycopg specific cursor.closed attribute needed",
)
def test_cursor_contextmanager_closing(self):
# There isn't a generic way to test that cursors are closed, but
# psycopg offers us a way to check that by closed attribute.
# So, run only on psycopg for that reason.
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
self.assertTrue(cursor.closed)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature("test_db_allows_multiple_connections")
def test_is_usable_after_database_disconnects(self):
"""
is_usable() doesn't crash when the database disconnects (#21553).
"""
# Open a connection to the database.
with connection.cursor():
pass
# Emulate a connection close by the database.
connection._close()
# Even then is_usable() should not raise an exception.
try:
self.assertFalse(connection.is_usable())
finally:
# Clean up the mess created by connection._close(). Since the
# connection is already closed, this crashes on some backends.
try:
connection.close()
except Exception:
pass
@override_settings(DEBUG=True)
def test_queries(self):
"""
Test the documented API of connection.queries.
"""
sql = "SELECT 1" + connection.features.bare_select_suffix
with connection.cursor() as cursor:
reset_queries()
cursor.execute(sql)
self.assertEqual(1, len(connection.queries))
self.assertIsInstance(connection.queries, list)
self.assertIsInstance(connection.queries[0], dict)
self.assertEqual(list(connection.queries[0]), ["sql", "time"])
self.assertEqual(connection.queries[0]["sql"], sql)
reset_queries()
self.assertEqual(0, len(connection.queries))
sql = "INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % (
connection.introspection.identifier_converter("backends_square"),
connection.ops.quote_name("root"),
connection.ops.quote_name("square"),
)
with connection.cursor() as cursor:
cursor.executemany(sql, [(1, 1), (2, 4)])
self.assertEqual(1, len(connection.queries))
self.assertIsInstance(connection.queries, list)
self.assertIsInstance(connection.queries[0], dict)
self.assertEqual(list(connection.queries[0]), ["sql", "time"])
self.assertEqual(connection.queries[0]["sql"], "2 times: %s" % sql)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature("test_db_allows_multiple_connections")
@override_settings(DEBUG=True)
def test_queries_limit(self):
"""
The backend doesn't store an unlimited number of queries (#12581).
"""
old_queries_limit = BaseDatabaseWrapper.queries_limit
BaseDatabaseWrapper.queries_limit = 3
new_connection = connection.copy()
# Initialize the connection and clear initialization statements.
with new_connection.cursor():
pass
new_connection.queries_log.clear()
try:
with new_connection.cursor() as cursor:
cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(2, len(new_connection.queries))
self.assertEqual(0, len(w))
with new_connection.cursor() as cursor:
cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
msg = (
"Limit for query logging exceeded, only the last 3 queries will be "
"returned."
)
with self.assertWarnsMessage(UserWarning, msg) as ctx:
self.assertEqual(3, len(new_connection.queries))
self.assertEqual(ctx.filename, __file__)
finally:
BaseDatabaseWrapper.queries_limit = old_queries_limit
new_connection.close()
@override_settings(DEBUG=True)
def test_queries_logger(self):
sql = "select 1" + connection.features.bare_select_suffix
with (
connection.cursor() as cursor,
self.assertLogs("django.db.backends", "DEBUG") as handler,
):
cursor.execute(sql)
self.assertGreaterEqual(
records_len := len(handler.records),
1,
f"Wrong number of calls for {handler=} in (expected at least 1, got "
f"{records_len}).",
)
record = handler.records[-1]
# Log raw message, effective level and args are correct.
self.assertEqual(record.msg, "(%.3f) %s; args=%s; alias=%s")
self.assertEqual(record.levelno, logging.DEBUG)
self.assertEqual(len(record.args), 4)
duration, logged_sql, params, alias = record.args
# Duration is hard to test without mocking time, expect under 1 second.
self.assertIsInstance(duration, float)
self.assertLess(duration, 1)
self.assertEqual(duration, record.duration)
# SQL is correct and not formatted.
self.assertEqual(logged_sql, sql)
self.assertNotEqual(logged_sql, connection.ops.format_debug_sql(sql))
self.assertEqual(logged_sql, record.sql)
# Params is None and alias is connection.alias.
self.assertIsNone(params)
self.assertIsNone(record.params)
self.assertEqual(alias, connection.alias)
self.assertEqual(alias, record.alias)
def test_queries_bare_where(self):
sql = f"SELECT 1{connection.features.bare_select_suffix} WHERE 1=1"
with connection.cursor() as cursor:
cursor.execute(sql)
self.assertEqual(cursor.fetchone(), (1,))
def test_timezone_none_use_tz_false(self):
connection.ensure_connection()
with self.settings(TIME_ZONE=None, USE_TZ=False):
connection.init_connection_state()
# These tests aren't conditional because it would require differentiating
# between MySQL+InnoDB and MySQL+MYISAM (something we currently can't do).
| BackendTestCase |
python | huggingface__transformers | src/transformers/utils/dummy_pt_objects.py | {
"start": 13175,
"end": 13435
} | class ____(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def apply_chunking_to_forward(*args, **kwargs):
requires_backends(apply_chunking_to_forward, ["torch"])
| Conv1D |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 967129,
"end": 967533
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field(SecurityVulnerability, graphql_name="node")
"""The item at the end of the edge."""
| SecurityVulnerabilityEdge |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_hyperlink43.py | {
"start": 315,
"end": 907
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink43.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(
"E9", self.image_dir + "red.png", {"url": r"external:c:\te mp\foo.xlsx"}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | aimacode__aima-python | agents.py | {
"start": 1051,
"end": 1813
} | class ____:
"""This represents any physical object that can appear in an Environment.
You subclass Thing to get the things you want. Each thing can have a
.__name__ slot (used for output only)."""
def __repr__(self):
return '<{}>'.format(getattr(self, '__name__', self.__class__.__name__))
def is_alive(self):
"""Things that are 'alive' should return true."""
return hasattr(self, 'alive') and self.alive
def show_state(self):
"""Display the agent's internal state. Subclasses should override."""
print("I don't know how to show_state.")
def display(self, canvas, x, y, width, height):
"""Display an image of this Thing on the canvas."""
# Do we need this?
pass
| Thing |
python | facebook__pyre-check | tools/generate_taint_models/tests/get_models_filtered_by_callable_test.py | {
"start": 792,
"end": 1195
} | class ____(ModelGenerator[TestModel]):
def gather_functions_to_model(self) -> Iterable[Callable[..., object]]:
return []
def compute_models(
self, functions_to_model: Iterable[Callable[..., object]]
) -> List[TestModel]:
return [TestModel(0), TestModel(1), TestModel(2)]
def is_even_index(model: TestModel) -> bool:
return model.index % 2 == 0
| TestModelGenerator |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_task_instances.py | {
"start": 63366,
"end": 64399
} | class ____:
def test_get_start_date(self, client, session, create_task_instance):
ti = create_task_instance(
task_id="test_ti_update_state_reschedule_mysql_limit",
state=State.RUNNING,
start_date=timezone.datetime(2024, 1, 1),
session=session,
)
tr = TaskReschedule(
ti_id=ti.id,
start_date=timezone.datetime(2024, 1, 1),
end_date=timezone.datetime(2024, 1, 1, 1),
reschedule_date=timezone.datetime(2024, 1, 1, 2),
)
session.add(tr)
session.commit()
response = client.get(f"/execution/task-reschedules/{ti.id}/start_date")
assert response.status_code == 200
assert response.json() == "2024-01-01T00:00:00Z"
def test_get_start_date_not_found(self, client):
ti_id = "0182e924-0f1e-77e6-ab50-e977118bc139"
response = client.get(f"/execution/task-reschedules/{ti_id}/start_date")
assert response.json() is None
| TestGetRescheduleStartDate |
python | automl__auto-sklearn | autosklearn/data/feature_validator.py | {
"start": 455,
"end": 17533
} | class ____(BaseEstimator):
"""
Checks the input data to Auto-Sklearn.
It also determines what columns are categorical and which ones are numerical,
so that the pre-processing pipeline can process this columns accordingly.
Attributes
----------
feat_type: Optional[List[str]] = None
In case the dataset is not a pandas DataFrame:
+ If provided, this list indicates which columns should be treated as
categorical it is internally transformed into a dictionary that
indicates a mapping from column index to categorical/numerical.
+ If not provided, by default all columns are treated as numerical
If the input dataset is of type pandas dataframe, this argument
must be none, as the column type will be inferred from the pandas dtypes.
data_type:
Class name of the data type provided during fit.
"""
def __init__(
self,
feat_type: Optional[List[str]] = None,
logger: Optional[PickableLoggerAdapter] = None,
allow_string_features: bool = True,
) -> None:
# If a dataframe was provided, we populate
# this attribute with a mapping from column to
# {numerical | categorical | string}
self.feat_type: Optional[Dict[Union[str, int], str]] = None
if feat_type is not None:
if isinstance(feat_type, dict):
self.feat_type = feat_type
elif not isinstance(feat_type, List):
raise ValueError(
"Auto-Sklearn expects a list of categorical/"
"numerical/string feature types, yet a"
" {} was provided".format(type(feat_type))
)
else:
# Convert to a dictionary which will be passed to the ColumnTransformer
# Column Transformer supports strings or integer indexes
self.feat_type = {i: feat for i, feat in enumerate(feat_type)}
# Register types to detect unsupported data format changes
self.data_type = None # type: Optional[type]
self.dtypes = {} # type: Dict[str, str]
self.logger = logger if logger is not None else logging.getLogger(__name__)
self._is_fitted = False
self.allow_string_features = allow_string_features
def fit(
self,
X_train: SUPPORTED_FEAT_TYPES,
X_test: Optional[SUPPORTED_FEAT_TYPES] = None,
) -> "FeatureValidator":
"""
Validates input data to Auto-Sklearn.
The supported data types are List, numpy arrays and pandas DataFrames.
CSR sparse data types are also supported
Parameters
----------
X_train: SUPPORTED_FEAT_TYPES
A set of features that are going to be validated (type and dimensionality
checks) and a encoder fitted in the case the data needs encoding
X_test: Optional[SUPPORTED_FEAT_TYPES]
A hold out set of data used for checking
"""
# If a list was provided, it will be converted to pandas
if isinstance(X_train, List):
X_train, X_test = self.list_to_dataframe(X_train, X_test)
self._check_data(X_train)
# Handle categorical feature identification for the pipeline
if hasattr(X_train, "iloc"):
if self.feat_type is not None:
raise ValueError(
"When providing a DataFrame to Auto-Sklearn, we extract "
"the feature types from the DataFrame.dtypes. That is, "
"providing the option feat_type to the fit method is not "
"supported when using a Dataframe. Please make sure that the "
"type of each column in your DataFrame is properly set. "
"More details about having the correct data type in your "
"DataFrame can be seen in "
"https://pandas.pydata.org/pandas-docs/stable/reference"
"/api/pandas.DataFrame.astype.html"
)
else:
self.feat_type = self.get_feat_type_from_columns(X_train)
else:
# Numpy array was provided
if self.feat_type is None:
# Assume numerical columns if a numpy array has no feature types
self.feat_type = {i: "numerical" for i in range(np.shape(X_train)[1])}
else:
# Check The feat type provided
if len(self.feat_type) != np.shape(X_train)[1]:
raise ValueError(
"Array feat_type does not have same number of "
"variables as X has features. %d vs %d."
% (len(self.feat_type), np.shape(X_train)[1])
)
if not all([isinstance(f, str) for f in self.feat_type.values()]):
raise ValueError(
"feat_type must only contain strings: {}".format(
list(self.feat_type.values()),
)
)
for ft in self.feat_type.values():
if ft.lower() not in ["categorical", "numerical", "string"]:
raise ValueError(
"Only `Categorical`, `Numerical` and `String` are "
"valid feature types"
)
if X_test is not None:
self._check_data(X_test)
if np.shape(X_train)[1] != np.shape(X_test)[1]:
raise ValueError(
"The feature dimensionality of the train and test "
"data does not match train({}) != test({})".format(
np.shape(X_train)[1], np.shape(X_test)[1]
)
)
self._is_fitted = True
return self
def transform(
self,
X: SUPPORTED_FEAT_TYPES,
) -> Union[np.ndarray, spmatrix, pd.DataFrame]:
"""
Validates and fit a categorical encoder (if needed) to the features.
The supported data types are List, numpy arrays and pandas DataFrames.
Parameters
----------
X_train: SUPPORTED_FEAT_TYPES
A set of features, whose categorical features are going to be
transformed
Return
------
np.ndarray | spmatrix | pd.DataFrame:
The transformed array
"""
if not self._is_fitted:
raise NotFittedError(
"Cannot call transform on a validator that is not fitted"
)
# If a list was provided, it will be converted to pandas
if isinstance(X, List):
X_transformed, _ = self.list_to_dataframe(X)
else:
X_transformed = X
# Check the data here so we catch problems on new test data
self._check_data(X_transformed)
# Sparse related transformations
# Not all sparse format support index sorting
if isinstance(X_transformed, spmatrix):
if not isinstance(X_transformed, csr_matrix):
self.logger.warning(
f"Sparse data provided is of type {type(X_transformed)} "
"yet Auto-Sklearn only support csr_matrix. Auto-sklearn "
"will convert the provided data to the csr_matrix format."
)
X_transformed = X_transformed.tocsr(copy=False)
X_transformed.sort_indices()
return X_transformed
def _check_data(
self,
X: SUPPORTED_FEAT_TYPES,
) -> None:
"""
Feature dimensionality and data type checks
Parameters
----------
X: SUPPORTED_FEAT_TYPES
A set of features that are going to be validated (type and dimensionality)
and a encoder fitted in the case the data needs encoding
"""
# We consider columns that are all nan in a pandas frame as category
if hasattr(X, "columns"):
for column in cast(pd.DataFrame, X).columns:
if X[column].isna().all():
X[column] = X[column].astype("category")
if not isinstance(X, (np.ndarray, pd.DataFrame)) and not isinstance(
X, spmatrix
):
raise ValueError(
"Auto-sklearn only supports Numpy arrays, Pandas DataFrames,"
" scipy sparse and Python Lists, yet, the provided input is"
" of type {}".format(type(X))
)
if self.data_type is None:
self.data_type = type(X)
if self.data_type != type(X):
self.logger.warning(
f"Auto-sklearn previously received features of type {self.data_type} "
f"yet the current features have type {type(X)}. Changing the dtype "
"of inputs to an estimator might cause problems"
)
# Do not support category/string numpy data. Only numbers
if hasattr(X, "dtype"):
if not np.issubdtype(X.dtype.type, np.number): # type: ignore[union-attr]
raise ValueError(
"When providing a numpy array to Auto-sklearn, the only valid"
f" dtypes are numerical ones. The provided data type {X.dtype.type}"
" is not supported."
)
# Then for Pandas, we do not support Nan in categorical columns
if hasattr(X, "iloc"):
# If entered here, we have a pandas dataframe
X = cast(pd.DataFrame, X)
dtypes = {col: X[col].dtype.name.lower() for col in X.columns}
if len(self.dtypes) > 0:
if self.dtypes != dtypes:
# To support list, we need to support object inference.
# In extreme cases, the train column might be all integer,
# and the test column might be float.
self.logger.warning(
"Changing the dtype of the features after fit() is "
"not recommended. Fit() method was called with "
"{} whereas the new features have {} as type".format(
self.dtypes,
dtypes,
)
)
else:
self.dtypes = dtypes
def get_feat_type_from_columns(
self,
X: pd.DataFrame,
) -> Dict[Union[str, int], str]:
"""
Returns a dictionary that maps pandas dataframe columns to a feature type.
This feature type can be categorical, numerical or string
Parameters
----------
X: pd.DataFrame
A set of features that are going to be validated (type and dimensionality
checks) and a encoder fitted in the case the data needs encoding
Returns
-------
feat_type:
dictionary with column to feature type mapping
"""
# Also, register the feature types for the estimator
feat_type = {}
# Make sure each column is a valid type
for i, column in enumerate(X.columns):
if is_sparse(X[column]):
raise ValueError(
"Auto-sklearn does not yet support sparse pandas Series."
f" Please convert {column} to a dense format."
)
elif X[column].dtype.name in ["category", "bool"]:
feat_type[column] = "categorical"
elif X[column].dtype.name == "string":
if self.allow_string_features:
feat_type[column] = "string"
else:
feat_type[column] = "categorical"
warnings.warn(
f"you disabled text encoding column {column} will be "
f"encoded as category"
)
# Move away from np.issubdtype as it causes
# TypeError: data type not understood in certain pandas types
elif not is_numeric_dtype(X[column]):
if X[column].dtype.name == "object":
warnings.warn(
f"Input Column {column} has generic type object. "
f"Autosklearn will treat this column as string. "
f"Please ensure that this setting is suitable for your task.",
UserWarning,
)
if self.allow_string_features:
feat_type[column] = "string"
else:
feat_type[column] = "categorical"
warnings.warn(
f"you disabled text encoding column {column} will be"
f"encoded as category"
)
elif pd.core.dtypes.common.is_datetime_or_timedelta_dtype(
X[column].dtype
):
raise ValueError(
"Auto-sklearn does not support time and/or date datatype as "
f"given in column {column}. Please convert the time "
" information to a numerical value first. One example on how to"
" do this can be found on "
" https://stats.stackexchange.com/questions/311494/"
)
else:
raise ValueError(
"Input Column {} has unsupported dtype {}. "
"Supported column types are categorical/bool/numerical dtypes. "
"Make sure your data is formatted in a correct way, "
"before feeding it to Auto-Sklearn.".format(
column,
X[column].dtype.name,
)
)
else:
feat_type[column] = "numerical"
return feat_type
def list_to_dataframe(
self,
X_train: SUPPORTED_FEAT_TYPES,
X_test: Optional[SUPPORTED_FEAT_TYPES] = None,
) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
"""
Converts a list to a DataFrame. In this process, column types are inferred.
If test data is provided, we proactively match it to train data
Parameters
----------
X_train: SUPPORTED_FEAT_TYPES
A set of features that are going to be validated (type and dimensionality
checks) and a encoder fitted in the case the data needs encoding
X_test: Optional[SUPPORTED_FEAT_TYPES]
A hold out set of data used for checking
Returns
-------
Union[pd.DataFrame, pd.DataFrame]:
transformed (train, test) data from list to pandas DataFrame
"""
# If a list was provided, it will be converted to pandas
X_train = pd.DataFrame(data=X_train).convert_dtypes()
# Store the dtypes and use in case of re-fit
if len(self.dtypes) == 0:
self.dtypes = {
col: X_train[col].dtype.name.lower() for col in X_train.columns
}
else:
for col in X_train.columns:
# Try to convert to the original dtype used to fit the validator
# But also be robust to extreme cases (for example, the train data for a
# column was all np.int-like and the test data is np.float-type)
try:
X_train[col] = X_train[col].astype(self.dtypes[col])
except Exception as e:
self.logger.warning(
f"Failed to format column {col} as {self.dtypes[col]}: {e}"
)
self.dtypes[col] = X_train[col].dtype.name.lower()
self.logger.warning(
"The provided feature types to autosklearn are of type list."
"Features have been interpreted as: {}".format(
[(col, t) for col, t in zip(X_train.columns, X_train.dtypes)]
)
)
if X_test is not None:
if not isinstance(X_test, List):
self.logger.warning(
"Train features are a list while the provided test data"
"is {}. X_test will be casted as DataFrame.".format(type(X_test))
)
X_test = pd.DataFrame(data=X_test)
for col in X_test.columns:
try:
X_test[col] = X_test[col].astype(self.dtypes[col])
except Exception as e:
self.logger.warning(
f"Failed to format column {col} as {self.dtypes[col]}: {e}"
)
self.dtypes[col] = X_test[col].dtype.name.lower()
return X_train, X_test
| FeatureValidator |
python | Lightning-AI__lightning | examples/fabric/reinforcement_learning/rl/agent.py | {
"start": 3602,
"end": 9238
} | class ____(LightningModule):
def __init__(
self,
envs: gym.vector.SyncVectorEnv,
act_fun: str = "relu",
ortho_init: bool = False,
vf_coef: float = 1.0,
ent_coef: float = 0.0,
clip_coef: float = 0.2,
clip_vloss: bool = False,
normalize_advantages: bool = False,
**torchmetrics_kwargs,
):
super().__init__()
if act_fun.lower() == "relu":
act_fun = torch.nn.ReLU()
elif act_fun.lower() == "tanh":
act_fun = torch.nn.Tanh()
else:
raise ValueError("Unrecognized activation function: `act_fun` must be either `relu` or `tanh`")
self.vf_coef = vf_coef
self.ent_coef = ent_coef
self.clip_coef = clip_coef
self.clip_vloss = clip_vloss
self.normalize_advantages = normalize_advantages
self.critic = torch.nn.Sequential(
layer_init(
torch.nn.Linear(math.prod(envs.single_observation_space.shape), 64),
ortho_init=ortho_init,
),
act_fun,
layer_init(torch.nn.Linear(64, 64), ortho_init=ortho_init),
act_fun,
layer_init(torch.nn.Linear(64, 1), std=1.0, ortho_init=ortho_init),
)
self.actor = torch.nn.Sequential(
layer_init(
torch.nn.Linear(math.prod(envs.single_observation_space.shape), 64),
ortho_init=ortho_init,
),
act_fun,
layer_init(torch.nn.Linear(64, 64), ortho_init=ortho_init),
act_fun,
layer_init(torch.nn.Linear(64, envs.single_action_space.n), std=0.01, ortho_init=ortho_init),
)
self.avg_pg_loss = MeanMetric(**torchmetrics_kwargs)
self.avg_value_loss = MeanMetric(**torchmetrics_kwargs)
self.avg_ent_loss = MeanMetric(**torchmetrics_kwargs)
def get_action(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor]:
logits = self.actor(x)
distribution = Categorical(logits=logits)
if action is None:
action = distribution.sample()
return action, distribution.log_prob(action), distribution.entropy()
def get_greedy_action(self, x: Tensor) -> Tensor:
logits = self.actor(x)
probs = F.softmax(logits, dim=-1)
return torch.argmax(probs, dim=-1)
def get_value(self, x: Tensor) -> Tensor:
return self.critic(x)
def get_action_and_value(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor, Tensor]:
action, log_prob, entropy = self.get_action(x, action)
value = self.get_value(x)
return action, log_prob, entropy, value
def forward(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor, Tensor]:
return self.get_action_and_value(x, action)
@torch.no_grad()
def estimate_returns_and_advantages(
self,
rewards: Tensor,
values: Tensor,
dones: Tensor,
next_obs: Tensor,
next_done: Tensor,
num_steps: int,
gamma: float,
gae_lambda: float,
) -> tuple[Tensor, Tensor]:
next_value = self.get_value(next_obs).reshape(1, -1)
advantages = torch.zeros_like(rewards)
lastgaelam = 0
for t in reversed(range(num_steps)):
if t == num_steps - 1:
nextnonterminal = torch.logical_not(next_done)
nextvalues = next_value
else:
nextnonterminal = torch.logical_not(dones[t + 1])
nextvalues = values[t + 1]
delta = rewards[t] + gamma * nextvalues * nextnonterminal - values[t]
advantages[t] = lastgaelam = delta + gamma * gae_lambda * nextnonterminal * lastgaelam
returns = advantages + values
return returns, advantages
def training_step(self, batch: dict[str, Tensor]):
# Get actions and values given the current observations
_, newlogprob, entropy, newvalue = self(batch["obs"], batch["actions"].long())
logratio = newlogprob - batch["logprobs"]
ratio = logratio.exp()
# Policy loss
advantages = batch["advantages"]
if self.normalize_advantages:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
pg_loss = policy_loss(batch["advantages"], ratio, self.clip_coef)
# Value loss
v_loss = value_loss(
newvalue,
batch["values"],
batch["returns"],
self.clip_coef,
self.clip_vloss,
self.vf_coef,
)
# Entropy loss
ent_loss = entropy_loss(entropy, self.ent_coef)
# Update metrics
self.avg_pg_loss(pg_loss)
self.avg_value_loss(v_loss)
self.avg_ent_loss(ent_loss)
# Overall loss
return pg_loss + ent_loss + v_loss
def on_train_epoch_end(self, global_step: int) -> None:
# Log metrics and reset their internal state
self.logger.log_metrics(
{
"Loss/policy_loss": self.avg_pg_loss.compute(),
"Loss/value_loss": self.avg_value_loss.compute(),
"Loss/entropy_loss": self.avg_ent_loss.compute(),
},
global_step,
)
self.reset_metrics()
def reset_metrics(self):
self.avg_pg_loss.reset()
self.avg_value_loss.reset()
self.avg_ent_loss.reset()
def configure_optimizers(self, lr: float):
return torch.optim.Adam(self.parameters(), lr=lr, eps=1e-4)
| PPOLightningAgent |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI049.py | {
"start": 95,
"end": 153
} | class ____(typing.TypedDict):
bar: int
| _UnusedTypedDict2 |
python | huggingface__transformers | src/transformers/models/glm4v/modeling_glm4v.py | {
"start": 5968,
"end": 12302
} | class ____(nn.Module):
def __init__(self, config: Glm4vVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
def forward(self, embeddings, lengths, image_shapes, h_coords, w_coords) -> torch.Tensor:
"""
Forward pass with integrated position encoding adaptation using 2D interpolation.
Args:
embeddings: Input embeddings tensor
lengths (torch.Tensor): Sequence lengths for each image in the batch.
image_shapes (torch.Tensor): Tensor of shape [batch_size, 3] representing the image shapes (t, h, w).
h_coords (torch.Tensor): Tensor of shape [total_seq] representing the h coordinate for each patch.
w_coords (torch.Tensor): Tensor of shape [total_seq] representing the w coordinate for each patch.
Returns:
torch.Tensor: Embeddings with adapted position encoding added.
"""
# Get position embedding parameters
pos_embed_weight = self.position_embedding.weight
hidden_size = pos_embed_weight.shape[1]
total_seq = h_coords.shape[0]
device = pos_embed_weight.device
# Move coordinates to correct device
h_coords, w_coords = h_coords.to(device), w_coords.to(device)
# Handle empty sequence case
if total_seq == 0:
adapted_pos_embed = torch.empty(0, hidden_size, device=device, dtype=pos_embed_weight.dtype)
else:
# Convert inputs to tensors if needed
if isinstance(lengths, list):
lengths = torch.tensor(lengths, device=device, dtype=torch.long)
if not isinstance(image_shapes, torch.Tensor):
image_shapes = torch.tensor(image_shapes, device=device, dtype=torch.long)
# Prepare 2D position embedding
orig_size_sq = pos_embed_weight.shape[0]
orig_size = int(orig_size_sq**0.5)
pos_embed_2d = (
pos_embed_weight.view(orig_size, orig_size, hidden_size)
.permute(2, 0, 1)
.unsqueeze(0)
.to(device=device, dtype=torch.float32)
)
# Calculate target dimensions for each patch
target_h = torch.cat([image_shapes[i, 1].repeat(lengths[i]) for i in range(len(lengths))]).to(
device=device, dtype=torch.float32
)
target_w = torch.cat([image_shapes[i, 2].repeat(lengths[i]) for i in range(len(lengths))]).to(
device=device, dtype=torch.float32
)
# Normalize coordinates to [-1, 1] range for grid_sample
h_coords = h_coords.to(device=device, dtype=torch.float32)
w_coords = w_coords.to(device=device, dtype=torch.float32)
norm_w = ((w_coords + 0.5) / target_w) * 2 - 1
norm_h = ((h_coords + 0.5) / target_h) * 2 - 1
# Create sampling grid
grid = torch.stack((norm_w, norm_h), dim=-1).unsqueeze(0).unsqueeze(2)
# Perform bicubic interpolation
interpolated_embed_fp32 = F.grid_sample(
pos_embed_2d, grid, mode="bicubic", align_corners=False, padding_mode="border"
)
# Reshape and convert back to original dtype
adapted_pos_embed_fp32 = interpolated_embed_fp32.squeeze(0).squeeze(-1).permute(1, 0)
adapted_pos_embed = adapted_pos_embed_fp32.to(pos_embed_weight.dtype).to(embeddings.device)
# Add adapted position encoding to embeddings
embeddings = embeddings + adapted_pos_embed
return embeddings
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb_vision(
q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
orig_q_dtype = q.dtype
orig_k_dtype = k.dtype
q, k = q.float(), k.float()
cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float()
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
q_embed = q_embed.to(orig_q_dtype)
k_embed = k_embed.to(orig_k_dtype)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| Glm4vVisionEmbeddings |
python | scipy__scipy | scipy/interpolate/_ndgriddata.py | {
"start": 563,
"end": 12154
} | class ____(NDInterpolatorBase):
"""Nearest-neighbor interpolator in N > 1 dimensions.
Methods
-------
__call__
Parameters
----------
x : (npoints, ndims) 2-D ndarray of floats
Data point coordinates.
y : (npoints, ...) N-D ndarray of float or complex
Data values. The length of `y` along the first axis must be equal to
the length of `x`.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
tree_options : dict, optional
Options passed to the underlying ``cKDTree``.
.. versionadded:: 0.17.0
See Also
--------
griddata :
Interpolate unstructured D-D data.
LinearNDInterpolator :
Piecewise linear interpolator in N dimensions.
CloughTocher2DInterpolator :
Piecewise cubic, C1 smooth, curvature-minimizing interpolator in 2D.
interpn : Interpolation on a regular grid or rectilinear grid.
RegularGridInterpolator : Interpolator on a regular or rectilinear grid
in arbitrary dimensions (`interpn` wraps this
class).
Notes
-----
Uses ``scipy.spatial.cKDTree``
.. note:: For data on a regular grid use `interpn` instead.
Examples
--------
We can interpolate values on a 2D plane:
>>> from scipy.interpolate import NearestNDInterpolator
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> x = rng.random(10) - 0.5
>>> y = rng.random(10) - 0.5
>>> z = np.hypot(x, y)
>>> X = np.linspace(min(x), max(x))
>>> Y = np.linspace(min(y), max(y))
>>> X, Y = np.meshgrid(X, Y) # 2D grid for interpolation
>>> interp = NearestNDInterpolator(list(zip(x, y)), z)
>>> Z = interp(X, Y)
>>> plt.pcolormesh(X, Y, Z, shading='auto')
>>> plt.plot(x, y, "ok", label="input point")
>>> plt.legend()
>>> plt.colorbar()
>>> plt.axis("equal")
>>> plt.show()
"""
def __init__(self, x, y, rescale=False, tree_options=None):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
if tree_options is None:
tree_options = dict()
self.tree = cKDTree(self.points, **tree_options)
self.values = np.asarray(y)
def __call__(self, *args, **query_options):
"""
Evaluate interpolator at given points.
Parameters
----------
x1, x2, ... xn : array-like of float
Points where to interpolate data at.
x1, x2, ... xn can be array-like of float with broadcastable shape.
or x1 can be array-like of float with shape ``(..., ndim)``
**query_options
This allows ``eps``, ``p``, ``distance_upper_bound``, and ``workers``
being passed to the cKDTree's query function to be explicitly set.
See `scipy.spatial.cKDTree.query` for an overview of the different options.
.. versionadded:: 1.12.0
"""
# For the sake of enabling subclassing, NDInterpolatorBase._set_xi performs
# some operations which are not required by NearestNDInterpolator.__call__,
# hence here we operate on xi directly, without calling a parent class function.
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
# We need to handle two important cases:
# (1) the case where xi has trailing dimensions (..., ndim), and
# (2) the case where y has trailing dimensions
# We will first flatten xi to deal with case (1),
# do the computation in flattened array while retaining y's dimensionality,
# and then reshape the interpolated values back to match xi's shape.
# Flatten xi for the query
xi_flat = xi.reshape(-1, xi.shape[-1])
original_shape = xi.shape
flattened_shape = xi_flat.shape
# if distance_upper_bound is set to not be infinite,
# then we need to consider the case where cKDtree
# does not find any points within distance_upper_bound to return.
# It marks those points as having infinte distance, which is what will be used
# below to mask the array and return only the points that were deemed
# to have a close enough neighbor to return something useful.
dist, i = self.tree.query(xi_flat, **query_options)
valid_mask = np.isfinite(dist)
# create a holder interp_values array and fill with nans.
if self.values.ndim > 1:
interp_shape = flattened_shape[:-1] + self.values.shape[1:]
else:
interp_shape = flattened_shape[:-1]
if np.issubdtype(self.values.dtype, np.complexfloating):
interp_values = np.full(interp_shape, np.nan, dtype=self.values.dtype)
else:
interp_values = np.full(interp_shape, np.nan)
interp_values[valid_mask] = self.values[i[valid_mask], ...]
if self.values.ndim > 1:
new_shape = original_shape[:-1] + self.values.shape[1:]
else:
new_shape = original_shape[:-1]
interp_values = interp_values.reshape(new_shape)
return interp_values
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Convenience function for interpolating unstructured data in multiple dimensions.
Parameters
----------
points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).
Data point coordinates.
values : ndarray of float or complex, shape (n,)
Data values.
xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
tessellate the input point set to N-D
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Returns
-------
ndarray
Array of interpolated values.
See Also
--------
LinearNDInterpolator :
Piecewise linear interpolator in N dimensions.
NearestNDInterpolator :
Nearest-neighbor interpolator in N dimensions.
CloughTocher2DInterpolator :
Piecewise cubic, C1 smooth, curvature-minimizing interpolator in 2D.
interpn : Interpolation on a regular grid or rectilinear grid.
RegularGridInterpolator : Interpolator on a regular or rectilinear grid
in arbitrary dimensions (`interpn` wraps this
class).
Notes
-----
.. versionadded:: 0.9
.. note:: For data on a regular grid use `interpn` instead.
Examples
--------
Suppose we want to interpolate the 2-D function
>>> import numpy as np
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> rng = np.random.default_rng()
>>> points = rng.random((1000, 2))
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
""" # numpy/numpydoc#87 # noqa: E501
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from ._interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
if method == 'nearest':
fill_value = 'extrapolate'
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError(
f"Unknown interpolation method {method!r} for {ndim} dimensional data"
)
| NearestNDInterpolator |
python | sqlalchemy__sqlalchemy | test/dialect/oracle/test_dialect.py | {
"start": 34062,
"end": 37703
} | class ____(fixtures.TestBase):
__backend__ = True
__only_on__ = "oracle"
@testing.fixture
def scalar_strings(self, connection):
connection.exec_driver_sql(
"CREATE OR REPLACE TYPE strings_t IS TABLE OF VARCHAR2 (100)"
)
connection.exec_driver_sql(
r"""
CREATE OR REPLACE FUNCTION scalar_strings (
count_in IN INTEGER, string_in IN VARCHAR2)
RETURN strings_t
AUTHID DEFINER
IS
l_strings strings_t := strings_t ();
BEGIN
l_strings.EXTEND (count_in);
FOR indx IN 1 .. count_in
LOOP
l_strings (indx) := string_in;
END LOOP;
RETURN l_strings;
END;
"""
)
yield
connection.exec_driver_sql("DROP FUNCTION scalar_strings")
connection.exec_driver_sql("DROP TYPE strings_t")
@testing.fixture
def two_strings(self, connection):
connection.exec_driver_sql(
"""
CREATE OR REPLACE TYPE two_strings_ot
AUTHID DEFINER IS OBJECT
(
string1 VARCHAR2 (10),
string2 VARCHAR2 (10)
)"""
)
connection.exec_driver_sql(
"""
CREATE OR REPLACE TYPE two_strings_nt IS TABLE OF two_strings_ot
"""
)
connection.exec_driver_sql(
"""
CREATE OR REPLACE FUNCTION three_pairs
RETURN two_strings_nt
AUTHID DEFINER
IS
l_strings two_strings_nt;
BEGIN
RETURN two_strings_nt (two_strings_ot ('a', 'b'),
two_strings_ot ('c', 'd'),
two_strings_ot ('e', 'f'));
END;
"""
)
yield
connection.exec_driver_sql("DROP FUNCTION three_pairs")
connection.exec_driver_sql("DROP TYPE two_strings_nt")
connection.exec_driver_sql("DROP TYPE two_strings_ot")
def test_scalar_strings_control(self, scalar_strings, connection):
result = (
connection.exec_driver_sql(
"SELECT COLUMN_VALUE my_string FROM TABLE "
"(scalar_strings (5, 'some string'))"
)
.scalars()
.all()
)
eq_(result, ["some string"] * 5)
def test_scalar_strings_named_control(self, scalar_strings, connection):
result = (
connection.exec_driver_sql(
"SELECT COLUMN_VALUE anon_1 "
"FROM TABLE (scalar_strings (5, 'some string')) anon_1"
)
.scalars()
.all()
)
eq_(result, ["some string"] * 5)
def test_scalar_strings(self, scalar_strings, connection):
fn = func.scalar_strings(5, "some string")
result = connection.execute(select(fn.column_valued())).scalars().all()
eq_(result, ["some string"] * 5)
def test_two_strings_control(self, two_strings, connection):
result = connection.exec_driver_sql(
"SELECT string1, string2 FROM TABLE (three_pairs ())"
).all()
eq_(result, [("a", "b"), ("c", "d"), ("e", "f")])
def test_two_strings(self, two_strings, connection):
fn = func.three_pairs().table_valued("string1", "string2")
result = connection.execute(select(fn.c.string1, fn.c.string2)).all()
eq_(result, [("a", "b"), ("c", "d"), ("e", "f")])
def test_two_independent_tables(self, scalar_strings, connection):
fn1 = func.scalar_strings(5, "string one").column_valued()
fn2 = func.scalar_strings(3, "string two").column_valued()
result = connection.execute(select(fn1, fn2).where(fn1 != fn2)).all()
eq_(
result,
list(itertools.product(["string one"] * 5, ["string two"] * 3)),
)
| TableValuedTest |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/completion/base.py | {
"start": 11053,
"end": 12091
} | class ____(Completer):
"""
Completer class that can dynamically returns any Completer.
:param get_completer: Callable that returns a :class:`.Completer` instance.
"""
def __init__(self, get_completer: Callable[[], Completer | None]) -> None:
self.get_completer = get_completer
def get_completions(
self, document: Document, complete_event: CompleteEvent
) -> Iterable[Completion]:
completer = self.get_completer() or DummyCompleter()
return completer.get_completions(document, complete_event)
async def get_completions_async(
self, document: Document, complete_event: CompleteEvent
) -> AsyncGenerator[Completion, None]:
completer = self.get_completer() or DummyCompleter()
async for completion in completer.get_completions_async(
document, complete_event
):
yield completion
def __repr__(self) -> str:
return f"DynamicCompleter({self.get_completer!r} -> {self.get_completer()!r})"
| DynamicCompleter |
python | python-markdown__markdown | tests/test_apis.py | {
"start": 6170,
"end": 7195
} | class ____(unittest.TestCase):
""" Test Markdown's `HtmlStash`. """
def setUp(self):
self.stash = markdown.util.HtmlStash()
self.placeholder = self.stash.store('foo')
def testSimpleStore(self):
""" Test `HtmlStash.store`. """
self.assertEqual(self.placeholder, self.stash.get_placeholder(0))
self.assertEqual(self.stash.html_counter, 1)
self.assertEqual(self.stash.rawHtmlBlocks, ['foo'])
def testStoreMore(self):
""" Test `HtmlStash.store` with additional blocks. """
placeholder = self.stash.store('bar')
self.assertEqual(placeholder, self.stash.get_placeholder(1))
self.assertEqual(self.stash.html_counter, 2)
self.assertEqual(
self.stash.rawHtmlBlocks,
['foo', 'bar']
)
def testReset(self):
""" Test `HtmlStash.reset`. """
self.stash.reset()
self.assertEqual(self.stash.html_counter, 0)
self.assertEqual(self.stash.rawHtmlBlocks, [])
| TestHtmlStash |
python | ray-project__ray | python/ray/llm/tests/batch/gpu/processor/test_vllm_engine_proc.py | {
"start": 8670,
"end": 10232
} | class ____:
@pytest.mark.parametrize(
"experimental_config",
[
{"max_tasks_in_flight_per_actor": 10},
{},
],
)
def test_experimental_max_tasks_in_flight_per_actor_usage(
self, experimental_config
):
"""Tests that max_tasks_in_flight_per_actor is set properly in the ActorPoolStrategy."""
from ray.llm._internal.batch.processor.base import DEFAULT_MAX_TASKS_IN_FLIGHT
from ray.llm._internal.batch.processor.vllm_engine_proc import (
build_vllm_engine_processor,
vLLMEngineProcessorConfig,
)
with patch("ray.data.ActorPoolStrategy") as mock_actor_pool:
mock_actor_pool.return_value = MagicMock()
config = vLLMEngineProcessorConfig(
model_source="unsloth/Llama-3.2-1B-Instruct",
experimental=experimental_config,
)
build_vllm_engine_processor(config)
mock_actor_pool.assert_called()
call_kwargs = mock_actor_pool.call_args[1]
if experimental_config:
assert (
call_kwargs["max_tasks_in_flight_per_actor"]
== experimental_config["max_tasks_in_flight_per_actor"]
)
else:
assert (
call_kwargs["max_tasks_in_flight_per_actor"]
== DEFAULT_MAX_TASKS_IN_FLIGHT
)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestVLLMEngineProcessorConfig |
python | django-haystack__django-haystack | haystack/exceptions.py | {
"start": 539,
"end": 672
} | class ____(HaystackError):
"""Raised when a model instance has not been provided for More Like This."""
pass
| MoreLikeThisError |
python | huggingface__transformers | src/transformers/models/olmo/modeling_olmo.py | {
"start": 15000,
"end": 15536
} | class ____(PreTrainedModel):
config: OlmoConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["OlmoDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": OlmoDecoderLayer,
"attentions": OlmoAttention,
}
@auto_docstring
| OlmoPreTrainedModel |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor24.py | {
"start": 993,
"end": 1158
} | class ____(Container[int]):
def increment(self):
# This should generate an error if strictParameterNoneValue is false.
self.value += 1
| IntContainer |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 8006,
"end": 8264
} | class ____(ActionTool):
''' A base class action tools acting on plots.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
@abstract
| PlotActionTool |
python | pytorch__pytorch | test/test_testing.py | {
"start": 14366,
"end": 17200
} | class ____(MultiProcessTestCase):
@slowTest
def test_throw_unrecoverable_cuda_exception(self, device):
x = torch.rand(10, device=device)
# cause unrecoverable CUDA exception, recoverable on CPU
y = x[torch.tensor([25])].cpu()
@slowTest
def test_trivial_passing_test_case_on_cpu_cuda(self, device):
x1 = torch.tensor([0., 1.], device=device)
x2 = torch.tensor([0., 1.], device='cpu')
self.assertEqual(x1, x2)
instantiate_device_type_tests(
TestThatContainsCUDAAssertFailure,
globals(),
only_for='cuda'
)
if __name__ == '__main__':
run_tests()
""")
# we are currently disabling CUDA early termination for distributed tests.
self.assertIn('errors=2', stderr)
@expectedFailureMeta # This is only supported for CPU and CUDA
@onlyNativeDeviceTypes
def test_get_supported_dtypes(self, device):
# Test the `get_supported_dtypes` helper function.
# We acquire the dtypes for few Ops dynamically and verify them against
# the correct statically described values.
ops_to_test = list(filter(lambda op: op.name in ['atan2', 'topk', 'xlogy'], op_db))
for op in ops_to_test:
dynamic_dtypes = opinfo.utils.get_supported_dtypes(op, op.sample_inputs_func, self.device_type)
dynamic_dispatch = opinfo.utils.dtypes_dispatch_hint(dynamic_dtypes)
if self.device_type == 'cpu':
dtypes = op.dtypes
else: # device_type ='cuda'
dtypes = op.dtypesIfCUDA
self.assertTrue(set(dtypes) == set(dynamic_dtypes))
self.assertTrue(set(dtypes) == set(dynamic_dispatch.dispatch_fn()))
@onlyCPU
@ops(
[
op
for op in op_db
if len(
op.supported_dtypes("cpu").symmetric_difference(
op.supported_dtypes("cuda")
)
)
> 0
][:1],
dtypes=OpDTypes.none,
)
def test_supported_dtypes(self, device, op):
self.assertNotEqual(op.supported_dtypes("cpu"), op.supported_dtypes("cuda"))
self.assertEqual(op.supported_dtypes("cuda"), op.supported_dtypes("cuda:0"))
self.assertEqual(
op.supported_dtypes(torch.device("cuda")),
op.supported_dtypes(torch.device("cuda", index=1)),
)
def test_setup_and_teardown_run_for_device_specific_tests(self, device):
# TODO: Move this (and other similar text blocks) to some fixtures/ subdir
stderr = TestCase.runWithPytorchAPIUsageStderr(f"""\
#!/usr/bin/env python3
import torch
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_utils import TestCase, run_tests
| TestThatContainsCUDAAssertFailure |
python | kamyu104__LeetCode-Solutions | Python/count-subarrays-where-max-element-appears-at-least-k-times.py | {
"start": 564,
"end": 1051
} | class ____(object):
def countSubarrays(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
mx = max(nums)
result = (len(nums)+1)*len(nums)//2
left = cnt = 0
for right in xrange(len(nums)):
cnt += int(nums[right] == mx)
while cnt == k:
cnt -= int(nums[left] == mx)
left += 1
result -= right-left+1
return result
| Solution2 |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels30.py | {
"start": 315,
"end": 1734
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename("chart_data_labels30.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({"type": "column"})

        # Fixed axis ids so the generated XML matches the Excel reference file.
        chart.axis_ids = [67858816, 67863296]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column("A1", data[0])
        worksheet.write_column("B1", data[1])
        worksheet.write_column("C1", data[2])

        chart.add_series(
            {
                "values": "=Sheet1!$A$1:$A$5",
                "data_labels": {
                    "value": True,
                    # Delete every other data label; None keeps the default.
                    "custom": [
                        {"delete": True},
                        None,
                        {"delete": True},
                        None,
                        {"delete": True},
                    ],
                },
            }
        )
        chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
        chart.add_series({"values": "=Sheet1!$C$1:$C$5"})

        worksheet.insert_chart("E9", chart)

        workbook.close()

        self.assertExcelEqual()
| TestCompareXLSXFiles |
python | PrefectHQ__prefect | src/integrations/prefect-docker/tests/test_containers.py | {
"start": 1978,
"end": 2456
class ____:
    """Tests for the `stop_docker_container` task."""

    async def test_stop_kwargs(self, mock_docker_host: MagicMock):
        stop_kwargs = dict(container_id="42")
        with disable_run_logger():
            container = await stop_docker_container.fn(
                docker_host=mock_docker_host, **stop_kwargs
            )
        # The mocked client echoes back the requested container id.
        assert container.id == "42"
        client = mock_docker_host.get_client()
        # The container must have been looked up exactly once by its id.
        client.__enter__.return_value.containers.get.assert_called_once_with("42")
| TestStopDockerContainer |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/op_selection.py | {
"start": 847,
"end": 1373
class ____:
    """Holds a user-supplied op selection query and resolves it to node paths."""

    def __init__(self, query: Iterable[str]):
        self.query = query

    def resolve(self, graph_def: GraphDefinition) -> AbstractSet[str]:
        """Return the set of node paths selected by this query in `graph_def`."""
        # Entries containing "." address nested node paths directly and only
        # need validation, not query parsing.
        has_explicit_paths = any("." in entry for entry in self.query)
        if has_explicit_paths:
            node_paths = set(self.query)
            _validate_node_paths(node_paths, graph_def)
            return node_paths
        # Otherwise treat entries as op queries; validation happens inside
        # parse_op_queries.
        return set(parse_op_queries(graph_def, list(self.query)))
| OpSelection |
python | pytorch__pytorch | torch/_inductor/fx_passes/split_cat.py | {
"start": 44824,
"end": 48761
class ____(SplitCatSimplifier):
    """
    Helper class to merge Unbind->Cat/Stack. Many of the cases are similar to SplitCatSimplifier.
    Unbind can't be simplified like splits. So, we can only remove the unbind node. Other than this,
    other cases like multiple users, additional args, dim mismatch are similar to `SplitCatSimplifier`,
    hence we extend that class.
    """

    def remove_unbind(
        self,
        graph: torch.fx.Graph,
        unbind_node: torch.fx.Node,
    ):
        """Remove `unbind_node` by modeling it as a size-1-section split."""
        if not is_node_meta_valid(unbind_node):
            return
        # we need to check if the getitem indices from unbind are consecutive and all go to the same cat node
        # before we do the unbind remove, otherwise it will hit the error when we unbind part of them
        getitem_indices = [getitem_node.args[1] for getitem_node in unbind_node.users]
        if not is_sorted_and_consecutive(getitem_indices) or len(  # type: ignore[arg-type]
            getitem_indices
        ) != len(unbind_node.meta["example_value"]):
            return
        num_unbind = len(getitem_indices)
        # An unbind over N elements is equivalent to a split into N sections
        # of size 1, which lets the SplitCatSimplifier machinery do the rewrite.
        split_sections = [1 for _ in range(num_unbind)]  # type: ignore[operator, arg-type]

        super().simplify(graph, unbind_node, split_sections)

    def get_simplified_split_ranges(
        self,
        split_sections: list[int],
        next_users: list[torch.fx.Node],
        user_inputs_list: list[list[torch.fx.Node | _Range]],
    ) -> list[_Range] | None:
        """Only accept the simplification when it collapses to a single range."""
        simplified_split_ranges = super().get_simplified_split_ranges(
            split_sections, next_users, user_inputs_list
        )
        # Unlike a true split, an unbind can only be removed wholesale: anything
        # other than exactly one contiguous range means we must bail out.
        if not simplified_split_ranges or len(simplified_split_ranges) != 1:
            return None
        return simplified_split_ranges

    def get_transform_params(
        self,
        split_node: torch.fx.Node,
        next_users: list[torch.fx.Node],
        user_inputs_list: list[list[torch.fx.Node | _Range]],
    ) -> list[list[_TransformParam]] | None:
        """
        Figure out what transforms are needed for each input to each cat node.

        Here is the rough transforms we apply:

        x -> unbind -> stack => x -> movedim

        x -> unbind -> cat => x -> movedim -> flatten

        When cat/stack nodes have additional args:

             addn ---|              addn -> unsqueeze ---|
        x -> unbind -> stack  =>           x -> movedim  -> cat

             addn ---|                 addn ---|
        x -> unbind -> cat  =>   x -> movedim -> flatten -> cat

        (Note application of these depends on the dims as well)
        """
        split_dim = _get_dim(split_node)
        transform_params_list: list[list[_TransformParam]] = []
        for user_node, user_inputs in zip(next_users, user_inputs_list):
            cat_dim = get_arg_value(user_node, 1, "dim") or 0
            transform_params: list[_TransformParam] = []
            for user_input in user_inputs:
                if isinstance(user_input, tuple):
                    # User input is coming from unbind
                    movedim_params = (
                        (split_dim, cat_dim) if split_dim != cat_dim else None
                    )
                    flatten_params = None
                    if user_node.target is torch.cat:
                        flatten_params = (cat_dim, cat_dim + 1)
                    transform_params.append(
                        (None, movedim_params, None, flatten_params)
                    )
                elif (
                    user_node.target is torch.stack
                ):  # We need to unsqueeze inputs not coming through unbind into cat
                    transform_params.append((None, None, (cat_dim,), None))
                else:  # Non-unbind inputs
                    transform_params.append((None, None, None, None))
            transform_params_list.append(transform_params)
        return transform_params_list
python | ipython__ipython | tests/test_ultratb.py | {
"start": 3893,
"end": 4577
class ____(unittest.TestCase):
    """
    Regression test for the following issues:
    https://github.com/ipython/ipython/issues/8293
    https://github.com/ipython/ipython/issues/8205
    """

    def test_nested_genexpr(self):
        # The exception is raised from inside nested generator expressions;
        # the traceback renderer used to fail on such frames.
        code = dedent(
            """\
            class SpecificException(Exception):
                pass

            def foo_8293(x):
                raise SpecificException("Success!")

            sum(sum(foo_8293(x) for _ in [0]) for x in [0])
            """
        )
        with tt.AssertPrints("SpecificException: Success!", suppress=False):
            ip.run_cell(code)


# Source snippet that triggers an IndentationError when executed; used as a
# fixture by tests below.
indentationerror_file = """if True:
zoom()
"""
| NestedGenExprTestCase |
python | pydata__xarray | xarray/core/options.py | {
"start": 5974,
"end": 13716
class ____:
    """
    Set options for xarray in a controlled context.

    Parameters
    ----------
    arithmetic_join : {"inner", "outer", "left", "right", "exact"}, default: "inner"
        DataArray/Dataset alignment in binary operations:

        - "outer": use the union of object indexes
        - "inner": use the intersection of object indexes
        - "left": use indexes from the first object with each dimension
        - "right": use indexes from the last object with each dimension
        - "exact": instead of aligning, raise `ValueError` when indexes to be
          aligned are not equal
        - "override": if indexes are of same size, rewrite indexes to be
          those of the first object with that dimension. Indexes for the same
          dimension must have the same size in all objects.
    chunk_manager : str, default: "dask"
        Chunk manager to use for chunked array computations when multiple
        options are installed.
    cmap_divergent : str or matplotlib.colors.Colormap, default: "RdBu_r"
        Colormap to use for divergent data plots. If string, must be
        matplotlib built-in colormap. Can also be a Colormap object
        (e.g. mpl.colormaps["magma"])
    cmap_sequential : str or matplotlib.colors.Colormap, default: "viridis"
        Colormap to use for nondivergent data plots. If string, must be
        matplotlib built-in colormap. Can also be a Colormap object
        (e.g. mpl.colormaps["magma"])
    display_expand_attrs : {"default", True, False}
        Whether to expand the attributes section for display of
        ``DataArray`` or ``Dataset`` objects. Can be

        * ``True`` : to always expand attrs
        * ``False`` : to always collapse attrs
        * ``default`` : to expand unless over a pre-defined limit
    display_expand_coords : {"default", True, False}
        Whether to expand the coordinates section for display of
        ``DataArray`` or ``Dataset`` objects. Can be

        * ``True`` : to always expand coordinates
        * ``False`` : to always collapse coordinates
        * ``default`` : to expand unless over a pre-defined limit
    display_expand_data : {"default", True, False}
        Whether to expand the data section for display of ``DataArray``
        objects. Can be

        * ``True`` : to always expand data
        * ``False`` : to always collapse data
        * ``default`` : to expand unless over a pre-defined limit
    display_expand_data_vars : {"default", True, False}
        Whether to expand the data variables section for display of
        ``Dataset`` objects. Can be

        * ``True`` : to always expand data variables
        * ``False`` : to always collapse data variables
        * ``default`` : to expand unless over a pre-defined limit
    display_expand_indexes : {"default", True, False}
        Whether to expand the indexes section for display of
        ``DataArray`` or ``Dataset``. Can be

        * ``True`` : to always expand indexes
        * ``False`` : to always collapse indexes
        * ``default`` : to expand unless over a pre-defined limit (always collapse for html style)
    display_max_children : int, default: 6
        Maximum number of children to display for each node in a DataTree.
    display_max_rows : int, default: 12
        Maximum display rows.
    display_values_threshold : int, default: 200
        Total number of array elements which trigger summarization rather
        than full repr for variable data views (numpy arrays).
    display_style : {"text", "html"}, default: "html"
        Display style to use in jupyter for xarray objects.
    display_width : int, default: 80
        Maximum display width for ``repr`` on xarray objects.
    file_cache_maxsize : int, default: 128
        Maximum number of open files to hold in xarray's
        global least-recently-usage cached. This should be smaller than
        your system's per-process file descriptor limit, e.g.,
        ``ulimit -n`` on Linux.
    keep_attrs : {"default", True, False}
        Whether to keep attributes on xarray Datasets/dataarrays after
        operations. Can be

        * ``True`` : to always keep attrs
        * ``False`` : to always discard attrs
        * ``default`` : to use original logic that attrs should only
          be kept in unambiguous circumstances
    netcdf_engine_order : sequence, default ['netcdf4', 'h5netcdf', 'scipy']
        Preference order of backend engines to use when reading or writing
        netCDF files with ``open_dataset()`` and ``to_netcdf()`` if ``engine``
        is not explicitly specified. May be any permutation or subset of
        ``['netcdf4', 'h5netcdf', 'scipy']``.
    use_bottleneck : bool, default: True
        Whether to use ``bottleneck`` to accelerate 1D reductions and
        1D rolling reduction operations.
    use_flox : bool, default: True
        Whether to use ``numpy_groupies`` and `flox`` to
        accelerate groupby and resampling reductions.
    use_new_combine_kwarg_defaults : bool, default False
        Whether to use new kwarg default values for combine functions:
        :py:func:`~xarray.concat`, :py:func:`~xarray.merge`,
        :py:func:`~xarray.open_mfdataset`. New values are:

        * ``data_vars``: None
        * ``coords``: "minimal"
        * ``compat``: "override"
        * ``join``: "exact"
    use_numbagg : bool, default: True
        Whether to use ``numbagg`` to accelerate reductions.
        Takes precedence over ``use_bottleneck`` when both are True.
    use_opt_einsum : bool, default: True
        Whether to use ``opt_einsum`` to accelerate dot products.
    warn_for_unclosed_files : bool, default: False
        Whether or not to issue a warning when unclosed files are
        deallocated. This is mostly useful for debugging.

    Examples
    --------
    It is possible to use ``set_options`` either as a context manager:

    >>> ds = xr.Dataset({"x": np.arange(1000)})
    >>> with xr.set_options(display_width=40):
    ...     print(ds)
    ...
    <xarray.Dataset> Size: 8kB
    Dimensions:  (x: 1000)
    Coordinates:
      * x        (x) int64 8kB 0 1 ... 999
    Data variables:
        *empty*

    Or to set global options:

    >>> xr.set_options(display_width=80)  # doctest: +ELLIPSIS
    <xarray.core.options.set_options object at 0x...>
    """

    def __init__(self, **kwargs):
        # Snapshot of the previous values of every overridden option; used by
        # __exit__ to restore global state when used as a context manager.
        self.old = {}
        for k, v in kwargs.items():
            if k not in OPTIONS:
                raise ValueError(
                    f"argument name {k!r} is not in the set of valid options {set(OPTIONS)!r}"
                )
            if k in _VALIDATORS and not _VALIDATORS[k](v):
                # Build a friendlier hint for options with enumerable values.
                if k == "arithmetic_join":
                    expected = f"Expected one of {_JOIN_OPTIONS!r}"
                elif k == "display_style":
                    expected = f"Expected one of {_DISPLAY_OPTIONS!r}"
                elif k == "netcdf_engine_order":
                    expected = f"Expected a subset of {sorted(_NETCDF_ENGINES)}"
                else:
                    expected = ""
                raise ValueError(
                    f"option {k!r} given an invalid value: {v!r}. " + expected
                )
            self.old[k] = OPTIONS[k]
        self._apply_update(kwargs)

    def _apply_update(self, options_dict):
        # Run per-option side effects (registered setters) before mutating the
        # global OPTIONS mapping.
        for k, v in options_dict.items():
            if k in _SETTERS:
                _SETTERS[k](v)
        OPTIONS.update(options_dict)

    def __enter__(self):
        # Options were already applied in __init__; nothing useful to bind.
        return

    def __exit__(self, type, value, traceback):
        # Restore the snapshot taken in __init__.
        self._apply_update(self.old)
def get_options():
    """
    Get options for xarray.

    Returns a read-only (frozen) snapshot of the current global options.

    See Also
    ----------
    set_options
    """
    current_options = FrozenDict(OPTIONS)
    return current_options
| set_options |
python | getsentry__sentry | src/sentry/integrations/discord/message_builder/base/embed/base.py | {
"start": 938,
"end": 2598
class ____:
    """
    Represents a rich embed object.

    Some fields are not implemented, add to this as needed.

    https://discord.com/developers/docs/resources/channel#embed-object
    """

    def __init__(
        self,
        title: str | None = None,
        description: str | None = None,
        url: str | None = None,
        color: int | None = None,
        footer: DiscordMessageEmbedFooter | None = None,
        fields: Iterable[DiscordMessageEmbedField] | None = None,
        timestamp: datetime | None = None,
        image: DiscordMessageEmbedImage | None = None,
    ) -> None:
        self.title = title
        self.description = description
        self.url = url
        self.color = color
        self.footer = footer
        self.fields = fields
        self.timestamp = timestamp
        self.image = image

    def build(self) -> DiscordMessageEmbedDict:
        """Serialize this embed to the dict shape Discord's API expects,
        omitting every field that was not set."""
        # Scalar fields are copied through verbatim when present.
        scalar_fields = (
            ("title", self.title),
            ("description", self.description),
            ("url", self.url),
            ("color", self.color),
        )
        embed: DiscordMessageEmbedDict = {
            key: value for key, value in scalar_fields if value is not None
        }
        # Composite fields need their own serialization step.
        if self.footer is not None:
            embed["footer"] = self.footer.build()
        if self.fields is not None:
            embed["fields"] = [field.build() for field in self.fields]
        if self.timestamp is not None:
            embed["timestamp"] = self.timestamp.isoformat()
        if self.image is not None:
            embed["image"] = self.image.build()
        return embed
| DiscordMessageEmbed |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/cdxgen.py | {
"start": 18526,
"end": 27423
class ____(SbomApplicationJob):
    # Provider coordinates this job produces an SBOM for.
    provider_id: str
    provider_version: str
    # Directory under the target dir that holds the provider's requirements.
    folder_name: str

    def get_job_name(self) -> str:
        """Return a human-readable job identifier for logs and result maps."""
        return f"{self.provider_id}:{self.provider_version}:python{self.python_version}"

    def produce(self, output: Output | None, port: int, github_token: str | None) -> tuple[int, str]:
        """Ask a locally running cdxgen server to generate the provider SBOM.

        Writes the generated SBOM to ``self.target_path`` on success and
        returns ``(status_code, job_description)`` where status 0 means success.
        ``github_token`` is unused here; presumably part of the shared job
        interface — TODO confirm against SbomApplicationJob.
        """
        import requests

        get_console(output=output).print(
            f"[info]Updating sbom for provider {self.provider_id} version {self.provider_version} and python "
            f"{self.python_version}"
        )
        get_console(output=output).print(
            f"[info]Generating sbom for provider {self.provider_id} version {self.provider_version} and "
            f"python {self.python_version}"
        )
        url = (
            f"http://127.0.0.1:{port}/sbom?path=/app/{TARGET_DIR_NAME}/{self.folder_name}/python{self.python_version}/without-core&"
            f"project-name={self.provider_version}&project-version={self.provider_version}&multiProject=true"
        )
        get_console(output=output).print(f"[info]Triggering sbom generation via {url}")
        if not get_dry_run():
            response = requests.get(url)
            if response.status_code != 200:
                get_console(output=output).print(
                    f"[error]Generation for Airflow {self.provider_id}:{self.provider_version}:"
                    f"{self.python_version} failed. Status code {response.status_code}"
                )
                return (
                    response.status_code,
                    f"SBOM Generate {self.provider_id}:{self.provider_version}:{self.python_version}",
                )
            # Persist the raw SBOM payload returned by the cdxgen server.
            self.target_path.write_bytes(response.content)
            get_console(output=output).print(
                f"[success]Generated SBOM for {self.provider_id}:{self.provider_version}:"
                f"{self.python_version}"
            )
        return 0, f"SBOM Generate {self.provider_id}:{self.provider_version}:{self.python_version}"
def produce_sbom_for_application_via_cdxgen_server(
    job: SbomApplicationJob,
    output: Output | None,
    github_token: str | None,
    port_map: dict[str, int] | None = None,
) -> tuple[int, str]:
    """
    Produces SBOM for application using cdxgen server.

    :param job: Job to run
    :param output: Output to use
    :param github_token: GitHub token to use for downloading files`
    :param port_map map of process name to port - making sure that one process talks to one server
        in case parallel processing is used
    :return: tuple with exit code and output
    """
    if port_map is not None:
        # Parallel mode: each worker process talks to its own server port.
        port = port_map[multiprocessing.current_process().name]
    else:
        port = 9090
    get_console(output=output).print(f"[info]Using port {port}")
    return job.produce(output, port, github_token)
def convert_licenses(licenses: list[dict[str, Any]]) -> str:
    """Render a CycloneDX ``licenses`` array as a comma-separated string.

    Each entry either wraps a ``license`` object (with ``id`` or ``name``)
    or carries an SPDX ``expression``; anything else raises ValueError.
    """
    rendered: list[str] = []
    for license in licenses:
        if "license" in license:
            details = license["license"]
            if "id" in details:
                rendered.append(details["id"])
            elif "name" in details:
                rendered.append(details["name"])
            else:
                raise ValueError(f"Unknown license format: {license}")
        elif "expression" in license:
            rendered.append(license["expression"])
        else:
            raise ValueError(f"Unknown license format: {license}")
    return ", ".join(rendered)
def get_vcs(dependency: dict[str, Any]) -> str:
    """Return the VCS URL of a CycloneDX component, forced to https.

    Looks through ``externalReferences`` for the first entry of type ``vcs``;
    returns "" when none is present.
    """
    for reference in dependency.get("externalReferences", []):
        if reference["type"] == "vcs":
            return reference["url"].replace("http://", "https://")
    return ""
def get_pypi_link(dependency: dict[str, Any]) -> str:
if "purl" in dependency and "pkg:pypi" in dependency["purl"]:
package, version = dependency["purl"][len("pkg:pypi/") :].split("@")
return f"https://pypi.org/project/{package}/{version}/"
return ""
OPEN_PSF_CHECKS = [
"Code-Review",
"Maintained",
"CII-Best-Practices",
"License",
"Binary-Artifacts",
"Dangerous-Workflow",
"Token-Permissions",
"Pinned-Dependencies",
"Branch-Protection",
"Signed-Releases",
"Security-Policy",
"Dependency-Update-Tool",
"Contributors",
"CI-Tests",
"Fuzzing",
"Packaging",
"Vulnerabilities",
"SAST",
]
CHECK_DOCS: dict[str, str] = {}
def get_github_stats(
    vcs: str, project_name: str, github_token: str | None, console: Console
) -> dict[str, Any]:
    """Return ``{"Industry importance": <level>}`` for a GitHub-hosted project.

    Importance comes from the curated spreadsheet metadata when the project is
    listed there, otherwise it is derived from stargazer/fork counts. Returns
    an empty dict for non-GitHub VCS URLs or when the repository is not found.
    """
    import requests

    result = {}
    if vcs and vcs.startswith("https://github.com/"):
        importance = "Low"
        api_url = vcs.replace("https://github.com/", "https://api.github.com/repos/")
        if api_url.endswith("/"):
            api_url = api_url[:-1]
        headers = {"Authorization": f"token {github_token}"} if github_token else {}
        console.print(f"[bright_blue]Retrieving GitHub Stats from {api_url}")
        response = requests.get(api_url, headers=headers)
        if response.status_code == 404:
            console.print(f"[yellow]GitHub API returned 404 for {api_url}")
            return {}
        response.raise_for_status()
        github_data = response.json()
        # Default missing/null counts to 0: `.get()` can yield None, and the
        # comparisons below would raise TypeError on None.
        stargazer_count = github_data.get("stargazers_count") or 0
        forks_count = github_data.get("forks_count") or 0
        # Curated spreadsheet classification wins over raw popularity numbers.
        if project_name in get_project_metadata(MetadataFromSpreadsheet.KNOWN_LOW_IMPORTANCE_PROJECTS):
            importance = "Low"
        elif project_name in get_project_metadata(MetadataFromSpreadsheet.KNOWN_MEDIUM_IMPORTANCE_PROJECTS):
            importance = "Medium"
        elif project_name in get_project_metadata(MetadataFromSpreadsheet.KNOWN_HIGH_IMPORTANCE_PROJECTS):
            importance = "High"
        elif forks_count > 1000 or stargazer_count > 1000:
            importance = "High"
        elif stargazer_count > 100 or forks_count > 100:
            importance = "Medium"
        result["Industry importance"] = importance
        console.print("[green]Successfully retrieved GitHub Stats.")
    else:
        console.print(f"[yellow]Not retrieving GitHub Stats for {vcs}")
    return result
def get_open_psf_scorecard(vcs: str, project_name: str, console: Console) -> dict[str, Any]:
    """Fetch the OpenSSF (Open PSF) Scorecard for a repository.

    Returns a flat dict with the overall ``OPSF-Score``, one ``OPSF-<check>``
    score and ``OPSF-Details-<check>`` reason per check, plus derived
    ``Lifecycle status`` and ``Unpatched Vulns`` entries. Returns {} when no
    scorecard exists for the repository. Also mutates the module-level
    CHECK_DOCS mapping as a side effect.
    """
    import requests

    console.print(f"[info]Retrieving Open PSF Scorecard for {project_name}")
    # Strip the scheme: the scorecard API is keyed by host/org/repo.
    repo_url = vcs.split("://")[1]
    open_psf_url = f"https://api.securityscorecards.dev/projects/{repo_url}"
    scorecard_response = requests.get(open_psf_url)
    if scorecard_response.status_code == 404:
        # No scorecard recorded for this repository.
        return {}
    scorecard_response.raise_for_status()
    open_psf_scorecard = scorecard_response.json()
    results = {}
    results["OPSF-Score"] = open_psf_scorecard["score"]
    if "checks" in open_psf_scorecard:
        for check in open_psf_scorecard["checks"]:
            check_name = check["name"]
            score = check["score"]
            results["OPSF-" + check_name] = check["score"]
            reason = check.get("reason") or ""
            if check.get("details"):
                reason += "\n".join(check["details"])
            results["OPSF-Details-" + check_name] = reason
            # Side effect: remember each check's documentation text globally.
            CHECK_DOCS[check_name] = check["documentation"]["short"] + "\n" + check["documentation"]["url"]
            if check_name == "Maintained":
                # Curated "stable" projects override the score-based heuristic.
                if project_name in get_project_metadata(MetadataFromSpreadsheet.KNOWN_STABLE_PROJECTS):
                    lifecycle_status = "Stable"
                else:
                    if score == 0:
                        lifecycle_status = "Abandoned"
                    elif score < 6:
                        lifecycle_status = "Somewhat maintained"
                    else:
                        lifecycle_status = "Actively maintained"
                results["Lifecycle status"] = lifecycle_status
            if check_name == "Vulnerabilities":
                # A score of 10 means no known unpatched vulnerabilities.
                results["Unpatched Vulns"] = "Yes" if score != 10 else ""
    console.print(f"[success]Retrieved Open PSF Scorecard for {project_name}")
    return results
def get_governance(vcs: str | None):
    """Classify a project's governance model from its GitHub organization."""
    if not vcs or not vcs.startswith("https://github.com/"):
        return ""
    # URL shape: https://github.com/<organization>/<repo>/...
    org = vcs.split("/")[3].lower()
    if org in get_project_metadata(MetadataFromSpreadsheet.KNOWN_REPUTABLE_FOUNDATIONS):
        return "Reputable Foundation"
    if org in get_project_metadata(MetadataFromSpreadsheet.KNOWN_STRONG_COMMUNITIES):
        return "Strong Community"
    if org in get_project_metadata(MetadataFromSpreadsheet.KNOWN_COMPANIES):
        return "Company"
    return "Loose community/ Single Person"
def normalize_package_name(name):
    """Normalize a distribution name per PEP 503: collapse runs of
    ``-``, ``_`` and ``.`` into a single ``-`` and lowercase the result."""
    import re

    normalized = re.sub(r"[-_.]+", "-", name)
    return normalized.lower()
| SbomProviderJob |
python | PyCQA__isort | isort/io.py | {
"start": 2067,
"end": 2219
} | class ____(StringIO):
def write(self, *args: Any, **kwargs: Any) -> None: # type: ignore # skipcq: PTC-W0049
pass
Empty = _EmptyIO()
| _EmptyIO |
python | encode__httpx | httpx/_transports/asgi.py | {
"start": 1352,
"end": 5501
class ____(AsyncBaseTransport):
    """
    A custom AsyncTransport that handles sending requests directly to an ASGI app.

    ```python
    transport = httpx.ASGITransport(
        app=app,
        root_path="/submount",
        client=("1.2.3.4", 123)
    )
    client = httpx.AsyncClient(transport=transport)
    ```

    Arguments:

    * `app` - The ASGI application.
    * `raise_app_exceptions` - Boolean indicating if exceptions in the application
       should be raised. Default to `True`. Can be set to `False` for use cases
       such as testing the content of a client 500 response.
    * `root_path` - The root path on which the ASGI application should be mounted.
    * `client` - A two-tuple indicating the client IP and port of incoming requests.
    ```
    """

    def __init__(
        self,
        app: _ASGIApp,
        raise_app_exceptions: bool = True,
        root_path: str = "",
        client: tuple[str, int] = ("127.0.0.1", 123),
    ) -> None:
        self.app = app
        self.raise_app_exceptions = raise_app_exceptions
        self.root_path = root_path
        self.client = client

    async def handle_async_request(
        self,
        request: Request,
    ) -> Response:
        """Translate `request` into an ASGI HTTP scope, run the app through
        the receive/send callables, and assemble the resulting Response."""
        assert isinstance(request.stream, AsyncByteStream)

        # ASGI scope.
        scope = {
            "type": "http",
            "asgi": {"version": "3.0"},
            "http_version": "1.1",
            "method": request.method,
            "headers": [(k.lower(), v) for (k, v) in request.headers.raw],
            "scheme": request.url.scheme,
            "path": request.url.path,
            "raw_path": request.url.raw_path.split(b"?")[0],
            "query_string": request.url.query,
            "server": (request.url.host, request.url.port),
            "client": self.client,
            "root_path": self.root_path,
        }

        # Request.
        request_body_chunks = request.stream.__aiter__()
        request_complete = False

        # Response.
        status_code = None
        response_headers = None
        body_parts = []
        response_started = False
        response_complete = create_event()

        # ASGI callables.
        async def receive() -> dict[str, typing.Any]:
            nonlocal request_complete

            # Once the request body is exhausted, block until the response is
            # done, then report a disconnect (mirrors real-server behavior).
            if request_complete:
                await response_complete.wait()
                return {"type": "http.disconnect"}

            try:
                body = await request_body_chunks.__anext__()
            except StopAsyncIteration:
                request_complete = True
                return {"type": "http.request", "body": b"", "more_body": False}
            return {"type": "http.request", "body": body, "more_body": True}

        async def send(message: typing.MutableMapping[str, typing.Any]) -> None:
            nonlocal status_code, response_headers, response_started

            if message["type"] == "http.response.start":
                assert not response_started

                status_code = message["status"]
                response_headers = message.get("headers", [])
                response_started = True

            elif message["type"] == "http.response.body":
                assert not response_complete.is_set()
                body = message.get("body", b"")
                more_body = message.get("more_body", False)

                # HEAD responses must not carry a body.
                if body and request.method != "HEAD":
                    body_parts.append(body)

                if not more_body:
                    response_complete.set()

        try:
            await self.app(scope, receive, send)
        except Exception:  # noqa: PIE-786
            if self.raise_app_exceptions:
                raise

            # Swallow the app exception and synthesize a 500 response.
            response_complete.set()
            if status_code is None:
                status_code = 500
            if response_headers is None:
                response_headers = {}

        assert response_complete.is_set()
        assert status_code is not None
        assert response_headers is not None

        stream = ASGIResponseStream(body_parts)

        return Response(status_code, headers=response_headers, stream=stream)
| ASGITransport |
python | wireservice__csvkit | csvkit/cli.py | {
"start": 577,
"end": 1552
} | class ____:
"""
A proxy for a File object that delays opening it until
a read method is called.
Currently this implements only the minimum methods to be useful,
but it could easily be expanded.
"""
def __init__(self, init, *args, **kwargs):
self.init = init
self.f = None
self._is_lazy_opened = False
self._lazy_args = args
self._lazy_kwargs = kwargs
def __getattr__(self, name):
self._open()
return getattr(self.f, name)
def __iter__(self):
return self
def close(self):
if self._is_lazy_opened:
self.f.close()
self.f = None
self._is_lazy_opened = False
def __next__(self):
self._open()
return next(self.f).replace('\0', '')
def _open(self):
if not self._is_lazy_opened:
self.f = self.init(*self._lazy_args, **self._lazy_kwargs)
self._is_lazy_opened = True
| LazyFile |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 131522,
"end": 132121
class ____(sgqlc.types.Input):
    """Autogenerated input type of AddProjectColumn"""

    # NOTE: generated from the GitHub GraphQL schema — do not edit by hand.
    __schema__ = github_schema
    __field_names__ = ("project_id", "name", "client_mutation_id")
    project_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectId")
    """The Node ID of the project."""

    name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
    """The name of the column."""

    client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
    """A unique identifier for the client performing the mutation."""
| AddProjectColumnInput |
python | weaviate__weaviate-python-client | weaviate/backup/backup_location.py | {
"start": 550,
"end": 684
class ____(_BackupLocationConfig):
    """The dynamic location of a backup for GCP."""

    # Object path within the bucket — presumably a prefix; TODO confirm.
    path: str
    # Name of the GCS bucket holding the backup.
    bucket: str
| _BackupLocationGCP |
python | python-openxml__python-docx | src/docx/opc/pkgreader.py | {
"start": 8640,
"end": 9588
class ____:
    """Read-only sequence of |_SerializedRelationship| instances corresponding to the
    relationships item XML passed to constructor."""

    def __init__(self):
        super().__init__()
        # Internal list backing the read-only sequence.
        self._srels = []

    def __iter__(self):
        """Support iteration, e.g. 'for x in srels:'."""
        return iter(self._srels)

    @staticmethod
    def load_from_xml(baseURI, rels_item_xml):
        """Return |_SerializedRelationships| instance loaded with the relationships
        contained in `rels_item_xml`.

        Returns an empty collection if `rels_item_xml` is |None|.
        """
        srels = _SerializedRelationships()
        if rels_item_xml is None:
            return srels
        rels_elm = parse_xml(rels_item_xml)
        srels._srels = [
            _SerializedRelationship(baseURI, rel_elm)
            for rel_elm in rels_elm.Relationship_lst
        ]
        return srels
| _SerializedRelationships |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column.py | {
"start": 104187,
"end": 106138
class ____(_CategoricalColumn,
           collections.namedtuple(
               '_HashedCategoricalColumn',
               ['key', 'hash_bucket_size', 'dtype'])):
    """see `categorical_column_with_hash_bucket`."""

    @property
    def name(self):
        return self.key

    @property
    def _parse_example_spec(self):
        # The feature is variable-length, so it parses to a SparseTensor.
        return {self.key: parsing_ops.VarLenFeature(self.dtype)}

    def _transform_feature(self, inputs):
        """Hash the sparse input values into `hash_bucket_size` integer ids."""
        input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
        if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
            raise ValueError('SparseColumn input must be a SparseTensor.')

        fc_utils.assert_string_or_int(
            input_tensor.dtype,
            prefix='column_name: {} input_tensor'.format(self.key))

        if self.dtype.is_integer != input_tensor.dtype.is_integer:
            raise ValueError(
                'Column dtype and SparseTensors dtype must be compatible. '
                'key: {}, column dtype: {}, tensor dtype: {}'.format(
                    self.key, self.dtype, input_tensor.dtype))

        # Hashing operates on strings, so integer inputs are stringified first.
        if self.dtype == dtypes.string:
            sparse_values = input_tensor.values
        else:
            sparse_values = string_ops.as_string(input_tensor.values)

        sparse_id_values = string_ops.string_to_hash_bucket_fast(
            sparse_values, self.hash_bucket_size, name='lookup')
        # Same sparsity pattern as the input, values replaced by bucket ids.
        return sparse_tensor_lib.SparseTensor(input_tensor.indices,
                                              sparse_id_values,
                                              input_tensor.dense_shape)

    @property
    def _num_buckets(self):
        """Returns number of buckets in this sparse feature."""
        return self.hash_bucket_size

    def _get_sparse_tensors(self,
                            inputs,
                            weight_collections=None,
                            trainable=None):
        # Hashed categorical columns carry no per-id weights.
        return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
| _HashedCategoricalColumn |
python | jina-ai__jina | tests/k8s/conftest.py | {
"start": 338,
"end": 8578
class ____:
    """Wraps a pytest-kind KindCluster: configures kubectl, installs Linkerd,
    and loads docker images into the cluster on demand."""

    def __init__(self, kind_cluster: KindCluster, logger: JinaLogger) -> None:
        self._cluster = kind_cluster
        self._cluster.ensure_kubectl()
        self._kube_config_path = os.path.join(
            os.getcwd(), '.pytest-kind/pytest-kind/kubeconfig'
        )
        self._log = logger
        self._set_kube_config()
        self._install_linkerd(kind_cluster)
        # Tracks image:tag strings already loaded so we don't reload them.
        self._loaded_images = set()

    def _linkerd_install_cmd(
        self, kind_cluster: KindCluster, cmd, tool_name: str
    ) -> None:
        """Run a linkerd install command and pipe its manifest into kubectl apply."""
        self._log.info(f'Installing {tool_name} to Cluster...')
        kube_out = subprocess.check_output(
            (str(kind_cluster.kubectl_path), 'version'),
            env=os.environ,
        )
        self._log.info(f'kubernetes versions: {kube_out}')

        # since we need to pipe to commands and the linkerd output can bee too long
        # there is a risk of deadlock and hanging tests: https://docs.python.org/3/library/subprocess.html#popen-objects
        # to avoid this, the right mechanism is implemented in subprocess.run and subprocess.check_output, but output
        # must be piped to a file-like object, not to stdout
        proc_stdout = tempfile.TemporaryFile()
        proc = subprocess.run(
            cmd,
            stdout=proc_stdout,
            env={"KUBECONFIG": str(kind_cluster.kubeconfig_path)},
        )
        proc_stdout.seek(0)

        kube_out = subprocess.check_output(
            (
                str(kind_cluster.kubectl_path),
                'apply',
                '-f',
                '-',
            ),
            stdin=proc_stdout,
            env=os.environ,
        )
        returncode = proc.returncode
        self._log.info(
            f'Installing {tool_name} to Cluster returned code {returncode}, kubectl output was {kube_out}'
        )
        if returncode is not None and returncode != 0:
            raise Exception(f'Installing {tool_name} failed with {returncode}')

    def _install_linkerd(self, kind_cluster: KindCluster) -> None:
        # linkerd < 2.12: only linkerd install is needed
        # in later versions, linkerd install --crds will be needed
        self._linkerd_install_cmd(
            kind_cluster, [f'{Path.home()}/.linkerd2/bin/linkerd', 'install'], 'Linkerd'
        )
        self._log.info('check linkerd status')
        try:
            out = subprocess.check_output(
                [f'{Path.home()}/.linkerd2/bin/linkerd', 'check'],
                env=os.environ,
                stderr=subprocess.STDOUT,
            )
            print(f'linkerd check yields {out.decode() if out else "nothing"}')
        except subprocess.CalledProcessError as e:
            print(
                f'linkerd check failed with error code {e.returncode} and output {e.output}, and stderr {e.stderr}'
            )
            raise

    def install_linkerd_smi(self) -> None:
        """Install the Linkerd SMI extension and verify it with `linkerd-smi check`.

        NOTE(review): unlike _linkerd_install_cmd, this pipes Popen stdout
        directly into kubectl, which the comment above warns can deadlock on
        long output — consider routing through a temp file here too.
        """
        self._log.info('Installing Linkerd SMI to Cluster...')
        proc = subprocess.Popen(
            [f'{Path.home()}/.linkerd2/bin/linkerd-smi', 'install'],
            stdout=subprocess.PIPE,
            env={"KUBECONFIG": str(self._cluster.kubeconfig_path)},
        )
        kube_out = subprocess.check_output(
            (
                str(self._cluster.kubectl_path),
                'apply',
                '-f',
                '-',
            ),
            stdin=proc.stdout,
            env=os.environ,
        )
        self._log.info('Poll status of linkerd smi install')
        returncode = proc.poll()
        self._log.info(
            f'Installing Linkerd to Cluster returned code {returncode}, kubectl output was {kube_out}'
        )
        if returncode is not None and returncode != 0:
            raise Exception(f"Installing linkerd failed with {returncode}")
        self._log.info('check linkerd status')
        try:
            out = subprocess.check_output(
                [f'{Path.home()}/.linkerd2/bin/linkerd-smi', 'check'],
                env=os.environ,
                stderr=subprocess.STDOUT,
            )
            print(f'linkerd check yields {out.decode() if out else "nothing"}')
        except subprocess.CalledProcessError as e:
            print(
                f'linkerd check failed with error code {e.returncode} and output {e.output}, and stderr {e.stderr}'
            )
            raise

    def _set_kube_config(self):
        # Point kubectl/k8s client libraries at the kind cluster's kubeconfig.
        self._log.info(f'Setting KUBECONFIG to {self._kube_config_path}')
        os.environ['KUBECONFIG'] = self._kube_config_path
        load_cluster_config()

    def load_docker_images(
        self, images: List[str], image_tag_map: Dict[str, str]
    ) -> None:
        """Build (if needed) and load each image into the cluster exactly once."""
        for image in images:
            full_image_name = image + ':' + image_tag_map[image]
            if full_image_name not in self._loaded_images:
                # 'alpine' and 'jinaai/jina' are pulled, not built locally.
                if image != 'alpine' and image != 'jinaai/jina':
                    build_docker_image(image, image_tag_map)
                self._cluster.load_docker_image(full_image_name)
                self._loaded_images.add(full_image_name)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger() -> JinaLogger:
return JinaLogger('kubernetes-testing')
@pytest.fixture(scope='session')
def k8s_cluster(kind_cluster: KindCluster) -> KindClusterWrapper:
return KindClusterWrapper(kind_cluster, JinaLogger('kubernetes-cluster-logger'))
@pytest.fixture
def image_name_tag_map() -> Dict[str, str]:
return {
'reload-executor': '0.13.1',
'test-executor': '0.13.1',
'test-executor-torch': '0.13.1',
'slow-process-executor': '0.14.1',
'executor-merger': '0.1.1',
'set-text-executor': '0.1.1',
'failing-executor': '0.1.1',
'jinaai/jina': 'test-pip',
'custom-gateway': '0.1.1',
'test-stateful-executor': '0.13.1',
'multiprotocol-gateway': '0.1.1',
'slow-load-executor': '0.1.1',
}
def build_docker_image(image_name: str, image_name_tag_map: Dict[str, str]) -> str:
logger = JinaLogger('kubernetes-testing')
image_tag = image_name + ':' + image_name_tag_map[image_name]
image, build_logs = client.images.build(
path=os.path.join(cur_dir, image_name), tag=image_tag
)
for chunk in build_logs:
if 'stream' in chunk:
for line in chunk['stream'].splitlines():
logger.debug(line)
return image.tags[-1]
@pytest.fixture(autouse=True)
def set_test_pip_version() -> None:
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
if 'JINA_GATEWAY_IMAGE' in os.environ: # maybe another fixture has already removed
del os.environ['JINA_GATEWAY_IMAGE']
def load_cluster_config() -> None:
import kubernetes
try:
# try loading kube config from disk first
kubernetes.config.load_kube_config()
except kubernetes.config.config_exception.ConfigException:
# if the config could not be read from disk, try loading in cluster config
# this works if we are running inside k8s
kubernetes.config.load_incluster_config()
@pytest.fixture
def docker_images(
request: FixtureRequest,
image_name_tag_map: Dict[str, str],
k8s_cluster: KindClusterWrapper,
) -> List[str]:
image_names: List[str] = request.param
k8s_cluster.load_docker_images(image_names, image_name_tag_map)
images = [
image_name + ':' + image_name_tag_map[image_name] for image_name in image_names
]
return images
@contextlib.contextmanager
def shell_portforward(
kubectl_path, pod_or_service, port1, port2, namespace, waiting: float = 1
):
try:
proc = subprocess.Popen(
[
kubectl_path,
'port-forward',
pod_or_service,
f'{port1}:{port2}',
'--namespace',
namespace,
]
)
# Go and the port-forwarding needs some ms to be ready
time.sleep(waiting)
yield None
time.sleep(waiting)
except Exception as err:
# Suppress extension exception
raise OSError(err) from None
finally:
proc.kill()
| KindClusterWrapper |
python | numba__llvmlite | llvmlite/tests/test_binding.py | {
"start": 29817,
"end": 36194
} | class ____(BaseTest):
def test_str(self):
mod = self.module()
s = str(mod).strip()
self.assertTrue(s.startswith('; ModuleID ='), s)
def test_close(self):
mod = self.module()
str(mod)
mod.close()
with self.assertRaises(ctypes.ArgumentError):
str(mod)
mod.close()
def test_with(self):
mod = self.module()
str(mod)
with mod:
str(mod)
with self.assertRaises(ctypes.ArgumentError):
str(mod)
with self.assertRaises(RuntimeError):
with mod:
pass
def test_name(self):
mod = self.module()
mod.name = "foo"
self.assertEqual(mod.name, "foo")
mod.name = "bar"
self.assertEqual(mod.name, "bar")
def test_source_file(self):
mod = self.module()
self.assertEqual(mod.source_file, "asm_sum.c")
def test_data_layout(self):
mod = self.module()
s = mod.data_layout
self.assertIsInstance(s, str)
mod.data_layout = s
self.assertEqual(s, mod.data_layout)
def test_triple(self):
mod = self.module()
s = mod.triple
self.assertEqual(s, llvm.get_default_triple())
mod.triple = ''
self.assertEqual(mod.triple, '')
def test_verify(self):
# Verify successful
mod = self.module()
self.assertIs(mod.verify(), None)
# Verify failed
mod = self.module(asm_verification_fail)
with self.assertRaises(RuntimeError) as cm:
mod.verify()
s = str(cm.exception)
self.assertIn("%.bug = add i32 1, %.bug", s)
def test_get_function(self):
mod = self.module()
fn = mod.get_function("sum")
self.assertIsInstance(fn, llvm.ValueRef)
self.assertEqual(fn.name, "sum")
with self.assertRaises(NameError):
mod.get_function("foo")
# Check that fn keeps the module instance alive
del mod
str(fn.module)
def test_get_struct_type(self):
mod = self.module()
st_ty = mod.get_struct_type("struct.glob_type")
self.assertEqual(st_ty.name, "struct.glob_type")
# also match struct names of form "%struct.glob_type.{some_index}"
self.assertIsNotNone(re.match(
r'%struct\.glob_type(\.[\d]+)? = type { i64, \[2 x i64\] }',
str(st_ty)))
with self.assertRaises(NameError):
mod.get_struct_type("struct.doesnt_exist")
def test_get_global_variable(self):
mod = self.module()
gv = mod.get_global_variable("glob")
self.assertIsInstance(gv, llvm.ValueRef)
self.assertEqual(gv.name, "glob")
with self.assertRaises(NameError):
mod.get_global_variable("bar")
# Check that gv keeps the module instance alive
del mod
str(gv.module)
def test_global_variables(self):
mod = self.module()
it = mod.global_variables
del mod
globs = sorted(it, key=lambda value: value.name)
self.assertEqual(len(globs), 4)
self.assertEqual([g.name for g in globs],
["glob", "glob_b", "glob_f", "glob_struct"])
def test_functions(self):
mod = self.module()
it = mod.functions
del mod
funcs = list(it)
self.assertEqual(len(funcs), 1)
self.assertEqual(funcs[0].name, "sum")
def test_structs(self):
mod = self.module()
it = mod.struct_types
del mod
structs = list(it)
self.assertEqual(len(structs), 1)
self.assertIsNotNone(re.match(r'struct\.glob_type(\.[\d]+)?',
structs[0].name))
self.assertIsNotNone(re.match(
r'%struct\.glob_type(\.[\d]+)? = type { i64, \[2 x i64\] }',
str(structs[0])))
def test_link_in(self):
dest = self.module()
src = self.module(asm_mul)
dest.link_in(src)
self.assertEqual(
sorted(f.name for f in dest.functions), ["mul", "sum"])
dest.get_function("mul")
dest.close()
with self.assertRaises(ctypes.ArgumentError):
src.get_function("mul")
def test_link_in_preserve(self):
dest = self.module()
src2 = self.module(asm_mul)
dest.link_in(src2, preserve=True)
self.assertEqual(
sorted(f.name for f in dest.functions), ["mul", "sum"])
dest.close()
self.assertEqual(sorted(f.name for f in src2.functions), ["mul"])
src2.get_function("mul")
def test_link_in_error(self):
# Raise an error by trying to link two modules with the same global
# definition "sum".
dest = self.module()
src = self.module(asm_sum2)
with self.assertRaises(RuntimeError) as cm:
dest.link_in(src)
self.assertIn("symbol multiply defined", str(cm.exception))
def test_as_bitcode(self):
mod = self.module()
bc = mod.as_bitcode()
# Refer to http://llvm.org/docs/doxygen/html/ReaderWriter_8h_source.html#l00064 # noqa E501
# and http://llvm.org/docs/doxygen/html/ReaderWriter_8h_source.html#l00092 # noqa E501
bitcode_wrapper_magic = b'\xde\xc0\x17\x0b'
bitcode_magic = b'BC'
self.assertTrue(bc.startswith(bitcode_magic) or
bc.startswith(bitcode_wrapper_magic))
def test_parse_bitcode_error(self):
with self.assertRaises(RuntimeError) as cm:
llvm.parse_bitcode(b"")
self.assertIn("LLVM bitcode parsing error", str(cm.exception))
self.assertIn(
"file too small to contain bitcode header", str(cm.exception),
)
def test_bitcode_roundtrip(self):
# create a new context to avoid struct renaming
context1 = llvm.create_context()
bc = self.module(context=context1).as_bitcode()
context2 = llvm.create_context()
mod = llvm.parse_bitcode(bc, context2)
self.assertEqual(mod.as_bitcode(), bc)
mod.get_function("sum")
mod.get_global_variable("glob")
def test_cloning(self):
m = self.module()
cloned = m.clone()
self.assertIsNot(cloned, m)
self.assertEqual(cloned.as_bitcode(), m.as_bitcode())
| TestModuleRef |
python | lxml__lxml | src/lxml/tests/test_xpathevaluator.py | {
"start": 183,
"end": 18401
} | class ____(HelperTestCase):
"""XPath tests etree"""
def test_xpath_boolean(self):
tree = self.parse('<a><b></b><b></b></a>')
self.assertTrue(tree.xpath('boolean(/a/b)'))
self.assertTrue(not tree.xpath('boolean(/a/c)'))
def test_xpath_number(self):
tree = self.parse('<a>1</a>')
self.assertEqual(1.,
tree.xpath('number(/a)'))
tree = self.parse('<a>A</a>')
actual = str(tree.xpath('number(/a)'))
expected = ['nan', '1.#qnan', 'nanq']
if not actual.lower() in expected:
self.fail('Expected a NAN value, got %s' % actual)
def test_xpath_string(self):
tree = self.parse('<a>Foo</a>')
self.assertEqual('Foo',
tree.xpath('string(/a/text())'))
def test_xpath_document_root(self):
tree = self.parse('<a><b/></a>')
self.assertEqual([],
tree.xpath('/'))
def test_xpath_namespace(self):
tree = self.parse('<a xmlns="test" xmlns:p="myURI"/>')
self.assertTrue((None, "test") in tree.xpath('namespace::*'))
self.assertTrue(('p', 'myURI') in tree.xpath('namespace::*'))
def test_xpath_namespace_empty(self):
tree = self.parse('<a/>')
self.assertEqual([('xml', 'http://www.w3.org/XML/1998/namespace')],
tree.xpath('namespace::*'))
def test_xpath_list_elements(self):
tree = self.parse('<a><b>Foo</b><b>Bar</b></a>')
root = tree.getroot()
self.assertEqual([root[0], root[1]],
tree.xpath('/a/b'))
def test_xpath_list_nothing(self):
tree = self.parse('<a><b/></a>')
self.assertEqual([],
tree.xpath('/a/c'))
# this seems to pass a different code path, also should return nothing
self.assertEqual([],
tree.xpath('/a/c/text()'))
def test_xpath_list_text(self):
tree = self.parse('<a><b>Foo</b><b>Bar</b></a>')
root = tree.getroot()
self.assertEqual(['Foo', 'Bar'],
tree.xpath('/a/b/text()'))
def test_xpath_list_text_parent(self):
tree = self.parse('<a><b>FooBar</b><b>BarFoo</b></a>')
root = tree.getroot()
self.assertEqual(['FooBar', 'BarFoo'],
tree.xpath('/a/b/text()'))
self.assertEqual([root[0], root[1]],
[r.getparent() for r in tree.xpath('/a/b/text()')])
def test_xpath_list_text_parent_no_smart_strings(self):
tree = self.parse('<a><b>FooBar</b><b>BarFoo</b></a>')
root = tree.getroot()
self.assertEqual(['FooBar', 'BarFoo'],
tree.xpath('/a/b/text()', smart_strings=True))
self.assertEqual([root[0], root[1]],
[r.getparent() for r in
tree.xpath('/a/b/text()', smart_strings=True)])
self.assertEqual([None, None],
[r.attrname for r in
tree.xpath('/a/b/text()', smart_strings=True)])
self.assertEqual(['FooBar', 'BarFoo'],
tree.xpath('/a/b/text()', smart_strings=False))
self.assertEqual([False, False],
[hasattr(r, 'getparent') for r in
tree.xpath('/a/b/text()', smart_strings=False)])
self.assertEqual([None, None],
[r.attrname for r in
tree.xpath('/a/b/text()', smart_strings=True)])
def test_xpath_list_unicode_text_parent(self):
xml = b'<a><b>FooBar\\u0680\\u3120</b><b>BarFoo\\u0680\\u3120</b></a>'.decode("unicode_escape")
tree = self.parse(xml.encode('utf-8'))
root = tree.getroot()
self.assertEqual([b'FooBar\\u0680\\u3120'.decode("unicode_escape"),
b'BarFoo\\u0680\\u3120'.decode("unicode_escape")],
tree.xpath('/a/b/text()'))
self.assertEqual([root[0], root[1]],
[r.getparent() for r in tree.xpath('/a/b/text()')])
def test_xpath_list_attribute(self):
tree = self.parse('<a b="B" c="C"/>')
self.assertEqual(['B'],
tree.xpath('/a/@b'))
def test_xpath_list_attribute_parent(self):
tree = self.parse('<a b="BaSdFgHjKl" c="CqWeRtZuI"/>')
results = tree.xpath('/a/@c')
self.assertEqual(1, len(results))
self.assertEqual('CqWeRtZuI', results[0])
self.assertEqual(tree.getroot().tag, results[0].getparent().tag)
def test_xpath_list_attribute_parent_no_smart_strings(self):
tree = self.parse('<a b="BaSdFgHjKl" c="CqWeRtZuI"/>')
results = tree.xpath('/a/@c', smart_strings=True)
self.assertEqual(1, len(results))
self.assertEqual('CqWeRtZuI', results[0])
self.assertEqual('c', results[0].attrname)
self.assertEqual(tree.getroot().tag, results[0].getparent().tag)
results = tree.xpath('/a/@c', smart_strings=False)
self.assertEqual(1, len(results))
self.assertEqual('CqWeRtZuI', results[0])
self.assertEqual(False, hasattr(results[0], 'getparent'))
self.assertEqual(False, hasattr(results[0], 'attrname'))
def test_xpath_text_from_other_document(self):
xml_data = '''
<table>
<item xml:id="k1"><value>v1</value></item>
<item xml:id="k2"><value>v2</value></item>
</table>
'''
def lookup(dummy, id):
return etree.XML(xml_data).xpath('id(%r)' % id)
functions = {(None, 'lookup') : lookup}
root = etree.XML('<dummy/>')
values = root.xpath("lookup('k1')/value/text()",
extensions=functions)
self.assertEqual(['v1'], values)
self.assertEqual('value', values[0].getparent().tag)
def test_xpath_list_comment(self):
tree = self.parse('<a><!-- Foo --></a>')
self.assertEqual(['<!-- Foo -->'],
list(map(repr, tree.xpath('/a/node()'))))
def test_rel_xpath_boolean(self):
root = etree.XML('<a><b><c/></b></a>')
el = root[0]
self.assertTrue(el.xpath('boolean(c)'))
self.assertTrue(not el.xpath('boolean(d)'))
def test_rel_xpath_list_elements(self):
tree = self.parse('<a><c><b>Foo</b><b>Bar</b></c><c><b>Hey</b></c></a>')
root = tree.getroot()
c = root[0]
self.assertEqual([c[0], c[1]],
c.xpath('b'))
self.assertEqual([c[0], c[1], root[1][0]],
c.xpath('//b'))
def test_xpath_ns(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertEqual(
[root[0]],
tree.xpath('//foo:b', namespaces={'foo': 'uri:a'}))
self.assertEqual(
[],
tree.xpath('//foo:b', namespaces={'foo': 'uri:c'}))
self.assertEqual(
[root[0]],
root.xpath('//baz:b', namespaces={'baz': 'uri:a'}))
def test_xpath_ns_none(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertRaises(
TypeError,
root.xpath, '//b', namespaces={None: 'uri:a'})
def test_xpath_ns_empty(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertRaises(
TypeError,
root.xpath, '//b', namespaces={'': 'uri:a'})
def test_xpath_error(self):
tree = self.parse('<a/>')
self.assertRaises(etree.XPathEvalError, tree.xpath, '\\fad')
def test_xpath_class_error(self):
self.assertRaises(SyntaxError, etree.XPath, '\\fad')
self.assertRaises(etree.XPathSyntaxError, etree.XPath, '\\fad')
def test_xpath_prefix_error(self):
tree = self.parse('<a/>')
self.assertRaises(etree.XPathEvalError, tree.xpath, '/fa:d')
def test_xpath_class_prefix_error(self):
tree = self.parse('<a/>')
xpath = etree.XPath("/fa:d")
self.assertRaises(etree.XPathEvalError, xpath, tree)
def test_elementtree_getpath(self):
a = etree.Element("a")
b = etree.SubElement(a, "b")
c = etree.SubElement(a, "c")
d1 = etree.SubElement(c, "d")
d2 = etree.SubElement(c, "d")
tree = etree.ElementTree(a)
self.assertEqual('/a/c/d',
tree.getpath(d2)[:6])
self.assertEqual([d2],
tree.xpath(tree.getpath(d2)))
def test_elementtree_getpath_partial(self):
a = etree.Element("a")
b = etree.SubElement(a, "b")
c = etree.SubElement(a, "c")
d1 = etree.SubElement(c, "d")
d2 = etree.SubElement(c, "d")
tree = etree.ElementTree(c)
self.assertEqual('/c/d',
tree.getpath(d2)[:4])
self.assertEqual([d2],
tree.xpath(tree.getpath(d2)))
def test_xpath_evaluator(self):
tree = self.parse('<a><b><c></c></b></a>')
e = etree.XPathEvaluator(tree)
root = tree.getroot()
self.assertEqual(
[root],
e('//a'))
def test_xpath_evaluator_tree(self):
tree = self.parse('<a><b><c></c></b></a>')
child_tree = etree.ElementTree(tree.getroot()[0])
e = etree.XPathEvaluator(child_tree)
self.assertEqual(
[],
e('a'))
root = child_tree.getroot()
self.assertEqual(
[root[0]],
e('c'))
def test_xpath_evaluator_tree_absolute(self):
tree = self.parse('<a><b><c></c></b></a>')
child_tree = etree.ElementTree(tree.getroot()[0])
e = etree.XPathEvaluator(child_tree)
self.assertEqual(
[],
e('/a'))
root = child_tree.getroot()
self.assertEqual(
[root],
e('/b'))
self.assertEqual(
[],
e('/c'))
def test_xpath_evaluator_element(self):
tree = self.parse('<a><b><c></c></b></a>')
root = tree.getroot()
e = etree.XPathEvaluator(root[0])
self.assertEqual(
[root[0][0]],
e('c'))
def test_xpath_extensions(self):
def foo(evaluator, a):
return 'hello %s' % a
extension = {(None, 'foo'): foo}
tree = self.parse('<a><b></b></a>')
e = etree.XPathEvaluator(tree, extensions=[extension])
self.assertEqual(
"hello you", e("foo('you')"))
def test_xpath_extensions_wrong_args(self):
def foo(evaluator, a, b):
return "hello %s and %s" % (a, b)
extension = {(None, 'foo'): foo}
tree = self.parse('<a><b></b></a>')
e = etree.XPathEvaluator(tree, extensions=[extension])
self.assertRaises(TypeError, e, "foo('you')")
def test_xpath_extensions_error(self):
def foo(evaluator, a):
return 1/0
extension = {(None, 'foo'): foo}
tree = self.parse('<a/>')
e = etree.XPathEvaluator(tree, extensions=[extension])
self.assertRaises(ZeroDivisionError, e, "foo('test')")
def test_xpath_extensions_nodes(self):
def f(evaluator, arg):
r = etree.Element('results')
b = etree.SubElement(r, 'result')
b.text = 'Hoi'
b = etree.SubElement(r, 'result')
b.text = 'Dag'
return r
x = self.parse('<a/>')
e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}])
r = e("foo('World')/result")
self.assertEqual(2, len(r))
self.assertEqual('Hoi', r[0].text)
self.assertEqual('Dag', r[1].text)
def test_xpath_extensions_nodes_append(self):
def f(evaluator, nodes):
r = etree.SubElement(nodes[0], 'results')
b = etree.SubElement(r, 'result')
b.text = 'Hoi'
b = etree.SubElement(r, 'result')
b.text = 'Dag'
return r
x = self.parse('<a/>')
e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}])
r = e("foo(/*)/result")
self.assertEqual(2, len(r))
self.assertEqual('Hoi', r[0].text)
self.assertEqual('Dag', r[1].text)
def test_xpath_extensions_nodes_append2(self):
def f(evaluator, nodes):
r = etree.Element('results')
b = etree.SubElement(r, 'result')
b.text = 'Hoi'
b = etree.SubElement(r, 'result')
b.text = 'Dag'
r.append(nodes[0])
return r
x = self.parse('<result>Honk</result>')
e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}])
r = e("foo(/*)/result")
self.assertEqual(3, len(r))
self.assertEqual('Hoi', r[0].text)
self.assertEqual('Dag', r[1].text)
self.assertEqual('Honk', r[2].text)
def test_xpath_context_node(self):
tree = self.parse('<root><a/><b><c/></b></root>')
check_call = []
def check_context(ctxt, nodes):
self.assertEqual(len(nodes), 1)
check_call.append(nodes[0].tag)
self.assertEqual(ctxt.context_node, nodes[0])
return True
find = etree.XPath("//*[p:foo(.)]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
find(tree)
check_call.sort()
self.assertEqual(check_call, ["a", "b", "c", "root"])
def test_xpath_eval_context_propagation(self):
tree = self.parse('<root><a/><b><c/></b></root>')
check_call = {}
def check_context(ctxt, nodes):
self.assertEqual(len(nodes), 1)
tag = nodes[0].tag
# empty during the "b" call, a "b" during the "c" call
check_call[tag] = ctxt.eval_context.get("b")
ctxt.eval_context[tag] = tag
return True
find = etree.XPath("//b[p:foo(.)]/c[p:foo(.)]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
result = find(tree)
self.assertEqual(result, [tree.getroot()[1][0]])
self.assertEqual(check_call, {'b':None, 'c':'b'})
def test_xpath_eval_context_clear(self):
tree = self.parse('<root><a/><b><c/></b></root>')
check_call = {}
def check_context(ctxt):
check_call["done"] = True
# context must be empty for each new evaluation
self.assertEqual(len(ctxt.eval_context), 0)
ctxt.eval_context["test"] = True
return True
find = etree.XPath("//b[p:foo()]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
result = find(tree)
self.assertEqual(result, [tree.getroot()[1]])
self.assertEqual(check_call["done"], True)
check_call.clear()
find = etree.XPath("//b[p:foo()]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
result = find(tree)
self.assertEqual(result, [tree.getroot()[1]])
self.assertEqual(check_call["done"], True)
def test_xpath_variables(self):
x = self.parse('<a attr="true"/>')
e = etree.XPathEvaluator(x)
expr = "/a[@attr=$aval]"
r = e(expr, aval=1)
self.assertEqual(0, len(r))
r = e(expr, aval="true")
self.assertEqual(1, len(r))
self.assertEqual("true", r[0].get('attr'))
r = e(expr, aval=True)
self.assertEqual(1, len(r))
self.assertEqual("true", r[0].get('attr'))
def test_xpath_variables_nodeset(self):
x = self.parse('<a attr="true"/>')
e = etree.XPathEvaluator(x)
element = etree.Element("test-el")
etree.SubElement(element, "test-sub")
expr = "$value"
r = e(expr, value=element)
self.assertEqual(1, len(r))
self.assertEqual(element.tag, r[0].tag)
self.assertEqual(element[0].tag, r[0][0].tag)
def test_xpath_extensions_mix(self):
x = self.parse('<a attr="true"><test/></a>')
class LocalException(Exception):
pass
def foo(evaluator, a, varval):
etree.Element("DUMMY")
if varval == 0:
raise LocalException
elif varval == 1:
return ()
elif varval == 2:
return None
elif varval == 3:
return a[0][0]
a = a[0]
if a.get("attr") == str(varval):
return a
else:
return etree.Element("NODE")
extension = {(None, 'foo'): foo}
e = etree.XPathEvaluator(x, extensions=[extension])
del x
self.assertRaises(LocalException, e, "foo(., 0)")
self.assertRaises(LocalException, e, "foo(., $value)", value=0)
r = e("foo(., $value)", value=1)
self.assertEqual(len(r), 0)
r = e("foo(., 1)")
self.assertEqual(len(r), 0)
r = e("foo(., $value)", value=2)
self.assertEqual(len(r), 0)
r = e("foo(., $value)", value=3)
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "test")
r = e("foo(., $value)", value="false")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "NODE")
r = e("foo(., 'false')")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "NODE")
r = e("foo(., 'true')")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "a")
self.assertEqual(r[0][0].tag, "test")
r = e("foo(., $value)", value="true")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "a")
self.assertRaises(LocalException, e, "foo(., 0)")
self.assertRaises(LocalException, e, "foo(., $value)", value=0)
| ETreeXPathTestCase |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets_tests/snippet_checks/guides/components/integrations/test_omni_utils.py | {
"start": 259,
"end": 2536
} | class ____(OmniWorkspace):
async def fetch_omni_state(self) -> OmniWorkspaceData:
"""Returns mock Omni workspace data."""
# Create mock folder
folder = OmniFolder(
id="folder_1",
name="Analytics",
path="Analytics",
scope="shared",
)
# Create mock owner
owner = OmniOwner(
id="user_1",
name="Test User",
)
# Create mock user
user = OmniUser(
id="user_1",
name="Test User",
display_name="Test User",
user_name="testuser",
active=True,
primary_email="test@example.com",
groups=["analytics"],
created="2024-01-01T00:00:00Z",
last_modified="2024-01-01T00:00:00Z",
)
# Create mock documents
sales_dashboard = OmniDocument(
identifier="doc_1",
name="sales_dashboard",
scope="shared",
connection_id="conn_1",
deleted=False,
has_dashboard=True,
type="dashboard",
updated_at="2024-01-01T00:00:00Z",
folder=folder,
owner=owner,
labels=[],
queries=[],
)
revenue_report = OmniDocument(
identifier="doc_2",
name="revenue_report",
scope="shared",
connection_id="conn_1",
deleted=False,
has_dashboard=True,
type="dashboard",
updated_at="2024-01-01T00:00:00Z",
folder=folder,
owner=owner,
labels=[],
queries=[],
)
customer_analysis = OmniDocument(
identifier="doc_3",
name="customer_analysis",
scope="shared",
connection_id="conn_1",
deleted=False,
has_dashboard=True,
type="dashboard",
updated_at="2024-01-01T00:00:00Z",
folder=folder,
owner=owner,
labels=[],
queries=[],
)
return OmniWorkspaceData(
documents=[sales_dashboard, revenue_report, customer_analysis],
users=[user],
)
| MockOmniWorkspace |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess25.py | {
"start": 1283,
"end": 1378
} | class ____(ClassA[int]):
pass
ClassC.x = 1
ClassC.x
del ClassC.x
ClassC.x
del ClassC.x
| ClassC |
python | scipy__scipy | scipy/optimize/tests/test__shgo.py | {
"start": 8304,
"end": 10297
} | class ____(StructTestFunction):
"""
Test function with no feasible domain.
"""
def f(self, x, *args):
return x[0] ** 2 + x[1] ** 2
def g1(x):
return x[0] + x[1] - 1
def g2(x):
return -(x[0] + x[1] - 1)
def g3(x):
return -x[0] + x[1] - 1
def g4(x):
return -(-x[0] + x[1] - 1)
g = (g1, g2, g3, g4)
cons = wrap_constraints(g)
test_infeasible = StructTestInfeasible(bounds=[(2, 50), (-1, 1)],
expected_fun=None,
expected_x=None
)
@pytest.mark.skip("Not a test")
def run_test(test, args=(), test_atol=1e-5, n=100, iters=None,
callback=None, minimizer_kwargs=None, options=None,
sampling_method='sobol', workers=1):
res = shgo(test.f, test.bounds, args=args, constraints=test.cons,
n=n, iters=iters, callback=callback,
minimizer_kwargs=minimizer_kwargs, options=options,
sampling_method=sampling_method, workers=workers)
print(f'res = {res}')
logging.info(f'res = {res}')
if test.expected_x is not None:
np.testing.assert_allclose(res.x, test.expected_x,
rtol=test_atol,
atol=test_atol)
# (Optional tests)
if test.expected_fun is not None:
np.testing.assert_allclose(res.fun,
test.expected_fun,
atol=test_atol)
if test.expected_xl is not None:
np.testing.assert_allclose(res.xl,
test.expected_xl,
atol=test_atol)
if test.expected_funl is not None:
np.testing.assert_allclose(res.funl,
test.expected_funl,
atol=test_atol)
return
# Base test functions:
| StructTestInfeasible |
python | PrefectHQ__prefect | src/prefect/blocks/notifications.py | {
"start": 15074,
"end": 19467
} | class ____(AbstractAppriseNotificationBlock):
"""
Enables sending notifications via a provided Opsgenie webhook.
See [Apprise notify_opsgenie docs](https://github.com/caronc/apprise/wiki/Notify_opsgenie)
for more info on formatting the URL.
Examples:
Load a saved Opsgenie webhook and send a message:
```python
from prefect.blocks.notifications import OpsgenieWebhook
opsgenie_webhook_block = OpsgenieWebhook.load("BLOCK_NAME")
opsgenie_webhook_block.notify("Hello from Prefect!")
```
"""
_description = "Enables sending notifications via a provided Opsgenie webhook."
_block_type_name = "Opsgenie Webhook"
_block_type_slug = "opsgenie-webhook"
_logo_url = HttpUrl(
"https://cdn.sanity.io/images/3ugk85nk/production/d8b5bc6244ae6cd83b62ec42f10d96e14d6e9113-280x280.png"
)
_documentation_url = HttpUrl(
"https://docs.prefect.io/latest/automate/events/automations-triggers#sending-notifications-with-automations"
)
apikey: SecretStr = Field(
default=...,
title="API Key",
description="The API Key associated with your Opsgenie account.",
)
target_user: Optional[list[str]] = Field(
default=None, description="The user(s) you wish to notify."
)
target_team: Optional[list[str]] = Field(
default=None, description="The team(s) you wish to notify."
)
target_schedule: Optional[list[str]] = Field(
default=None, description="The schedule(s) you wish to notify."
)
target_escalation: Optional[list[str]] = Field(
default=None, description="The escalation(s) you wish to notify."
)
region_name: Literal["us", "eu"] = Field(
default="us", description="The 2-character region code."
)
batch: bool = Field(
default=False,
description="Notify all targets in batches (instead of individually).",
)
tags: Optional[list[str]] = Field(
default=None,
description=(
"A comma-separated list of tags you can associate with your Opsgenie"
" message."
),
examples=['["tag1", "tag2"]'],
)
priority: Optional[int] = Field(
default=3,
description=(
"The priority to associate with the message. It is on a scale between 1"
" (LOW) and 5 (EMERGENCY)."
),
)
alias: Optional[str] = Field(
default=None, description="The alias to associate with the message."
)
entity: Optional[str] = Field(
default=None, description="The entity to associate with the message."
)
details: Optional[dict[str, str]] = Field(
default=None,
description="Additional details composed of key/values pairs.",
examples=['{"key1": "value1", "key2": "value2"}'],
)
def block_initialization(self) -> None:
try:
# Try importing for apprise>=1.18.0
from apprise.plugins.opsgenie import NotifyOpsgenie
except ImportError:
# Fallback for versions apprise<1.18.0
from apprise.plugins.NotifyOpsgenie import ( # pyright: ignore[reportMissingImports] this is a fallback
NotifyOpsgenie, # pyright: ignore[reportUnknownVariableType] incomplete type hints in apprise
)
targets: list[str] = []
if self.target_user:
[targets.append(f"@{x}") for x in self.target_user]
if self.target_team:
[targets.append(f"#{x}") for x in self.target_team]
if self.target_schedule:
[targets.append(f"*{x}") for x in self.target_schedule]
if self.target_escalation:
[targets.append(f"^{x}") for x in self.target_escalation]
url = SecretStr(
NotifyOpsgenie(
apikey=self.apikey.get_secret_value(),
targets=targets,
region_name=self.region_name,
details=self.details,
priority=self.priority,
alias=self.alias,
entity=self.entity,
batch=self.batch,
tags=self.tags,
action="new",
).url() # pyright: ignore[reportUnknownMemberType, reportUnknownArgumentType] incomplete type hints in apprise
)
self._start_apprise_client(url)
| OpsgenieWebhook |
python | django__django | django/contrib/auth/models.py | {
"start": 10936,
"end": 15585
} | class ____(models.Model):
"""
Add the fields and methods necessary to support the Group and Permission
models using the ModelBackend.
"""
is_superuser = models.BooleanField(
_("superuser status"),
default=False,
help_text=_(
"Designates that this user has all permissions without "
"explicitly assigning them."
),
)
groups = models.ManyToManyField(
Group,
verbose_name=_("groups"),
blank=True,
help_text=_(
"The groups this user belongs to. A user will get all permissions "
"granted to each of their groups."
),
related_name="user_set",
related_query_name="user",
)
user_permissions = models.ManyToManyField(
Permission,
verbose_name=_("user permissions"),
blank=True,
help_text=_("Specific permissions for this user."),
related_name="user_set",
related_query_name="user",
)
class Meta:
abstract = True
def get_user_permissions(self, obj=None):
"""
Return a list of permission strings that this user has directly.
Query all available auth backends. If an object is passed in,
return only permissions matching this object.
"""
return _user_get_permissions(self, obj, "user")
async def aget_user_permissions(self, obj=None):
"""See get_user_permissions()"""
return await _auser_get_permissions(self, obj, "user")
def get_group_permissions(self, obj=None):
"""
Return a list of permission strings that this user has through their
groups. Query all available auth backends. If an object is passed in,
return only permissions matching this object.
"""
return _user_get_permissions(self, obj, "group")
async def aget_group_permissions(self, obj=None):
"""See get_group_permissions()"""
return await _auser_get_permissions(self, obj, "group")
def get_all_permissions(self, obj=None):
return _user_get_permissions(self, obj, "all")
async def aget_all_permissions(self, obj=None):
return await _auser_get_permissions(self, obj, "all")
def has_perm(self, perm, obj=None):
"""
Return True if the user has the specified permission. Query all
available auth backends, but return immediately if any backend returns
True. Thus, a user who has permission from a single auth backend is
assumed to have permission in general. If an object is provided, check
permissions for that object.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
async def ahas_perm(self, perm, obj=None):
"""See has_perm()"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return await _auser_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Return True if the user has each of the specified permissions. If
object is passed, check if the user has all required perms for it.
"""
if not isinstance(perm_list, Iterable) or isinstance(perm_list, str):
raise ValueError("perm_list must be an iterable of permissions.")
return all(self.has_perm(perm, obj) for perm in perm_list)
async def ahas_perms(self, perm_list, obj=None):
"""See has_perms()"""
if not isinstance(perm_list, Iterable) or isinstance(perm_list, str):
raise ValueError("perm_list must be an iterable of permissions.")
for perm in perm_list:
if not await self.ahas_perm(perm, obj):
return False
return True
def has_module_perms(self, app_label):
"""
Return True if the user has any permissions in the given app label.
Use similar logic as has_perm(), above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
async def ahas_module_perms(self, app_label):
"""See has_module_perms()"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return await _auser_has_module_perms(self, app_label)
| PermissionsMixin |
python | apache__airflow | providers/datadog/tests/unit/datadog/hooks/test_datadog.py | {
"start": 1370,
"end": 4861
} | class ____:
def setup_method(self):
with mock.patch("airflow.providers.datadog.hooks.datadog.initialize"):
with mock.patch("airflow.providers.datadog.hooks.datadog.DatadogHook.get_connection") as m:
m.return_value = Connection(
extra=json.dumps(
{
"app_key": APP_KEY,
"api_key": API_KEY,
"api_host": API_HOST,
}
)
)
self.hook = DatadogHook()
@mock.patch("airflow.providers.datadog.hooks.datadog.initialize")
@mock.patch("airflow.providers.datadog.hooks.datadog.DatadogHook.get_connection")
def test_api_key_required(self, mock_get_connection, mock_initialize):
mock_get_connection.return_value = Connection()
with pytest.raises(AirflowException) as ctx:
DatadogHook()
assert str(ctx.value) == "api_key must be specified in the Datadog connection details"
def test_validate_response_valid(self):
try:
self.hook.validate_response({"status": "ok"})
except AirflowException:
self.fail("Unexpected AirflowException raised")
def test_validate_response_invalid(self):
with pytest.raises(AirflowException):
self.hook.validate_response({"status": "error"})
@mock.patch("airflow.providers.datadog.hooks.datadog.api.Metric.send")
def test_send_metric(self, mock_send):
mock_send.return_value = {"status": "ok"}
self.hook.send_metric(
METRIC_NAME,
DATAPOINT,
tags=TAGS,
type_=TYPE,
interval=INTERVAL,
)
mock_send.assert_called_once_with(
metric=METRIC_NAME,
points=DATAPOINT,
host=self.hook.host,
tags=TAGS,
type=TYPE,
interval=INTERVAL,
)
@mock.patch("airflow.providers.datadog.hooks.datadog.api.Metric.query")
@mock.patch("airflow.providers.datadog.hooks.datadog.time.time")
def test_query_metric(self, mock_time, mock_query):
now = 12345
mock_time.return_value = now
mock_query.return_value = {"status": "ok"}
self.hook.query_metric("query", 60, 30)
mock_query.assert_called_once_with(
start=now - 60,
end=now - 30,
query="query",
)
@mock.patch("airflow.providers.datadog.hooks.datadog.api.Event.create")
def test_post_event(self, mock_create):
mock_create.return_value = {"status": "ok"}
self.hook.post_event(
TITLE,
TEXT,
aggregation_key=AGGREGATION_KEY,
alert_type=ALERT_TYPE,
date_happened=DATE_HAPPENED,
handle=HANDLE,
priority=PRIORITY,
related_event_id=RELATED_EVENT_ID,
tags=TAGS,
device_name=DEVICE_NAME,
)
mock_create.assert_called_once_with(
title=TITLE,
text=TEXT,
aggregation_key=AGGREGATION_KEY,
alert_type=ALERT_TYPE,
date_happened=DATE_HAPPENED,
handle=HANDLE,
priority=PRIORITY,
related_event_id=RELATED_EVENT_ID,
tags=TAGS,
host=self.hook.host,
device_name=DEVICE_NAME,
source_type_name=self.hook.source_type_name,
)
| TestDatadogHook |
python | charliermarsh__ruff | crates/ty_python_semantic/resources/corpus/77_class__class__.py | {
"start": 44,
"end": 644
} | class ____:
def test_various___class___pathologies(self):
# See issue #12370
class X(): #A):
def f(self):
return super().f()
__class__ = 413
x = X()
class X:
x = __class__
def f():
__class__
class X:
global __class__
__class__ = 42
def f():
__class__
# class X:
# nonlocal __class__
# __class__ = 42
# def f():
# __class__
# self.assertEqual(__class__, 42)
| Foo |
python | doocs__leetcode | solution/1400-1499/1464.Maximum Product of Two Elements in an Array/Solution.py | {
"start": 0,
"end": 224
} | class ____:
def maxProduct(self, nums: List[int]) -> int:
ans = 0
for i, a in enumerate(nums):
for b in nums[i + 1 :]:
ans = max(ans, (a - 1) * (b - 1))
return ans
| Solution |
python | matplotlib__matplotlib | lib/matplotlib/artist.py | {
"start": 48850,
"end": 63911
} | class ____:
"""
A helper class to inspect an `~matplotlib.artist.Artist` and return
information about its settable properties and their current values.
"""
def __init__(self, o):
r"""
Initialize the artist inspector with an `Artist` or an iterable of
`Artist`\s. If an iterable is used, we assume it is a homogeneous
sequence (all `Artist`\s are of the same type) and it is your
responsibility to make sure this is so.
"""
if not isinstance(o, Artist):
if np.iterable(o):
o = list(o)
if len(o):
o = o[0]
self.oorig = o
if not isinstance(o, type):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
def get_aliases(self):
"""
Get a dict mapping property fullnames to sets of aliases for each alias
in the :class:`~matplotlib.artist.ArtistInspector`.
e.g., for lines::
{'markerfacecolor': {'mfc'},
'linewidth' : {'lw'},
}
"""
names = [name for name in dir(self.o)
if name.startswith(('set_', 'get_'))
and callable(getattr(self.o, name))]
aliases = {}
for name in names:
func = getattr(self.o, name)
if not self.is_alias(func):
continue
propname = re.search(f"`({name[:4]}.*)`", # get_.*/set_.*
inspect.getdoc(func)).group(1)
aliases.setdefault(propname[4:], set()).add(name[4:])
return aliases
_get_valid_values_regex = re.compile(
r"\n\s*(?:\.\.\s+)?ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))"
)
def get_valid_values(self, attr):
"""
Get the legal arguments for the setter associated with *attr*.
This is done by querying the docstring of the setter for a line that
begins with "ACCEPTS:" or ".. ACCEPTS:", and then by looking for a
numpydoc-style documentation for the setter's first argument.
"""
name = 'set_%s' % attr
if not hasattr(self.o, name):
raise AttributeError(f'{self.o} has no function {name}')
func = getattr(self.o, name)
if hasattr(func, '_kwarg_doc'):
return func._kwarg_doc
docstring = inspect.getdoc(func)
if docstring is None:
return 'unknown'
if docstring.startswith('Alias for '):
return None
match = self._get_valid_values_regex.search(docstring)
if match is not None:
return re.sub("\n *", " ", match.group(1))
# Much faster than list(inspect.signature(func).parameters)[1],
# although barely relevant wrt. matplotlib's total import time.
param_name = func.__code__.co_varnames[1]
# We could set the presence * based on whether the parameter is a
# varargs (it can't be a varkwargs) but it's not really worth it.
match = re.search(fr"(?m)^ *\*?{param_name} : (.+)", docstring)
if match:
return match.group(1)
return 'unknown'
def _replace_path(self, source_class):
"""
Changes the full path to the public API path that is used
in sphinx. This is needed for links to work.
"""
replace_dict = {'_base._AxesBase': 'Axes',
'_axes.Axes': 'Axes'}
for key, value in replace_dict.items():
source_class = source_class.replace(key, value)
return source_class
def get_setters(self):
"""
Get the attribute strings with setters for object.
For example, for a line, return ``['markerfacecolor', 'linewidth',
....]``.
"""
setters = []
for name in dir(self.o):
if not name.startswith('set_'):
continue
func = getattr(self.o, name)
if (not callable(func)
or self.number_of_parameters(func) < 2
or self.is_alias(func)):
continue
setters.append(name[4:])
return setters
@staticmethod
@cache
def number_of_parameters(func):
"""Return number of parameters of the callable *func*."""
return len(inspect.signature(func).parameters)
@staticmethod
@cache
def is_alias(method):
"""
Return whether the object *method* is an alias for another method.
"""
ds = inspect.getdoc(method)
if ds is None:
return False
return ds.startswith('Alias for ')
def aliased_name(self, s):
"""
Return 'PROPNAME or alias' if *s* has an alias, else return 'PROPNAME'.
For example, for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'.
"""
aliases = ''.join(' or %s' % x for x in sorted(self.aliasd.get(s, [])))
return s + aliases
_NOT_LINKABLE = {
# A set of property setter methods that are not available in our
# current docs. This is a workaround used to prevent trying to link
# these setters which would lead to "target reference not found"
# warnings during doc build.
'matplotlib.image._ImageBase.set_alpha',
'matplotlib.image._ImageBase.set_array',
'matplotlib.image._ImageBase.set_data',
'matplotlib.image._ImageBase.set_filternorm',
'matplotlib.image._ImageBase.set_filterrad',
'matplotlib.image._ImageBase.set_interpolation',
'matplotlib.image._ImageBase.set_interpolation_stage',
'matplotlib.image._ImageBase.set_resample',
'matplotlib.text._AnnotationBase.set_annotation_clip',
}
def aliased_name_rest(self, s, target):
"""
Return 'PROPNAME or alias' if *s* has an alias, else return 'PROPNAME',
formatted for reST.
For example, for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'.
"""
# workaround to prevent "reference target not found"
if target in self._NOT_LINKABLE:
return f'``{s}``'
aliases = ''.join(
f' or :meth:`{a} <{target}>`' for a in sorted(self.aliasd.get(s, [])))
return f':meth:`{s} <{target}>`{aliases}'
def pprint_setters(self, prop=None, leadingspace=2):
"""
If *prop* is *None*, return a list of strings of all settable
properties and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return f'{pad}{prop}: {accepts}'
lines = []
for prop in sorted(self.get_setters()):
accepts = self.get_valid_values(prop)
name = self.aliased_name(prop)
lines.append(f'{pad}{name}: {accepts}')
return lines
def pprint_setters_rest(self, prop=None, leadingspace=4):
"""
If *prop* is *None*, return a list of reST-formatted strings of all
settable properties and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of "property : valid"
values.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return f'{pad}{prop}: {accepts}'
prop_and_qualnames = []
for prop in sorted(self.get_setters()):
# Find the parent method which actually provides the docstring.
for cls in self.o.__mro__:
method = getattr(cls, f"set_{prop}", None)
if method and method.__doc__ is not None:
break
else: # No docstring available.
method = getattr(self.o, f"set_{prop}")
prop_and_qualnames.append(
(prop, f"{method.__module__}.{method.__qualname__}"))
names = [self.aliased_name_rest(prop, target)
.replace('_base._AxesBase', 'Axes')
.replace('_axes.Axes', 'Axes')
for prop, target in prop_and_qualnames]
accepts = [self.get_valid_values(prop)
for prop, _ in prop_and_qualnames]
col0_len = max(len(n) for n in names)
col1_len = max(len(a) for a in accepts)
table_formatstr = pad + ' ' + '=' * col0_len + ' ' + '=' * col1_len
return [
'',
pad + '.. table::',
pad + ' :class: property-table',
'',
table_formatstr,
pad + ' ' + 'Property'.ljust(col0_len)
+ ' ' + 'Description'.ljust(col1_len),
table_formatstr,
*[pad + ' ' + n.ljust(col0_len) + ' ' + a.ljust(col1_len)
for n, a in zip(names, accepts)],
table_formatstr,
'',
]
def properties(self):
"""Return a dictionary mapping property name -> value."""
o = self.oorig
getters = [name for name in dir(o)
if name.startswith('get_') and callable(getattr(o, name))]
getters.sort()
d = {}
for name in getters:
func = getattr(o, name)
if self.is_alias(func):
continue
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
val = func()
except Exception:
continue
else:
d[name[4:]] = val
return d
def pprint_getters(self):
"""Return the getters and actual values as list of strings."""
lines = []
for name, val in sorted(self.properties().items()):
if getattr(val, 'shape', ()) != () and len(val) > 6:
s = str(val[:6]) + '...'
else:
s = str(val)
s = s.replace('\n', ' ')
if len(s) > 50:
s = s[:50] + '...'
name = self.aliased_name(name)
lines.append(f' {name} = {s}')
return lines
def getp(obj, property=None):
"""
Return the value of an `.Artist`'s *property*, or print all of them.
Parameters
----------
obj : `~matplotlib.artist.Artist`
The queried artist; e.g., a `.Line2D`, a `.Text`, or an `~.axes.Axes`.
property : str or None, default: None
If *property* is 'somename', this function returns
``obj.get_somename()``.
If it's None (or unset), it *prints* all gettable properties from
*obj*. Many properties have aliases for shorter typing, e.g. 'lw' is
an alias for 'linewidth'. In the output, aliases and full property
names will be listed as:
property or alias = value
e.g.:
linewidth or lw = 2
See Also
--------
setp
"""
if property is None:
insp = ArtistInspector(obj)
ret = insp.pprint_getters()
print('\n'.join(ret))
return
return getattr(obj, 'get_' + property)()
# alias
get = getp
def setp(obj, *args, file=None, **kwargs):
"""
Set one or more properties on an `.Artist`, or list allowed values.
Parameters
----------
obj : `~matplotlib.artist.Artist` or list of `.Artist`
The artist(s) whose properties are being set or queried. When setting
properties, all artists are affected; when querying the allowed values,
only the first instance in the sequence is queried.
For example, two lines can be made thicker and red with a single call:
>>> x = arange(0, 1, 0.01)
>>> lines = plot(x, sin(2*pi*x), x, sin(4*pi*x))
>>> setp(lines, linewidth=2, color='r')
file : file-like, default: `sys.stdout`
Where `setp` writes its output when asked to list allowed values.
>>> with open('output.log') as file:
... setp(line, file=file)
The default, ``None``, means `sys.stdout`.
*args, **kwargs
The properties to set. The following combinations are supported:
- Set the linestyle of a line to be dashed:
>>> line, = plot([1, 2, 3])
>>> setp(line, linestyle='--')
- Set multiple properties at once:
>>> setp(line, linewidth=2, color='r')
- List allowed values for a line's linestyle:
>>> setp(line, 'linestyle')
linestyle: {'-', '--', '-.', ':', '', (offset, on-off-seq), ...}
- List all properties that can be set, and their allowed values:
>>> setp(line)
agg_filter: a filter function, ...
[long output listing omitted]
`setp` also supports MATLAB style string/value pairs. For example, the
following are equivalent:
>>> setp(lines, 'linewidth', 2, 'color', 'r') # MATLAB style
>>> setp(lines, linewidth=2, color='r') # Python style
See Also
--------
getp
"""
if isinstance(obj, Artist):
objs = [obj]
else:
objs = list(cbook.flatten(obj))
if not objs:
return
insp = ArtistInspector(objs[0])
if not kwargs and len(args) < 2:
if args:
print(insp.pprint_setters(prop=args[0]), file=file)
else:
print('\n'.join(insp.pprint_setters()), file=file)
return
if len(args) % 2:
raise ValueError('The set args must be string, value pairs')
funcvals = dict(zip(args[::2], args[1::2]))
ret = [o.update(funcvals) for o in objs] + [o.set(**kwargs) for o in objs]
return list(cbook.flatten(ret))
def kwdoc(artist):
r"""
Inspect an `~matplotlib.artist.Artist` class (using `.ArtistInspector`) and
return information about its settable properties and their current values.
Parameters
----------
artist : `~matplotlib.artist.Artist` or an iterable of `Artist`\s
Returns
-------
str
The settable properties of *artist*, as plain text if
:rc:`docstring.hardcopy` is False and as a rst table (intended for
use in Sphinx) if it is True.
"""
ai = ArtistInspector(artist)
return ('\n'.join(ai.pprint_setters_rest(leadingspace=4))
if mpl.rcParams['docstring.hardcopy'] else
'Properties:\n' + '\n'.join(ai.pprint_setters(leadingspace=4)))
# We defer this to the end of them module, because it needs ArtistInspector
# to be defined.
Artist._update_set_signature_and_docstring()
| ArtistInspector |
python | ray-project__ray | python/ray/util/client/common.py | {
"start": 22996,
"end": 25777
} | class ____:
"""Holds the handles to the registered gRPC servicers and their server."""
task_servicer: ray_client_pb2_grpc.RayletDriverServicer
data_servicer: ray_client_pb2_grpc.RayletDataStreamerServicer
logs_servicer: ray_client_pb2_grpc.RayletLogStreamerServicer
grpc_server: grpc.Server
def stop(self, grace: int) -> None:
# The data servicer might be sleeping while waiting for clients to
# reconnect. Signal that they no longer have to sleep and can exit
# immediately, since the RPC server is stopped.
self.grpc_server.stop(grace)
self.data_servicer.stopped.set()
# Add a hook for all the cases that previously
# expected simply a gRPC server
def __getattr__(self, attr):
return getattr(self.grpc_server, attr)
def _get_client_id_from_context(context: Any) -> str:
"""
Get `client_id` from gRPC metadata. If the `client_id` is not present,
this function logs an error and sets the status_code.
"""
metadata = dict(context.invocation_metadata())
client_id = metadata.get("client_id") or ""
if client_id == "":
logger.error("Client connecting with no client_id")
context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
return client_id
def _propagate_error_in_context(e: Exception, context: Any) -> bool:
"""
Encode an error into the context of an RPC response. Returns True
if the error can be recovered from, false otherwise
"""
try:
if isinstance(e, grpc.RpcError):
# RPC error, propagate directly by copying details into context
context.set_code(e.code())
context.set_details(e.details())
return e.code() not in GRPC_UNRECOVERABLE_ERRORS
except Exception:
# Extra precaution -- if encoding the RPC directly fails fallback
# to treating it as a regular error
pass
context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
context.set_details(str(e))
return False
def _id_is_newer(id1: int, id2: int) -> bool:
"""
We should only replace cache entries with the responses for newer IDs.
Most of the time newer IDs will be the ones with higher value, except when
the req_id counter rolls over. We check for this case by checking the
distance between the two IDs. If the distance is significant, then it's
likely that the req_id counter rolled over, and the smaller id should
still be used to replace the one in cache.
"""
diff = abs(id2 - id1)
# Int32 max is also the maximum number of simultaneous in-flight requests.
if diff > (INT32_MAX // 2):
# Rollover likely occurred. In this case the smaller ID is newer
return id1 < id2
return id1 > id2
| ClientServerHandle |
python | scipy__scipy | scipy/sparse/_data.py | {
"start": 476,
"end": 4720
} | class ____(_spbase):
def __init__(self, arg1, *, maxprint=None):
_spbase.__init__(self, arg1, maxprint=maxprint)
@property
def dtype(self):
return self.data.dtype
@dtype.setter
def dtype(self, newtype):
self.data = self.data.view(newtype)
def _deduped_data(self):
if hasattr(self, 'sum_duplicates'):
self.sum_duplicates()
return self.data
def __abs__(self):
return self._with_data(abs(self._deduped_data()))
def __round__(self, ndigits=0):
return self._with_data(np.around(self._deduped_data(), decimals=ndigits))
def _real(self):
return self._with_data(self.data.real)
def _imag(self):
return self._with_data(self.data.imag)
def __neg__(self):
if self.dtype.kind == 'b':
raise NotImplementedError('negating a boolean sparse array is not '
'supported')
return self._with_data(-self.data)
def __imul__(self, other): # self *= other
if isscalarlike(other):
self.data *= other
return self
return NotImplemented
def __itruediv__(self, other): # self /= other
if isscalarlike(other):
recip = 1.0 / other
self.data *= recip
return self
else:
return NotImplemented
def astype(self, dtype, casting='unsafe', copy=True):
dtype = np.dtype(dtype)
if self.dtype != dtype:
matrix = self._with_data(
self.data.astype(dtype, casting=casting, copy=True),
copy=True
)
return matrix._with_data(matrix._deduped_data(), copy=False)
elif copy:
return self.copy()
else:
return self
astype.__doc__ = _spbase.astype.__doc__
def conjugate(self, copy=True):
if np.issubdtype(self.dtype, np.complexfloating):
return self._with_data(self.data.conjugate(), copy=copy)
elif copy:
return self.copy()
else:
return self
conjugate.__doc__ = _spbase.conjugate.__doc__
def copy(self):
return self._with_data(self.data.copy(), copy=True)
copy.__doc__ = _spbase.copy.__doc__
def power(self, n, dtype=None):
"""
This function performs element-wise power.
Parameters
----------
n : scalar
n is a non-zero scalar (nonzero avoids dense ones creation)
If zero power is desired, special case it to use `np.ones`
dtype : If dtype is not specified, the current dtype will be preserved.
Raises
------
NotImplementedError : if n is a zero scalar
If zero power is desired, special case it to use
``np.ones(A.shape, dtype=A.dtype)``
"""
if not isscalarlike(n):
raise NotImplementedError("input is not scalar")
if not n:
raise NotImplementedError(
"zero power is not supported as it would densify the matrix.\n"
"Use `np.ones(A.shape, dtype=A.dtype)` for this case."
)
data = self._deduped_data()
if dtype is not None:
data = data.astype(dtype, copy=False)
return self._with_data(data ** n)
###########################
# Multiplication handlers #
###########################
def _mul_scalar(self, other):
return self._with_data(self.data * other)
# Add the numpy unary ufuncs for which func(0) = 0 to _data_matrix.
for npfunc in _ufuncs_with_fixed_point_at_zero:
name = npfunc.__name__
def _create_method(op):
def method(self):
result = op(self._deduped_data())
return self._with_data(result, copy=True)
method.__doc__ = (f"Element-wise {name}.\n\n"
f"See `numpy.{name}` for more information.")
method.__name__ = name
return method
setattr(_data_matrix, name, _create_method(npfunc))
def _find_missing_index(ind, n):
for k, a in enumerate(ind):
if k != a:
return k
k += 1
if k < n:
return k
else:
return -1
| _data_matrix |
python | cherrypy__cherrypy | cherrypy/process/wspbus.py | {
"start": 4579,
"end": 5241
} | class ____(object):
class State(object):
name = None
def __repr__(self):
return 'states.%s' % self.name
def __setattr__(self, key, value):
if isinstance(value, self.State):
value.name = key
object.__setattr__(self, key, value)
states = _StateEnum()
states.STOPPED = states.State()
states.STARTING = states.State()
states.STARTED = states.State()
states.STOPPING = states.State()
states.EXITING = states.State()
try:
import fcntl
except ImportError:
max_files = 0
else:
try:
max_files = os.sysconf('SC_OPEN_MAX')
except AttributeError:
max_files = 1024
| _StateEnum |
python | huggingface__transformers | src/transformers/models/conditional_detr/modeling_conditional_detr.py | {
"start": 21204,
"end": 27304
} | class ____(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper.
Here, we add position embeddings to the queries and keys (as explained in the DETR paper).
"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor]):
return tensor if object_queries is None else tensor + object_queries
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
object_queries: Optional[torch.Tensor] = None,
key_value_states: Optional[torch.Tensor] = None,
spatial_position_embeddings: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
batch_size, target_len, embed_dim = hidden_states.size()
# add position embeddings to the hidden states before projecting to queries and keys
if object_queries is not None:
hidden_states_original = hidden_states
hidden_states = self.with_pos_embed(hidden_states, object_queries)
# add key-value position embeddings to the key value states
if spatial_position_embeddings is not None:
key_value_states_original = key_value_states
key_value_states = self.with_pos_embed(key_value_states, spatial_position_embeddings)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, batch_size)
value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
source_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(
f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (batch_size, 1, target_len, source_len):
raise ValueError(
f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is"
f" {attention_mask.size()}"
)
if attention_mask.dtype == torch.bool:
attention_mask = torch.zeros_like(attention_mask, dtype=attn_weights.dtype).masked_fill_(
attention_mask, -torch.inf
)
attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask
attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped
| DetrAttention |
python | tox-dev__tox | src/tox/tox_env/python/pip/req/file.py | {
"start": 4080,
"end": 4792
} | class ____:
def __init__(
self,
filename: str,
lineno: int,
args: str,
opts: Namespace,
constraint: bool, # noqa: FBT001
) -> None:
self.filename = filename
self.lineno = lineno
self.opts = opts
self.constraint = constraint
if args:
self.is_requirement = True
self.is_editable = False
self.requirement = args
elif opts.editables:
self.is_requirement = True
self.is_editable = True
# We don't support multiple -e on one line
self.requirement = opts.editables[0]
else:
self.is_requirement = False
| ParsedLine |
python | scipy__scipy | scipy/special/tests/test_logit.py | {
"start": 3670,
"end": 6470
} | class ____:
def test_large_negative(self):
x = np.array([-10000.0, -750.0, -500.0, -35.0])
y = log_expit(x)
assert_equal(y, x)
def test_large_positive(self):
x = np.array([750.0, 1000.0, 10000.0])
y = log_expit(x)
# y will contain -0.0, and -0.0 is used in the expected value,
# but assert_equal does not check the sign of zeros, and I don't
# think the sign is an essential part of the test (i.e. it would
# probably be OK if log_expit(1000) returned 0.0 instead of -0.0).
assert_equal(y, np.array([-0.0, -0.0, -0.0]))
def test_basic_float64(self):
x = np.array([-32, -20, -10, -3, -1, -0.1, -1e-9,
0, 1e-9, 0.1, 1, 10, 100, 500, 710, 725, 735])
y = log_expit(x)
#
# Expected values were computed with mpmath:
#
# import mpmath
#
# mpmath.mp.dps = 100
#
# def mp_log_expit(x):
# return -mpmath.log1p(mpmath.exp(-x))
#
# expected = [float(mp_log_expit(t)) for t in x]
#
expected = [-32.000000000000014, -20.000000002061153,
-10.000045398899218, -3.048587351573742,
-1.3132616875182228, -0.7443966600735709,
-0.6931471810599453, -0.6931471805599453,
-0.6931471800599454, -0.6443966600735709,
-0.3132616875182228, -4.539889921686465e-05,
-3.720075976020836e-44, -7.124576406741286e-218,
-4.47628622567513e-309, -1.36930634e-315,
-6.217e-320]
# When tested locally, only one value in y was not exactly equal to
# expected. That was for x=1, and the y value differed from the
# expected by 1 ULP. For this test, however, I'll use rtol=1e-15.
assert_allclose(y, expected, rtol=1e-15)
def test_basic_float32(self):
x = np.array([-32, -20, -10, -3, -1, -0.1, -1e-9,
0, 1e-9, 0.1, 1, 10, 100], dtype=np.float32)
y = log_expit(x)
#
# Expected values were computed with mpmath:
#
# import mpmath
#
# mpmath.mp.dps = 100
#
# def mp_log_expit(x):
# return -mpmath.log1p(mpmath.exp(-x))
#
# expected = [np.float32(mp_log_expit(t)) for t in x]
#
expected = np.array([-32.0, -20.0, -10.000046, -3.0485873,
-1.3132616, -0.7443967, -0.6931472,
-0.6931472, -0.6931472, -0.64439666,
-0.3132617, -4.5398898e-05, -3.8e-44],
dtype=np.float32)
assert_allclose(y, expected, rtol=5e-7)
| TestLogExpit |
python | eventlet__eventlet | tests/mock.py | {
"start": 9988,
"end": 10474
} | class ____:
"""Access attributes to return a named object, usable as a sentinel."""
def __init__(self):
self._sentinels = {}
def __getattr__(self, name):
if name == '__bases__':
# Without this help(mock) raises an exception
raise AttributeError
return self._sentinels.setdefault(name, _SentinelObject(name))
sentinel = _Sentinel()
DEFAULT = sentinel.DEFAULT
_missing = sentinel.MISSING
_deleted = sentinel.DELETED
| _Sentinel |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 255293,
"end": 256423
} | class ____(Response):
"""
Response of tasks.make_public endpoint.
:param updated: Number of tasks updated
:type updated: int
"""
_service = "tasks"
_action = "make_public"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of tasks updated",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(MakePublicResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| MakePublicResponse |
python | huggingface__transformers | src/transformers/models/phi3/modular_phi3.py | {
"start": 4006,
"end": 7284
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.num_key_value_heads = config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
op_size = config.num_attention_heads * self.head_dim + 2 * (config.num_key_value_heads * self.head_dim)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
self.qkv_proj = nn.Linear(config.hidden_size, op_size, bias=False)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
qkv = self.qkv_proj(hidden_states)
query_pos = self.config.num_attention_heads * self.head_dim
query_states = qkv[..., :query_pos]
key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
query_states = query_states.view(hidden_shape).transpose(1, 2)
key_states = key_states.view(hidden_shape).transpose(1, 2)
value_states = value_states.view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=getattr(self.config, "sliding_window", None),
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Phi3Attention |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/qual_names_test.py | {
"start": 1069,
"end": 6360
} | class ____(test.TestCase):
def test_from_str(self):
a = QN('a')
b = QN('b')
a_dot_b = QN(a, attr='b')
a_sub_b = QN(a, subscript=b)
self.assertEqual(qual_names.from_str('a.b'), a_dot_b)
self.assertEqual(qual_names.from_str('a'), a)
self.assertEqual(qual_names.from_str('a[b]'), a_sub_b)
def test_basic(self):
a = QN('a')
self.assertEqual(a.qn, ('a',))
self.assertEqual(str(a), 'a')
self.assertEqual(a.ssf(), 'a')
self.assertEqual(a.ast().id, 'a')
self.assertFalse(a.is_composite())
with self.assertRaises(ValueError):
_ = a.parent
a_b = QN(a, attr='b')
self.assertEqual(a_b.qn, (a, 'b'))
self.assertEqual(str(a_b), 'a.b')
self.assertEqual(a_b.ssf(), 'a_b')
self.assertEqual(a_b.ast().value.id, 'a')
self.assertEqual(a_b.ast().attr, 'b')
self.assertTrue(a_b.is_composite())
self.assertEqual(a_b.parent.qn, ('a',))
def test_subscripts(self):
a = QN('a')
b = QN('b')
a_sub_b = QN(a, subscript=b)
self.assertEqual(a_sub_b.qn, (a, b))
self.assertEqual(str(a_sub_b), 'a[b]')
self.assertEqual(a_sub_b.ssf(), 'a_sub_b')
self.assertEqual(a_sub_b.ast().value.id, 'a')
self.assertEqual(a_sub_b.ast().slice.id, 'b')
self.assertTrue(a_sub_b.is_composite())
self.assertTrue(a_sub_b.has_subscript())
self.assertEqual(a_sub_b.parent.qn, ('a',))
c = QN('c')
b_sub_c = QN(b, subscript=c)
a_sub_b_sub_c = QN(a, subscript=b_sub_c)
self.assertEqual(a_sub_b_sub_c.qn, (a, b_sub_c))
self.assertTrue(a_sub_b_sub_c.is_composite())
self.assertTrue(a_sub_b_sub_c.has_subscript())
self.assertEqual(b_sub_c.qn, (b, c))
self.assertEqual(str(a_sub_b_sub_c), 'a[b[c]]')
self.assertEqual(a_sub_b_sub_c.ssf(), 'a_sub_b_sub_c')
self.assertEqual(a_sub_b_sub_c.ast().value.id, 'a')
self.assertEqual(a_sub_b_sub_c.ast().slice.value.id, 'b')
self.assertEqual(a_sub_b_sub_c.ast().slice.slice.id, 'c')
self.assertEqual(b_sub_c.ast().slice.id, 'c')
self.assertEqual(a_sub_b_sub_c.parent.qn, ('a',))
with self.assertRaises(ValueError):
QN('a', 'b')
def test_equality(self):
a = QN('a')
a2 = QN('a')
a_b = QN(a, attr='b')
self.assertEqual(a2.qn, ('a',))
with self.assertRaises(ValueError):
_ = a.parent
a_b2 = QN(a, attr='b')
self.assertEqual(a_b2.qn, (a, 'b'))
self.assertEqual(a_b2.parent.qn, ('a',))
self.assertTrue(a2 == a)
self.assertFalse(a2 is a)
self.assertTrue(a_b.parent == a)
self.assertTrue(a_b2.parent == a)
self.assertTrue(a_b2 == a_b)
self.assertFalse(a_b2 is a_b)
self.assertFalse(a_b2 == a)
a_sub_b = QN(a, subscript='b')
a_sub_b2 = QN(a, subscript='b')
self.assertTrue(a_sub_b == a_sub_b2)
self.assertFalse(a_sub_b == a_b)
def test_nested_attrs_subscripts(self):
a = QN('a')
b = QN('b')
c = QN('c')
b_sub_c = QN(b, subscript=c)
a_sub_b_sub_c = QN(a, subscript=b_sub_c)
b_dot_c = QN(b, attr='c')
a_sub__b_dot_c = QN(a, subscript=b_dot_c)
a_sub_b = QN(a, subscript=b)
a_sub_b__dot_c = QN(a_sub_b, attr='c')
a_dot_b = QN(a, attr='b')
a_dot_b_sub_c = QN(a_dot_b, subscript=c)
self.assertEqual(str(a_sub_b_sub_c), 'a[b[c]]')
self.assertEqual(str(a_sub__b_dot_c), 'a[b.c]')
self.assertEqual(str(a_sub_b__dot_c), 'a[b].c')
self.assertEqual(str(a_dot_b_sub_c), 'a.b[c]')
self.assertNotEqual(a_sub_b_sub_c, a_sub__b_dot_c)
self.assertNotEqual(a_sub_b_sub_c, a_sub_b__dot_c)
self.assertNotEqual(a_sub_b_sub_c, a_dot_b_sub_c)
self.assertNotEqual(a_sub__b_dot_c, a_sub_b__dot_c)
self.assertNotEqual(a_sub__b_dot_c, a_dot_b_sub_c)
self.assertNotEqual(a_sub_b__dot_c, a_dot_b_sub_c)
def test_hashable(self):
d = {QN('a'): 'a', QN('b'): 'b'}
self.assertEqual(d[QN('a')], 'a')
self.assertEqual(d[QN('b')], 'b')
self.assertNotIn(QN('c'), d)
def test_literals(self):
a = QN('a')
a_sub_str_b = QN(a, subscript=QN(qual_names.Literal('b')))
a_sub_b = QN(a, subscript=QN('b'))
self.assertNotEqual(a_sub_str_b, a_sub_b)
self.assertNotEqual(hash(a_sub_str_b), hash(a_sub_b))
self.assertEqual(a_sub_str_b.ast().slice.value, 'b')
self.assertEqual(str(a_sub_str_b), "a['b']")
a_sub_three = QN(a, subscript=QN(qual_names.Literal(3)))
self.assertEqual(a_sub_three.ast().slice.value, 3)
self.assertEqual(str(a_sub_three), 'a[3]')
def test_support_set(self):
a = QN('a')
b = QN('b')
c = QN('c')
a_sub_b = QN(a, subscript=b)
a_dot_b = QN(a, attr='b')
a_dot_b_dot_c = QN(a_dot_b, attr='c')
a_dot_b_sub_c = QN(a_dot_b, subscript=c)
self.assertSetEqual(a.support_set, set((a,)))
self.assertSetEqual(a_sub_b.support_set, set((a, b)))
self.assertSetEqual(a_dot_b.support_set, set((a,)))
self.assertSetEqual(a_dot_b_dot_c.support_set, set((a,)))
self.assertSetEqual(a_dot_b_sub_c.support_set, set((a, c)))
def test_comparison(self):
less_than_apos = chr(ord('\'') - 1)
self.assertGreater(QN('z'), QN(qual_names.Literal('a')))
self.assertLess(QN(less_than_apos), QN(qual_names.Literal('a')))
self.assertGreater(QN(qual_names.Literal('z')), QN(less_than_apos))
self.assertLess(QN(qual_names.Literal('a')), QN('z'))
| QNTest |
python | sqlalchemy__sqlalchemy | test/sql/test_external_traversal.py | {
"start": 1885,
"end": 10817
} | class ____(
fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL
):
"""test ClauseVisitor's traversal, particularly its
ability to copy and modify a ClauseElement in place."""
@classmethod
def setup_test_class(cls):
global A, B
# establish two fictitious ClauseElements.
# define deep equality semantics as well as deep
# identity semantics.
class A(ClauseElement):
__visit_name__ = "a"
_traverse_internals = []
def __init__(self, expr):
self.expr = expr
def is_other(self, other):
return other is self
__hash__ = ClauseElement.__hash__
def __eq__(self, other):
return other.expr == self.expr
def __ne__(self, other):
return other.expr != self.expr
def __str__(self):
return "A(%s)" % repr(self.expr)
class B(ClauseElement):
__visit_name__ = "b"
def __init__(self, *items):
self.items = items
def is_other(self, other):
if other is not self:
return False
for i1, i2 in zip(self.items, other.items):
if i1 is not i2:
return False
return True
__hash__ = ClauseElement.__hash__
def __eq__(self, other):
for i1, i2 in zip(self.items, other.items):
if i1 != i2:
return False
return True
def __ne__(self, other):
for i1, i2 in zip(self.items, other.items):
if i1 != i2:
return True
return False
def _copy_internals(self, clone=_clone, **kw):
self.items = [clone(i, **kw) for i in self.items]
def get_children(self, **kwargs):
return self.items
def __str__(self):
return "B(%s)" % repr([str(i) for i in self.items])
def test_test_classes(self):
a1 = A("expr1")
struct = B(a1, A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3"))
struct2 = B(a1, A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3"))
struct3 = B(
a1, A("expr2"), B(A("expr1b"), A("expr2bmodified")), A("expr3")
)
assert a1.is_other(a1)
assert struct.is_other(struct)
assert struct == struct2
assert struct != struct3
assert not struct.is_other(struct2)
assert not struct.is_other(struct3)
def test_clone(self):
struct = B(
A("expr1"), A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3")
)
class Vis(CloningVisitor):
def visit_a(self, a):
pass
def visit_b(self, b):
pass
vis = Vis()
s2 = vis.traverse(struct)
assert struct == s2
assert not struct.is_other(s2)
def test_no_clone(self):
struct = B(
A("expr1"), A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3")
)
class Vis(ClauseVisitor):
def visit_a(self, a):
pass
def visit_b(self, b):
pass
vis = Vis()
s2 = vis.traverse(struct)
assert struct == s2
assert struct.is_other(s2)
def test_clone_anon_label(self):
from sqlalchemy.sql.elements import Grouping
c1 = Grouping(literal_column("q"))
s1 = select(c1)
class Vis(CloningVisitor):
def visit_grouping(self, elem):
pass
vis = Vis()
s2 = vis.traverse(s1)
eq_(list(s2.selected_columns)[0]._anon_name_label, c1._anon_name_label)
@testing.combinations(
("clone",), ("pickle",), ("conv_to_unique"), ("none"), argnames="meth"
)
@testing.combinations(
("name with space",),
("name with [brackets]",),
("name with~~tildes~~",),
argnames="name",
)
@testing.combinations(True, False, argnames="positional")
def test_bindparam_key_proc_for_copies(self, meth, name, positional):
r"""test :ticket:`6249`.
Revised for :ticket:`8056`.
The key of the bindparam needs spaces and other characters
escaped out for the POSTCOMPILE regex to work correctly.
Currently, the bind key reg is::
re.sub(r"[%\(\) \$\[\]]", "_", name)
and the compiler postcompile reg is::
re.sub(r"\__[POSTCOMPILE_(\S+)\]", process_expanding, self.string)
Interestingly, brackets in the name seems to work out.
"""
expr = column(name).in_([1, 2, 3])
if meth == "clone":
expr = visitors.cloned_traverse(expr, {}, {})
elif meth == "pickle":
expr = pickle.loads(pickle.dumps(expr))
elif meth == "conv_to_unique":
expr.right.unique = False
expr.right._convert_to_unique()
token = re.sub(r"[%\(\) \$\[\]]", "_", name)
if positional:
self.assert_compile(
expr,
'"%(name)s" IN (?, ?, ?)' % {"name": name},
checkpositional=(1, 2, 3),
render_postcompile=True,
dialect="default_qmark",
)
else:
tokens = ["%s_1_%s" % (token, i) for i in range(1, 4)]
self.assert_compile(
expr,
'"%(name)s" IN (:%(token)s_1_1, '
":%(token)s_1_2, :%(token)s_1_3)"
% {"name": name, "token": token},
checkparams=dict(zip(tokens, [1, 2, 3])),
render_postcompile=True,
dialect="default",
)
def test_expanding_in_bindparam_safe_to_clone(self):
expr = column("x").in_([1, 2, 3])
expr2 = expr._clone()
# shallow copy, bind is used twice
is_(expr.right, expr2.right)
stmt = and_(expr, expr2)
self.assert_compile(
stmt, "x IN (__[POSTCOMPILE_x_1]) AND x IN (__[POSTCOMPILE_x_1])"
)
self.assert_compile(
stmt, "x IN (1, 2, 3) AND x IN (1, 2, 3)", literal_binds=True
)
def test_traversal_size(self):
"""Test :ticket:`6304`.
Testing that _iterate_from_elements returns only unique FROM
clauses; overall traversal should be short and all items unique.
"""
t = table("t", *[column(x) for x in "pqrxyz"])
s1 = select(t.c.p, t.c.q, t.c.r, t.c.x, t.c.y, t.c.z).subquery()
s2 = (
select(s1.c.p, s1.c.q, s1.c.r, s1.c.x, s1.c.y, s1.c.z)
.select_from(s1)
.subquery()
)
s3 = (
select(s2.c.p, s2.c.q, s2.c.r, s2.c.x, s2.c.y, s2.c.z)
.select_from(s2)
.subquery()
)
tt = list(s3.element._iterate_from_elements())
eq_(tt, [s2])
total = list(visitors.iterate(s3))
# before the bug was fixed, this was 750
eq_(len(total), 25)
seen = set()
for elem in visitors.iterate(s3):
assert elem not in seen
seen.add(elem)
eq_(len(seen), 25)
def test_change_in_place(self):
struct = B(
A("expr1"), A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3")
)
struct2 = B(
A("expr1"),
A("expr2modified"),
B(A("expr1b"), A("expr2b")),
A("expr3"),
)
struct3 = B(
A("expr1"),
A("expr2"),
B(A("expr1b"), A("expr2bmodified")),
A("expr3"),
)
class Vis(CloningVisitor):
def visit_a(self, a):
if a.expr == "expr2":
a.expr = "expr2modified"
def visit_b(self, b):
pass
vis = Vis()
s2 = vis.traverse(struct)
assert struct != s2
assert not struct.is_other(s2)
assert struct2 == s2
class Vis2(CloningVisitor):
def visit_a(self, a):
if a.expr == "expr2b":
a.expr = "expr2bmodified"
def visit_b(self, b):
pass
vis2 = Vis2()
s3 = vis2.traverse(struct)
assert struct != s3
assert struct3 == s3
def test_visit_name(self):
# override fns in testlib/schema.py
from sqlalchemy import Column
class CustomObj(Column):
pass
assert CustomObj.__visit_name__ == Column.__visit_name__ == "column"
foo, bar = CustomObj("foo", String), CustomObj("bar", String)
bin_ = foo == bar
set(ClauseVisitor().iterate(bin_))
assert set(ClauseVisitor().iterate(bin_)) == {foo, bar, bin_}
| TraversalTest |
python | django__django | tests/m2m_through/models.py | {
"start": 2512,
"end": 2772
} | class ____(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, models.CASCADE)
second = models.ForeignKey(PersonSelfRefM2M, models.CASCADE, related_name="+")
date_friended = models.DateField()
# Custom through link fields
| SymmetricalFriendship |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/from_tensor_slices_test.py | {
"start": 1597,
"end": 13245
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testFromTensorSlicesEmptyComponent(self):
components = ()
with self.assertRaises(ValueError):
dataset_ops.Dataset.from_tensor_slices(components)
@combinations.generate(test_base.default_test_combinations())
def testFromTensorSlices(self):
"""Test a dataset that represents the slices from a tuple of tensors."""
components = (
np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(
np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
for i in range(4):
results = self.evaluate(get_next())
for component, result_component in zip(components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
results = self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testFromTensorSlicesDataset(self):
dss = [dataset_ops.Dataset.range(10) for _ in range(10)]
ds = dataset_ops.Dataset.from_tensor_slices(dss)
ds = ds.flat_map(lambda x: x)
self.assertDatasetProduces(ds, expected_output=list(range(10)) * 10)
@combinations.generate(test_base.default_test_combinations())
def testFromTensorSlicesDatasetOfOrderedDict(self):
dss = [dataset_ops.Dataset.range(10).map(
lambda x: collections.OrderedDict([("x", x)])) for _ in range(10)]
ds = dataset_ops.Dataset.from_tensor_slices(dss)
ds = ds.flat_map(lambda x: x)
self.assertDatasetProduces(
ds,
expected_output=[collections.OrderedDict([("x", x)])
for x in list(range(10)) * 10])
@combinations.generate(test_base.default_test_combinations())
def testFromTensorSlicesDatasetInFunction(self):
dss = [dataset_ops.Dataset.range(10) for _ in range(10)]
ds = dataset_ops.Dataset.from_tensors(dss)
ds = ds.flat_map(dataset_ops.Dataset.from_tensor_slices)
ds = ds.flat_map(lambda x: x)
self.assertDatasetProduces(ds, expected_output=list(range(10)) * 10)
@combinations.generate(test_base.default_test_combinations())
def testFromTensorSlicesSparse(self):
"""Test a dataset that represents the slices from a tuple of tensors."""
components = (sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1], [2, 2]]),
values=np.array([1, 2, 3]),
dense_shape=np.array([3, 3])))
dataset = dataset_ops.Dataset.from_tensor_slices(components)
self.assertEqual(
[tensor_shape.TensorShape(c.dense_shape[1:]) for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
expected = [
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([1]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[1]]),
values=np.array([2]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[2]]),
values=np.array([3]),
dense_shape=np.array([3]))),
]
self.assertDatasetProduces(dataset, expected_output=expected)
@combinations.generate(test_base.default_test_combinations())
def testFromTensorSlicesMixed(self):
"""Test a dataset that represents the slices from a tuple of tensors."""
components = (np.tile(np.array([[1], [2], [3]]), 20),
np.tile(np.array([[12], [13], [14]]), 22),
np.array([37.0, 38.0, 39.0]),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1], [2, 2]]),
values=np.array([1, 2, 3]),
dense_shape=np.array([3, 3])))
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
self.assertEqual([
tensor_shape.TensorShape(c.dense_shape[1:])
if sparse_tensor.is_sparse(c) else c.shape[1:] for c in components
], [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
expected = [
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([1]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[1]]),
values=np.array([2]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[2]]),
values=np.array([3]),
dense_shape=np.array([3]))),
]
for i in range(3):
results = self.evaluate(get_next())
for component, result_component in zip(
(list(zip(*components[:3]))[i] + expected[i]), results):
self.assertValuesEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testFromTensorSlicesWithDict(self):
components = {"foo": [1, 2, 3], "bar": [[4.0], [5.0], [6.0]]}
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
self.assertEqual(dtypes.int32,
dataset_ops.get_legacy_output_types(dataset)["foo"])
self.assertEqual(dtypes.float32,
dataset_ops.get_legacy_output_types(dataset)["bar"])
self.assertEqual((), dataset_ops.get_legacy_output_shapes(dataset)["foo"])
self.assertEqual((1,), dataset_ops.get_legacy_output_shapes(dataset)["bar"])
for i in range(3):
results = self.evaluate(get_next())
self.assertEqual(components["foo"][i], results["foo"])
self.assertEqual(components["bar"][i], results["bar"])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testFromTensorSlicesRagged(self):
components = (
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]),
ragged_factory_ops.constant_value([[[3]], [[4]], [[5]]]),
)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
expected = [(ragged_factory_ops.constant_value([[0]]),
ragged_factory_ops.constant_value([[3]])),
(ragged_factory_ops.constant_value([[1]]),
ragged_factory_ops.constant_value([[4]])),
(ragged_factory_ops.constant_value([[2]]),
ragged_factory_ops.constant_value([[5]]))]
self.assertDatasetProduces(dataset, expected_output=expected)
@combinations.generate(test_base.default_test_combinations())
def testFromTensorSlicesMixedRagged(self):
components = (np.tile(np.array([[1], [2], [3]]),
20), np.tile(np.array([[12], [13], [14]]),
22), np.array([37.0, 38.0, 39.0]),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1], [2, 2]]),
values=np.array([1, 2, 3]),
dense_shape=np.array([3, 3])),
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]))
dataset = dataset_ops.Dataset.from_tensor_slices(components)
get_next = self.getNext(dataset)
expected = [
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([1]),
dense_shape=np.array([3])), ragged_factory_ops.constant_value([[0]
])),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[1]]),
values=np.array([2]),
dense_shape=np.array([3])), ragged_factory_ops.constant_value([[1]
])),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[2]]),
values=np.array([3]),
dense_shape=np.array([3])), ragged_factory_ops.constant_value([[2]
])),
]
for i in range(3):
results = self.evaluate(get_next())
for component, result_component in zip(
(list(zip(*components[:3]))[i] + expected[i]), results):
self.assertValuesEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testFromTensorSlicesWithUintDtypes(self):
components = (
np.tile(np.array([[0], [1]], dtype=np.uint8), 2),
np.tile(np.array([[2], [256]], dtype=np.uint16), 2),
np.tile(np.array([[4], [65536]], dtype=np.uint32), 2),
np.tile(np.array([[8], [4294967296]], dtype=np.uint64), 2),
)
expected_types = (dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64)
expected_output = [tuple([c[i] for c in components]) for i in range(2)]
dataset = dataset_ops.Dataset.from_tensor_slices(components)
self.assertEqual(expected_types,
dataset_ops.get_legacy_output_types(dataset))
self.assertDatasetProduces(dataset, expected_output)
| FromTensorSlicesTest |
python | walkccc__LeetCode | solutions/2091. Removing Minimum and Maximum From Array/2091.py | {
"start": 0,
"end": 224
} | class ____:
def minimumDeletions(self, nums: list[int]) -> int:
n = len(nums)
a = nums.index(min(nums))
b = nums.index(max(nums))
if a > b:
a, b = b, a
return min(a + 1 + n - b, b + 1, n - a)
| Solution |
python | PrefectHQ__prefect | tests/test_serializers.py | {
"start": 3558,
"end": 5716
} | class ____:
@pytest.mark.parametrize("data", SERIALIZER_TEST_CASES)
def test_simple_roundtrip(self, data):
serializer = PickleSerializer()
serialized = serializer.dumps(data)
assert serializer.loads(serialized) == data
@pytest.mark.parametrize("data", EXCEPTION_TEST_CASES)
def test_exception_roundtrip(self, data):
serializer = PickleSerializer()
serialized = serializer.dumps(data)
assert exceptions_equal(serializer.loads(serialized), data)
@pytest.mark.parametrize("data", SERIALIZER_TEST_CASES)
def test_simple_roundtrip_with_builtin_pickle(self, data):
serializer = PickleSerializer(picklelib="pickle")
serialized = serializer.dumps(data)
assert serializer.loads(serialized) == data
def test_picklelib_must_be_string(self):
import pickle
with pytest.raises(ValueError):
PickleSerializer(picklelib=pickle)
def test_picklelib_is_used(self, monkeypatch: pytest.MonkeyPatch):
dumps = MagicMock(return_value=b"test")
loads = MagicMock(return_value="test")
monkeypatch.setattr("pickle.dumps", dumps)
monkeypatch.setattr("pickle.loads", loads)
serializer = PickleSerializer(picklelib="pickle")
serializer.dumps("test")
dumps.assert_called_once_with("test")
serializer.loads(b"test")
loads.assert_called_once_with(base64.decodebytes(b"test"))
def test_picklelib_must_implement_dumps(self, monkeypatch: pytest.MonkeyPatch):
import pickle
monkeypatch.delattr(pickle, "dumps")
with pytest.raises(
ValueError,
match="Pickle library at 'pickle' does not have a 'dumps' method.",
):
PickleSerializer(picklelib="pickle")
def test_picklelib_must_implement_loads(self, monkeypatch: pytest.MonkeyPatch):
import pickle
monkeypatch.delattr(pickle, "loads")
with pytest.raises(
ValueError,
match="Pickle library at 'pickle' does not have a 'loads' method.",
):
PickleSerializer(picklelib="pickle")
| TestPickleSerializer |
python | pytest-dev__pytest | src/_pytest/_py/path.py | {
"start": 5661,
"end": 7161
} | class ____:
if TYPE_CHECKING:
@property
def size(self) -> int: ...
@property
def mtime(self) -> float: ...
def __getattr__(self, name: str) -> Any:
return getattr(self._osstatresult, "st_" + name)
def __init__(self, path, osstatresult):
self.path = path
self._osstatresult = osstatresult
@property
def owner(self):
if iswin32:
raise NotImplementedError("XXX win32")
import pwd
entry = error.checked_call(pwd.getpwuid, self.uid) # type:ignore[attr-defined,unused-ignore]
return entry[0]
@property
def group(self):
"""Return group name of file."""
if iswin32:
raise NotImplementedError("XXX win32")
import grp
entry = error.checked_call(grp.getgrgid, self.gid) # type:ignore[attr-defined,unused-ignore]
return entry[0]
def isdir(self):
return S_ISDIR(self._osstatresult.st_mode)
def isfile(self):
return S_ISREG(self._osstatresult.st_mode)
def islink(self):
self.path.lstat()
return S_ISLNK(self._osstatresult.st_mode)
def getuserid(user):
import pwd
if not isinstance(user, int):
user = pwd.getpwnam(user)[2] # type:ignore[attr-defined,unused-ignore]
return user
def getgroupid(group):
import grp
if not isinstance(group, int):
group = grp.getgrnam(group)[2] # type:ignore[attr-defined,unused-ignore]
return group
| Stat |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_ls_commands.py | {
"start": 132,
"end": 3096
} | class ____:
"""Test suite for ls commands."""
def setup_method(self):
"""Set up test fixtures."""
self.runner = CliRunner()
def test_ls_symbols_with_package_dagster(self):
"""Test listing symbols from dagster package."""
result = self.runner.invoke(ls, ["symbols", "--package", "dagster"])
# Should complete successfully
assert result.exit_code == 0
# Should contain some @public-decorated dagster symbols
output = result.output
assert "dagster.Component" in output
assert "dagster.ComponentLoadContext" in output
assert "dagster.definitions" in output
def test_ls_symbols_with_package_dagster_core(self):
"""Test listing symbols from dagster._core subpackage."""
result = self.runner.invoke(ls, ["symbols", "--package", "dagster._core.definitions"])
# Should complete successfully
assert result.exit_code == 0
# This subpackage may have no @public symbols, which is valid
# The test passes as long as the command doesn't error
assert result.exit_code == 0
def test_ls_symbols_with_nonexistent_package(self):
"""Test listing symbols from nonexistent package should fail."""
result = self.runner.invoke(ls, ["symbols", "--package", "nonexistent.package"])
# Should fail with exit code 1
assert result.exit_code == 1
assert "Error: Could not import package" in result.output
def test_ls_symbols_no_options_fails(self):
"""Test that ls symbols without options fails."""
result = self.runner.invoke(ls, ["symbols"])
# Should fail with exit code 1
assert result.exit_code == 1
assert "Error: One of --all or --package must be provided" in result.output
def test_ls_symbols_all_runs(self):
"""Test that ls symbols --all runs without NotImplementedError."""
result = self.runner.invoke(ls, ["symbols", "--all"])
# Should not raise NotImplementedError and should exit cleanly
assert result.exit_code in [0, 1] # Can succeed or fail but shouldn't crash
assert "Global symbol discovery functionality not yet implemented" not in result.output
def test_ls_packages_runs(self):
"""Test that ls packages runs without NotImplementedError."""
result = self.runner.invoke(ls, ["packages"])
# Should not raise NotImplementedError and should exit cleanly
assert result.exit_code == 0 # This should succeed as it lists packages
assert "Package discovery functionality not yet implemented" not in result.output
def test_ls_help_command(self):
"""Test that ls help works."""
result = self.runner.invoke(ls, ["--help"])
assert result.exit_code == 0
assert "List packages and symbols" in result.output
assert "packages" in result.output
assert "symbols" in result.output
| TestLsCommands |
python | pandas-dev__pandas | pandas/io/formats/excel.py | {
"start": 1173,
"end": 1652
} | class ____:
__fields__ = ("row", "col", "val", "style", "mergestart", "mergeend")
__slots__ = __fields__
def __init__(
self,
row: int,
col: int,
val,
style=None,
mergestart: int | None = None,
mergeend: int | None = None,
) -> None:
self.row = row
self.col = col
self.val = val
self.style = style
self.mergestart = mergestart
self.mergeend = mergeend
| ExcelCell |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_privacy_urls.py | {
"start": 18989,
"end": 19146
} | class ____(PublicUserProfileMixin, TestCase):
def login(self):
pass
def is_admin(self):
return False
| PublicUserProfileUnauthAccessTest |
python | docker__docker-py | tests/integration/api_container_test.py | {
"start": 30864,
"end": 32635
} | class ____(BaseAPIIntegrationTest):
def test_wait(self):
res = self.client.create_container(TEST_IMG, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(id)
exitcode = self.client.wait(id)['StatusCode']
assert exitcode == 0
inspect = self.client.inspect_container(id)
assert 'Running' in inspect['State']
assert inspect['State']['Running'] is False
assert 'ExitCode' in inspect['State']
assert inspect['State']['ExitCode'] == exitcode
def test_wait_with_dict_instead_of_id(self):
res = self.client.create_container(TEST_IMG, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(res)
exitcode = self.client.wait(res)['StatusCode']
assert exitcode == 0
inspect = self.client.inspect_container(res)
assert 'Running' in inspect['State']
assert inspect['State']['Running'] is False
assert 'ExitCode' in inspect['State']
assert inspect['State']['ExitCode'] == exitcode
@requires_api_version('1.30')
def test_wait_with_condition(self):
ctnr = self.client.create_container(TEST_IMG, 'true')
self.tmp_containers.append(ctnr)
with pytest.raises(requests.exceptions.ConnectionError):
self.client.wait(ctnr, condition='removed', timeout=1)
ctnr = self.client.create_container(
TEST_IMG, ['sleep', '3'],
host_config=self.client.create_host_config(auto_remove=True)
)
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
assert self.client.wait(
ctnr, condition='removed', timeout=5
)['StatusCode'] == 0
| WaitTest |
python | neetcode-gh__leetcode | python/0355-design-twitter.py | {
"start": 0,
"end": 1429
} | class ____:
def __init__(self):
self.count = 0
self.tweetMap = defaultdict(list) # userId -> list of [count, tweetIds]
self.followMap = defaultdict(set) # userId -> set of followeeId
def postTweet(self, userId: int, tweetId: int) -> None:
self.tweetMap[userId].append([self.count, tweetId])
self.count -= 1
def getNewsFeed(self, userId: int) -> List[int]:
res = []
minHeap = []
self.followMap[userId].add(userId)
for followeeId in self.followMap[userId]:
if followeeId in self.tweetMap:
index = len(self.tweetMap[followeeId]) - 1
count, tweetId = self.tweetMap[followeeId][index]
heapq.heappush(minHeap, [count, tweetId, followeeId, index - 1])
while minHeap and len(res) < 10:
count, tweetId, followeeId, index = heapq.heappop(minHeap)
res.append(tweetId)
if index >= 0:
count, tweetId = self.tweetMap[followeeId][index]
heapq.heappush(minHeap, [count, tweetId, followeeId, index - 1])
return res
def follow(self, followerId: int, followeeId: int) -> None:
self.followMap[followerId].add(followeeId)
def unfollow(self, followerId: int, followeeId: int) -> None:
if followeeId in self.followMap[followerId]:
self.followMap[followerId].remove(followeeId)
| Twitter |
python | getsentry__sentry | tests/sentry/release_health/test_tasks.py | {
"start": 22574,
"end": 25790
} | class ____(TestMetricReleaseMonitor):
def test_adopt_releases_respects_environment_and_threshold(self) -> None:
# Empty environment should be ignored
adopt_releases(
self.organization.id,
{self.project1.id: {"": {"releases": {"0.1": 1}, "total_sessions": 1}}},
)
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id, environment__name=""
).exists()
# Valid env with releases meeting 10% threshold should be adopted
adopt_releases(
self.organization.id,
{
self.project1.id: {
"prod": {"releases": {"0.1": 1, "0.3": 9}, "total_sessions": 10}
},
self.project2.id: {"prod": {"releases": {"0.1": 1, "0.2": 4}, "total_sessions": 5}},
},
)
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release__version="0.1",
environment__name="prod",
adopted__isnull=False,
).exists()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release__version="0.3",
environment__name="prod",
adopted__isnull=False,
).exists()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project2.id,
release__version="0.1",
environment__name="prod",
adopted__isnull=False,
).exists()
assert ReleaseProjectEnvironment.objects.filter(
project_id=self.project2.id,
release__version="0.2",
environment__name="prod",
adopted__isnull=False,
).exists()
# Below threshold should not adopt
adopt_releases(
self.organization.id,
{self.project1.id: {"prod": {"releases": {"0.1": 1}, "total_sessions": 100}}},
)
assert not ReleaseProjectEnvironment.objects.filter(
project_id=self.project1.id,
release__version="0.1",
environment__name="prod",
adopted__gt=timezone.now(),
).exists()
def test_valid_environment() -> None:
"""A valid environment is one that has at least one session and a non-empty name."""
assert valid_environment("production", 20)
assert not valid_environment("", 20)
assert not valid_environment("production", 0)
def test_valid_and_adopted_release() -> None:
"""A valid release has a valid name and at least 10% of the environment's sessions."""
assert valid_and_adopted_release("release", 10, 100)
assert not valid_and_adopted_release("", 10, 100)
assert not valid_and_adopted_release("\t", 10, 100)
assert not valid_and_adopted_release("release", 10, 101)
def test_has_been_adopted() -> None:
"""An adopted session has at least 10% of the environment's sessions."""
assert has_been_adopted(10, 1)
assert has_been_adopted(100, 10)
assert has_been_adopted(1000, 100)
assert not has_been_adopted(100, 0)
assert not has_been_adopted(100, 1)
assert not has_been_adopted(100, 9)
| TestAdoptReleasesPath |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_container_upload_block_param.py | {
"start": 337,
"end": 602
} | class ____(TypedDict, total=False):
file_id: Required[str]
type: Required[Literal["container_upload"]]
cache_control: Optional[BetaCacheControlEphemeralParam]
"""Create a cache control breakpoint at this content block."""
| BetaContainerUploadBlockParam |
python | chardet__chardet | chardet/enums.py | {
"start": 152,
"end": 322
} | class ____:
"""
This enum represents the different states a universal detector can be in.
"""
PURE_ASCII = 0
ESC_ASCII = 1
HIGH_BYTE = 2
| InputState |
python | google__pytype | pytype/tools/analyze_project/pytype_runner_test.py | {
"start": 2080,
"end": 7814
} | class ____(unittest.TestCase):
"""Test deps_from_import_graph."""
def setUp(self):
super().setUp()
init = Local('/foo/bar/__init__.py', 'bar/__init__.py', 'bar')
a = Local('/foo/bar/a.py', 'bar/a.py', 'bar.a')
b = Local('/foo/bar/b.py', 'bar/b.py', 'bar.b')
self.sources = [x.path for x in [init, a, b]]
self.provenance = {x.path: x for x in [init, a, b]}
def test_basic(self):
graph = FakeImportGraph(
self.sources, self.provenance, collections.defaultdict(list)
)
deps = pytype_runner.deps_from_import_graph(graph)
expected = [
((Module('/foo/', 'bar/__init__.py', 'bar.__init__'),), ()),
((Module('/foo/', 'bar/a.py', 'bar.a'),), ()),
((Module('/foo/', 'bar/b.py', 'bar.b'),), ()),
]
self.assertEqual(deps, expected)
def test_duplicate_deps(self):
graph = FakeImportGraph(
self.sources,
self.provenance,
collections.defaultdict(lambda: [self.sources[0]] * 2),
)
deps = pytype_runner.deps_from_import_graph(graph)
init = Module('/foo/', 'bar/__init__.py', 'bar.__init__')
expected = [
((init,), (init,)),
((Module('/foo/', 'bar/a.py', 'bar.a'),), (init,)),
((Module('/foo/', 'bar/b.py', 'bar.b'),), (init,)),
]
self.assertEqual(deps, expected)
def test_pyi_src(self):
pyi_mod = Local('/foo/bar/c.pyi', 'bar/c.pyi', 'bar.c')
provenance = {pyi_mod.path: pyi_mod}
provenance.update(self.provenance)
graph = FakeImportGraph(
self.sources + [pyi_mod.path], provenance, collections.defaultdict(list)
)
deps = pytype_runner.deps_from_import_graph(graph)
expected = [
((Module('/foo/', 'bar/__init__.py', 'bar.__init__'),), ()),
((Module('/foo/', 'bar/a.py', 'bar.a'),), ()),
((Module('/foo/', 'bar/b.py', 'bar.b'),), ()),
]
self.assertEqual(deps, expected)
def test_pyi_dep(self):
pyi_mod = Local('/foo/bar/c.pyi', 'bar/c.pyi', 'bar.c')
graph = FakeImportGraph(
self.sources,
self.provenance,
collections.defaultdict(lambda: [pyi_mod.path]),
)
deps = pytype_runner.deps_from_import_graph(graph)
expected = [
((Module('/foo/', 'bar/__init__.py', 'bar.__init__'),), ()),
((Module('/foo/', 'bar/a.py', 'bar.a'),), ()),
((Module('/foo/', 'bar/b.py', 'bar.b'),), ()),
]
self.assertEqual(deps, expected)
def test_pyi_with_src_dep(self):
# py_mod -> pyi_mod -> py_dep
py_mod = Local('/foo/a/b.py', 'a/b.py', 'a.b')
pyi_mod = Local('/foo/bar/c.pyi', 'bar/c.pyi', 'bar.c')
py_dep = Local('/foo/a/c.py', 'a/c.py', 'a.c')
sources = [py_dep, pyi_mod, py_mod]
graph = FakeImportGraph(
source_files=[x.path for x in sources],
provenance={x.path: x for x in sources},
source_to_deps={
py_mod.path: [pyi_mod.path],
pyi_mod.path: [py_dep.path],
py_dep.path: [],
},
)
deps = pytype_runner.deps_from_import_graph(graph)
expected = [
((Module('/foo/', 'a/c.py', 'a.c'),), ()),
(
(Module('/foo/', 'a/b.py', 'a.b'),),
(Module('/foo/', 'a/c.py', 'a.c'),),
),
]
self.assertEqual(deps, expected)
def test_pyi_with_src_dep_transitive(self):
# py_mod -> pyi_mod -> pyi_dep -> py_dep
py_mod = Local('/foo/a/b.py', 'a/b.py', 'a.b')
pyi_mod = Local('/foo/bar/c.pyi', 'bar/c.pyi', 'bar.c')
pyi_dep = Local('/foo/bar/d.pyi', 'bar/d.pyi', 'bar.d')
py_dep = Local('/foo/a/c.py', 'a/c.py', 'a.c')
sources = [py_dep, pyi_dep, pyi_mod, py_mod]
graph = FakeImportGraph(
source_files=[x.path for x in sources],
provenance={x.path: x for x in sources},
source_to_deps={
py_mod.path: [pyi_mod.path],
pyi_mod.path: [pyi_dep.path],
pyi_dep.path: [py_dep.path],
py_dep.path: [],
},
)
deps = pytype_runner.deps_from_import_graph(graph)
expected = [
((Module('/foo/', 'a/c.py', 'a.c'),), ()),
(
(Module('/foo/', 'a/b.py', 'a.b'),),
(Module('/foo/', 'a/c.py', 'a.c'),),
),
]
self.assertEqual(deps, expected)
def test_pyi_with_src_dep_branching(self):
# py_mod -> pyi_mod1 -> py_dep1
# | |--> py_dep2
# |
# |--> pyi_mod2 -> py_dep3
py_mod = Local('/foo/a/b.py', 'a/b.py', 'a.b')
pyi_mod1 = Local('/foo/bar/c.pyi', 'bar/c.pyi', 'bar.c')
py_dep1 = Local('/foo/a/c.py', 'a/c.py', 'a.c')
py_dep2 = Local('/foo/a/d.py', 'a/d.py', 'a.d')
pyi_mod2 = Local('/foo/bar/d.pyi', 'bar/d.pyi', 'bar.d')
py_dep3 = Local('/foo/a/e.py', 'a/e.py', 'a.e')
sources = [py_dep3, pyi_mod2, py_dep2, py_dep1, pyi_mod1, py_mod]
graph = FakeImportGraph(
source_files=[x.path for x in sources],
provenance={x.path: x for x in sources},
source_to_deps={
py_mod.path: [pyi_mod1.path, pyi_mod2.path],
pyi_mod1.path: [py_dep1.path, py_dep2.path],
py_dep1.path: [],
py_dep2.path: [],
pyi_mod2.path: [py_dep3.path],
py_dep3.path: [],
},
)
deps = pytype_runner.deps_from_import_graph(graph)
expected = [
((Module('/foo/', 'a/e.py', 'a.e'),), ()),
((Module('/foo/', 'a/d.py', 'a.d'),), ()),
((Module('/foo/', 'a/c.py', 'a.c'),), ()),
(
(Module('/foo/', 'a/b.py', 'a.b'),),
(
Module('/foo/', 'a/c.py', 'a.c'),
Module('/foo/', 'a/d.py', 'a.d'),
Module('/foo/', 'a/e.py', 'a.e'),
),
),
]
self.assertEqual(deps, expected)
| TestDepsFromImportGraph |
python | jazzband__tablib | src/tablib/_vendor/dbfpy/fields.py | {
"start": 1568,
"end": 6739
} | class ____:
"""Abstract field definition.
Child classes must override ``type`` class attribute to provide datatype
information of the field definition. For more info about types visit
`https://www.clicketyclick.dk/databases/xbase/format/data_types.html`
Also child classes must override ``defaultValue`` field to provide
default value for the field value.
If child class has fixed length ``length`` class attribute must be
overridden and set to the valid value. None value means, that field
isn't of fixed length.
Note: ``name`` field must not be changed after instantiation.
"""
__slots__ = ("name", "decimalCount", "start", "end", "ignoreErrors")
# length of the field, None in case of variable-length field,
# or a number if this field is a fixed-length field
length = None
# field type. for more information about fields types visit
# `https://www.clicketyclick.dk/databases/xbase/format/data_types.html`
# must be overridden in child classes
typeCode = None
# default value for the field. this field must be
# overridden in child classes
defaultValue = None
def __init__(self, name, length=None, decimalCount=None,
start=None, stop=None, ignoreErrors=False):
"""Initialize instance."""
assert self.typeCode is not None, "Type code must be overridden"
assert self.defaultValue is not None, "Default value must be overridden"
# fix arguments
if len(name) > 10:
raise ValueError(f"Field name \"{name}\" is too long")
name = str(name).upper()
if self.__class__.length is None:
if length is None:
raise ValueError(f"[{name}] Length isn't specified")
length = int(length)
if length <= 0:
raise ValueError(f"[{name}] Length must be a positive integer")
else:
length = self.length
if decimalCount is None:
decimalCount = 0
# set fields
self.name = name
# FIXME: validate length according to the specification at
# https://www.clicketyclick.dk/databases/xbase/format/data_types.html
self.length = length
self.decimalCount = decimalCount
self.ignoreErrors = ignoreErrors
self.start = start
self.end = stop
def __eq__(self, other):
return repr(self) == repr(other)
def __ne__(self, other):
return repr(self) != repr(other)
def __lt__(self, other):
return repr(self) < repr(other)
def __hash__(self):
return hash(self.name)
def fromString(cls, string, start, ignoreErrors=False):
"""Decode dbf field definition from the string data.
Arguments:
string:
a string, dbf definition is decoded from. length of
the string must be 32 bytes.
start:
position in the database file.
ignoreErrors:
initial error processing mode for the new field (boolean)
"""
assert len(string) == 32
_length = string[16]
return cls(utils.unzfill(string)[:11].decode('utf-8'), _length,
string[17], start, start + _length, ignoreErrors=ignoreErrors)
fromString = classmethod(fromString)
def toString(self):
"""Return encoded field definition.
Return:
Return value is a string object containing encoded
definition of this field.
"""
_name = self.name.ljust(11, '\0')
return (
_name +
self.typeCode +
# data address
chr(0) * 4 +
chr(self.length) +
chr(self.decimalCount) +
chr(0) * 14
)
def __repr__(self):
return "%-10s %1s %3d %3d" % self.fieldInfo() # noqa: UP031
def fieldInfo(self):
"""Return field information.
Return:
Return value is a (name, type, length, decimals) tuple.
"""
return self.name, self.typeCode, self.length, self.decimalCount
def rawFromRecord(self, record):
"""Return a "raw" field value from the record string."""
return record[self.start:self.end]
def decodeFromRecord(self, record):
"""Return decoded field value from the record string."""
try:
return self.decodeValue(self.rawFromRecord(record))
except Exception:
if self.ignoreErrors:
return utils.INVALID_VALUE
else:
raise
def decodeValue(self, value):
"""Return decoded value from string value.
This method shouldn't be used publicly. It's called from the
`decodeFromRecord` method.
This is an abstract method and it must be overridden in child classes.
"""
raise NotImplementedError
def encodeValue(self, value):
"""Return str object containing encoded field value.
This is an abstract method and it must be overridden in child classes.
"""
raise NotImplementedError
# real classes
| DbfFieldDef |
python | catalyst-team__catalyst | catalyst/contrib/layers/pooling.py | {
"start": 985,
"end": 1776
} | class ____(nn.Module):
"""Applies a 2D global max pooling operation over an input signal
composed of several input planes.
@TODO: Docs (add `Example`). Contribution is welcome.
"""
def __init__(self):
"""Constructor method for the ``GlobalMaxPool2d`` class."""
super().__init__()
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward call."""
h, w = x.shape[2:]
return F.max_pool2d(input=x, kernel_size=(h, w))
@staticmethod
def out_features(in_features):
"""Returns number of channels produced by the pooling.
Args:
in_features: number of channels in the input sample
Returns:
number of output features
"""
return in_features
| GlobalMaxPool2d |
python | django__django | django/template/backends/jinja2.py | {
"start": 334,
"end": 1803
} | class ____(BaseEngine):
app_dirname = "jinja2"
def __init__(self, params):
params = params.copy()
options = params.pop("OPTIONS").copy()
super().__init__(params)
self.context_processors = options.pop("context_processors", [])
environment = options.pop("environment", "jinja2.Environment")
environment_cls = import_string(environment)
if "loader" not in options:
options["loader"] = jinja2.FileSystemLoader(self.template_dirs)
options.setdefault("autoescape", True)
options.setdefault("auto_reload", settings.DEBUG)
options.setdefault(
"undefined", jinja2.DebugUndefined if settings.DEBUG else jinja2.Undefined
)
self.env = environment_cls(**options)
def from_string(self, template_code):
return Template(self.env.from_string(template_code), self)
def get_template(self, template_name):
try:
return Template(self.env.get_template(template_name), self)
except jinja2.TemplateNotFound as exc:
raise TemplateDoesNotExist(exc.name, backend=self) from exc
except jinja2.TemplateSyntaxError as exc:
new = TemplateSyntaxError(exc.args)
new.template_debug = get_exception_info(exc)
raise new from exc
@cached_property
def template_context_processors(self):
return [import_string(path) for path in self.context_processors]
| Jinja2 |
python | gevent__gevent | src/greentest/3.9/test_httplib.py | {
"start": 52207,
"end": 55116
} | class ____(TestCase):
def test_all(self):
# Documented objects defined in the module should be in __all__
expected = {"responses"} # Allowlist documented dict() object
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
blacklist = {"HTTPMessage", "parse_headers"}
for name in dir(client):
if name.startswith("_") or name in blacklist:
continue
module_object = getattr(client, name)
if getattr(module_object, "__module__", None) == "http.client":
expected.add(name)
self.assertCountEqual(client.__all__, expected)
def test_responses(self):
self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")
def test_client_constants(self):
# Make sure we don't break backward compatibility with 3.4
expected = [
'CONTINUE',
'SWITCHING_PROTOCOLS',
'PROCESSING',
'OK',
'CREATED',
'ACCEPTED',
'NON_AUTHORITATIVE_INFORMATION',
'NO_CONTENT',
'RESET_CONTENT',
'PARTIAL_CONTENT',
'MULTI_STATUS',
'IM_USED',
'MULTIPLE_CHOICES',
'MOVED_PERMANENTLY',
'FOUND',
'SEE_OTHER',
'NOT_MODIFIED',
'USE_PROXY',
'TEMPORARY_REDIRECT',
'BAD_REQUEST',
'UNAUTHORIZED',
'PAYMENT_REQUIRED',
'FORBIDDEN',
'NOT_FOUND',
'METHOD_NOT_ALLOWED',
'NOT_ACCEPTABLE',
'PROXY_AUTHENTICATION_REQUIRED',
'REQUEST_TIMEOUT',
'CONFLICT',
'GONE',
'LENGTH_REQUIRED',
'PRECONDITION_FAILED',
'REQUEST_ENTITY_TOO_LARGE',
'REQUEST_URI_TOO_LONG',
'UNSUPPORTED_MEDIA_TYPE',
'REQUESTED_RANGE_NOT_SATISFIABLE',
'EXPECTATION_FAILED',
'IM_A_TEAPOT',
'MISDIRECTED_REQUEST',
'UNPROCESSABLE_ENTITY',
'LOCKED',
'FAILED_DEPENDENCY',
'UPGRADE_REQUIRED',
'PRECONDITION_REQUIRED',
'TOO_MANY_REQUESTS',
'REQUEST_HEADER_FIELDS_TOO_LARGE',
'UNAVAILABLE_FOR_LEGAL_REASONS',
'INTERNAL_SERVER_ERROR',
'NOT_IMPLEMENTED',
'BAD_GATEWAY',
'SERVICE_UNAVAILABLE',
'GATEWAY_TIMEOUT',
'HTTP_VERSION_NOT_SUPPORTED',
'INSUFFICIENT_STORAGE',
'NOT_EXTENDED',
'NETWORK_AUTHENTICATION_REQUIRED',
'EARLY_HINTS',
'TOO_EARLY'
]
for const in expected:
with self.subTest(constant=const):
self.assertTrue(hasattr(client, const))
| OfflineTest |
python | joke2k__faker | faker/providers/internet/pt_BR/__init__.py | {
"start": 46,
"end": 607
} | class ____(InternetProvider):
safe_email_tlds = ("com", "net", "br", "br")
free_email_domains = (
"gmail.com",
"hotmail.com",
"yahoo.com.br",
"uol.com.br",
"bol.com.br",
"ig.com.br",
)
tlds = ("com", "com", "com", "net", "org", "br", "br", "br")
replacements = (
("à", "a"),
("â", "a"),
("ã", "a"),
("ç", "c"),
("é", "e"),
("ê", "e"),
("í", "i"),
("ô", "o"),
("ö", "o"),
("õ", "o"),
("ú", "u"),
)
| Provider |
python | numpy__numpy | benchmarks/benchmarks/bench_function_base.py | {
"start": 5311,
"end": 6769
} | class ____(Benchmark):
"""
This benchmark tests sorting performance with several
different types of arrays that are likely to appear in
real-world applications.
"""
params = [
# In NumPy 1.17 and newer, 'merge' can be one of several
# stable sorts, it isn't necessarily merge sort.
['quick', 'merge', 'heap'],
['float64', 'int64', 'float32', 'uint32', 'int32', 'int16', 'float16'],
[
('random',),
('ordered',),
('reversed',),
('uniform',),
('sorted_block', 10),
('sorted_block', 100),
('sorted_block', 1000),
],
]
param_names = ['kind', 'dtype', 'array_type']
# The size of the benchmarked arrays.
ARRAY_SIZE = 1000000
def setup(self, kind, dtype, array_type):
rnd = np.random.RandomState(507582308)
array_class = array_type[0]
generate_array_method = getattr(SortGenerator, array_class)
self.arr = generate_array_method(self.ARRAY_SIZE, dtype, *array_type[1:], rnd)
def time_sort(self, kind, dtype, array_type):
# Using np.sort(...) instead of arr.sort(...) because it makes a copy.
# This is important because the data is prepared once per benchmark, but
# used across multiple runs.
np.sort(self.arr, kind=kind)
def time_argsort(self, kind, dtype, array_type):
np.argsort(self.arr, kind=kind)
| Sort |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | contents/5_Deep_Q_Network/maze_env.py | {
"start": 589,
"end": 4165
} | class ____(tk.Tk, object):
def __init__(self):
super(Maze, self).__init__()
self.action_space = ['u', 'd', 'l', 'r']
self.n_actions = len(self.action_space)
self.n_features = 2
self.title('maze')
self.geometry('{0}x{1}'.format(MAZE_W * UNIT, MAZE_H * UNIT))
self._build_maze()
def _build_maze(self):
self.canvas = tk.Canvas(self, bg='white',
height=MAZE_H * UNIT,
width=MAZE_W * UNIT)
# create grids
for c in range(0, MAZE_W * UNIT, UNIT):
x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
self.canvas.create_line(x0, y0, x1, y1)
for r in range(0, MAZE_H * UNIT, UNIT):
x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r
self.canvas.create_line(x0, y0, x1, y1)
# create origin
origin = np.array([20, 20])
# hell
hell1_center = origin + np.array([UNIT * 2, UNIT])
self.hell1 = self.canvas.create_rectangle(
hell1_center[0] - 15, hell1_center[1] - 15,
hell1_center[0] + 15, hell1_center[1] + 15,
fill='black')
# hell
# hell2_center = origin + np.array([UNIT, UNIT * 2])
# self.hell2 = self.canvas.create_rectangle(
# hell2_center[0] - 15, hell2_center[1] - 15,
# hell2_center[0] + 15, hell2_center[1] + 15,
# fill='black')
# create oval
oval_center = origin + UNIT * 2
self.oval = self.canvas.create_oval(
oval_center[0] - 15, oval_center[1] - 15,
oval_center[0] + 15, oval_center[1] + 15,
fill='yellow')
# create red rect
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# pack all
self.canvas.pack()
def reset(self):
self.update()
time.sleep(0.1)
self.canvas.delete(self.rect)
origin = np.array([20, 20])
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# return observation
return (np.array(self.canvas.coords(self.rect)[:2]) - np.array(self.canvas.coords(self.oval)[:2]))/(MAZE_H*UNIT)
def step(self, action):
s = self.canvas.coords(self.rect)
base_action = np.array([0, 0])
if action == 0: # up
if s[1] > UNIT:
base_action[1] -= UNIT
elif action == 1: # down
if s[1] < (MAZE_H - 1) * UNIT:
base_action[1] += UNIT
elif action == 2: # right
if s[0] < (MAZE_W - 1) * UNIT:
base_action[0] += UNIT
elif action == 3: # left
if s[0] > UNIT:
base_action[0] -= UNIT
self.canvas.move(self.rect, base_action[0], base_action[1]) # move agent
next_coords = self.canvas.coords(self.rect) # next state
# reward function
if next_coords == self.canvas.coords(self.oval):
reward = 1
done = True
elif next_coords in [self.canvas.coords(self.hell1)]:
reward = -1
done = True
else:
reward = 0
done = False
s_ = (np.array(next_coords[:2]) - np.array(self.canvas.coords(self.oval)[:2]))/(MAZE_H*UNIT)
return s_, reward, done
def render(self):
# time.sleep(0.01)
self.update()
| Maze |
python | getsentry__sentry | src/sentry/codecov/client.py | {
"start": 795,
"end": 932
} | class ____(SentryAPIException):
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
code = "configuration-error"
| ConfigurationError |
python | kamyu104__LeetCode-Solutions | Python/find-the-longest-valid-obstacle-course-at-each-position.py | {
"start": 510,
"end": 3009
} | class ____(object): # 0-based index
def __init__(self, N,
build_fn=lambda x, y: [y]*(2*x),
query_fn=lambda x, y: y if x is None else max(x, y), # (lambda x, y: y if x is None else min(x, y))
update_fn=lambda x, y: y,
default_val=0):
self.N = N
self.H = (N-1).bit_length()
self.query_fn = query_fn
self.update_fn = update_fn
self.default_val = default_val
self.tree = build_fn(N, default_val)
self.lazy = [None]*N
def __apply(self, x, val):
self.tree[x] = self.update_fn(self.tree[x], val)
if x < self.N:
self.lazy[x] = self.update_fn(self.lazy[x], val)
def update(self, L, R, h): # Time: O(logN), Space: O(N)
def pull(x):
while x > 1:
x //= 2
self.tree[x] = self.query_fn(self.tree[x*2], self.tree[x*2+1])
if self.lazy[x] is not None:
self.tree[x] = self.update_fn(self.tree[x], self.lazy[x])
L += self.N
R += self.N
L0, R0 = L, R
while L <= R:
if L & 1: # is right child
self.__apply(L, h)
L += 1
if R & 1 == 0: # is left child
self.__apply(R, h)
R -= 1
L //= 2
R //= 2
pull(L0)
pull(R0)
def query(self, L, R): # Time: O(logN), Space: O(N)
def push(x):
n = 2**self.H
while n != 1:
y = x // n
if self.lazy[y] is not None:
self.__apply(y*2, self.lazy[y])
self.__apply(y*2 + 1, self.lazy[y])
self.lazy[y] = None
n //= 2
result = None
if L > R:
return result
L += self.N
R += self.N
push(L)
push(R)
while L <= R:
if L & 1: # is right child
result = self.query_fn(result, self.tree[L])
L += 1
if R & 1 == 0: # is left child
result = self.query_fn(result, self.tree[R])
R -= 1
L //= 2
R //= 2
return result
def __str__(self):
showList = []
for i in xrange(self.N):
showList.append(self.query(i, i))
return ",".join(map(str, showList))
# Time: O(nlogn)
# Space: O(n)
# segment tree solution
| SegmentTree |
python | getsentry__sentry | src/sentry/sentry_apps/models/sentry_app.py | {
"start": 2882,
"end": 9846
} | class ____(ParanoidModel, HasApiScopes, Model):
__relocation_scope__ = RelocationScope.Global
application = models.OneToOneField(
"sentry.ApiApplication", null=True, on_delete=models.SET_NULL, related_name="sentry_app"
)
# Much of the OAuth system in place currently depends on a User existing.
# This "proxy user" represents the SentryApp in those cases.
proxy_user = models.OneToOneField(
"sentry.User", null=True, on_delete=models.SET_NULL, related_name="sentry_app"
)
# The Organization the Sentry App was created in "owns" it. Members of that
# Org have differing access, dependent on their role within the Org.
owner_id = HybridCloudForeignKey("sentry.Organization", on_delete="CASCADE")
name = models.TextField()
slug = SentrySlugField(max_length=SENTRY_APP_SLUG_MAX_LENGTH, unique=True, db_index=False)
author = models.TextField(null=True)
status = BoundedPositiveIntegerField(
default=SentryAppStatus.UNPUBLISHED, choices=SentryAppStatus.as_choices(), db_index=True
)
uuid = models.CharField(max_length=64, default=default_uuid, unique=True)
redirect_url = models.URLField(null=True)
webhook_url = models.URLField(max_length=512, null=True)
# does the application subscribe to `event.alert`,
# meaning can it be used in alert rules as a {service} ?
is_alertable = models.BooleanField(default=False)
# does the application need to wait for verification
# on behalf of the external service to know if its installations
# are successfully installed ?
verify_install = models.BooleanField(default=True)
events = ArrayField(models.TextField(), default=list)
overview = models.TextField(null=True)
schema = models.JSONField(default=dict)
date_added = models.DateTimeField(default=timezone.now)
date_updated = models.DateTimeField(default=timezone.now)
date_published = models.DateTimeField(null=True, blank=True)
creator_user = FlexibleForeignKey(
"sentry.User", null=True, on_delete=models.SET_NULL, db_constraint=False
)
creator_label = models.TextField(null=True)
popularity = models.PositiveSmallIntegerField(null=True, default=1)
metadata = models.JSONField(default=dict)
objects: ClassVar[SentryAppManager] = SentryAppManager()
class Meta:
app_label = "sentry"
db_table = "sentry_sentryapp"
@property
def is_published(self):
return self.status == SentryAppStatus.PUBLISHED
@property
def is_unpublished(self):
return self.status == SentryAppStatus.UNPUBLISHED
@property
def is_internal(self):
return self.status == SentryAppStatus.INTERNAL
@property
def is_publish_request_inprogress(self):
return self.status == SentryAppStatus.PUBLISH_REQUEST_INPROGRESS
@property
def slug_for_metrics(self):
if self.is_internal:
return "internal"
if self.is_unpublished:
return "unpublished"
return self.slug
def save(self, *args, **kwargs):
self.date_updated = timezone.now()
with outbox_context(transaction.atomic(using=router.db_for_write(SentryApp)), flush=False):
result = super().save(*args, **kwargs)
for outbox in self.outboxes_for_update():
outbox.save()
return result
def update(self, *args, **kwargs):
with outbox_context(transaction.atomic(using=router.db_for_write(SentryApp)), flush=False):
result = super().update(*args, **kwargs)
for outbox in self.outboxes_for_update():
outbox.save()
return result
def is_installed_on(self, organization):
from sentry.sentry_apps.models.sentry_app_installation import SentryAppInstallation
return SentryAppInstallation.objects.filter(
organization_id=organization.id,
sentry_app=self,
).exists()
def build_signature(self, body):
assert self.application is not None
secret = self.application.client_secret
return hmac.new(
key=secret.encode("utf-8"), msg=body.encode("utf-8"), digestmod=sha256
).hexdigest()
def show_auth_info(self, access):
from sentry.conf.server import SENTRY_TOKEN_ONLY_SCOPES
encoded_scopes = set({"%s" % scope for scope in list(access.scopes)})
# Exclude token-only scopes from the check since users don't have them in their roles
integration_scopes = set(self.scope_list) - SENTRY_TOKEN_ONLY_SCOPES
return integration_scopes.issubset(encoded_scopes)
def outboxes_for_update(self) -> list[ControlOutbox]:
return [
ControlOutbox(
shard_scope=OutboxScope.APP_SCOPE,
shard_identifier=self.id,
object_identifier=self.id,
category=OutboxCategory.SENTRY_APP_UPDATE,
region_name=region_name,
)
for region_name in find_all_region_names()
]
def outboxes_for_delete(self) -> list[ControlOutbox]:
return [
ControlOutbox(
shard_scope=OutboxScope.APP_SCOPE,
shard_identifier=self.id,
object_identifier=self.id,
category=OutboxCategory.SENTRY_APP_DELETE,
region_name=region_name,
payload={"slug": self.slug},
)
for region_name in find_all_region_names()
]
def regions_with_installations(self) -> set[str]:
return find_regions_for_sentry_app(self)
def delete(self, *args, **kwargs):
from sentry.sentry_apps.models.sentry_app_avatar import SentryAppAvatar
with outbox_context(transaction.atomic(using=router.db_for_write(SentryApp))):
for outbox in self.outboxes_for_update():
outbox.save()
for outbox in self.outboxes_for_delete():
outbox.save()
SentryAppAvatar.objects.filter(sentry_app=self).delete()
return super().delete(*args, **kwargs)
def _disable(self):
self.events = []
self.save(update_fields=["events"])
@classmethod
def sanitize_relocation_json(
cls, json: Any, sanitizer: Sanitizer, model_name: NormalizedModelName | None = None
) -> None:
model_name = get_model_name(cls) if model_name is None else model_name
super().sanitize_relocation_json(json, sanitizer, model_name)
sanitizer.set_string(json, SanitizableField(model_name, "author"))
sanitizer.set_string(json, SanitizableField(model_name, "creator_label"))
sanitizer.set_json(json, SanitizableField(model_name, "metadata"), {})
sanitizer.set_string(json, SanitizableField(model_name, "overview"))
sanitizer.set_json(json, SanitizableField(model_name, "schema"), {})
json["fields"]["events"] = "[]"
| SentryApp |
python | doocs__leetcode | solution/2500-2599/2532.Time to Cross a Bridge/Solution.py | {
"start": 0,
"end": 1595
} | class ____:
def findCrossingTime(self, n: int, k: int, time: List[List[int]]) -> int:
time.sort(key=lambda x: x[0] + x[2])
cur = 0
wait_in_left, wait_in_right = [], []
work_in_left, work_in_right = [], []
for i in range(k):
heappush(wait_in_left, -i)
while 1:
while work_in_left:
t, i = work_in_left[0]
if t > cur:
break
heappop(work_in_left)
heappush(wait_in_left, -i)
while work_in_right:
t, i = work_in_right[0]
if t > cur:
break
heappop(work_in_right)
heappush(wait_in_right, -i)
left_to_go = n > 0 and wait_in_left
right_to_go = bool(wait_in_right)
if not left_to_go and not right_to_go:
nxt = inf
if work_in_left:
nxt = min(nxt, work_in_left[0][0])
if work_in_right:
nxt = min(nxt, work_in_right[0][0])
cur = nxt
continue
if right_to_go:
i = -heappop(wait_in_right)
cur += time[i][2]
if n == 0 and not wait_in_right and not work_in_right:
return cur
heappush(work_in_left, (cur + time[i][3], i))
else:
i = -heappop(wait_in_left)
cur += time[i][0]
n -= 1
heappush(work_in_right, (cur + time[i][1], i))
| Solution |
python | Lightning-AI__lightning | tests/tests_pytorch/checkpointing/test_model_checkpoint.py | {
"start": 36633,
"end": 36862
} | class ____(BoringModel):
def on_validation_batch_end(self, outputs, batch, batch_idx):
if not self.trainer.sanity_checking and batch_idx == 1:
raise RuntimeError("Trouble!")
| TroubledModelOnValidationBatchEnd |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.