language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | numpy__numpy | benchmarks/benchmarks/bench_records.py | {
"start": 52,
"end": 1391
} | class ____(Benchmark):
def setup(self):
self.l50 = np.arange(1000)
self.fields_number = 10000
self.arrays = [self.l50 for _ in range(self.fields_number)]
self.formats = [self.l50.dtype.str for _ in range(self.fields_number)]
self.formats_str = ','.join(self.formats)
self.dtype_ = np.dtype(
[
(f'field_{i}', self.l50.dtype.str)
for i in range(self.fields_number)
]
)
self.buffer = self.l50.tobytes() * self.fields_number
def time_fromarrays_w_dtype(self):
np._core.records.fromarrays(self.arrays, dtype=self.dtype_)
def time_fromarrays_wo_dtype(self):
np._core.records.fromarrays(self.arrays)
def time_fromarrays_formats_as_list(self):
np._core.records.fromarrays(self.arrays, formats=self.formats)
def time_fromarrays_formats_as_string(self):
np._core.records.fromarrays(self.arrays, formats=self.formats_str)
def time_frombytes_w_dtype(self):
np._core.records.fromstring(self.buffer, dtype=self.dtype_)
def time_frombytes_formats_as_list(self):
np._core.records.fromstring(self.buffer, formats=self.formats)
def time_frombytes_formats_as_string(self):
np._core.records.fromstring(self.buffer, formats=self.formats_str)
| Records |
python | facebookresearch__faiss | tests/test_standalone_codec.py | {
"start": 10041,
"end": 10616
} | class ____(unittest.TestCase):
def test_transfer(self):
ds = SyntheticDataset(32, 2000, 200, 100)
index = faiss.index_factory(ds.d, "IVF20,SQ8")
index.train(ds.get_train())
index.add(ds.get_database())
Dref, Iref = index.search(ds.get_queries(), 10)
index.reset()
codes = index.sa_encode(ds.get_database())
index.add_sa_codes(codes)
Dnew, Inew = index.search(ds.get_queries(), 10)
np.testing.assert_array_equal(Iref, Inew)
np.testing.assert_array_equal(Dref, Dnew)
| TestIVFTransfer |
python | getsentry__sentry | tests/sentry/backup/test_exhaustive.py | {
"start": 664,
"end": 2764
} | class ____(BackupTransactionTestCase):
"""
Ensure that a database with all exportable models filled out still works.
"""
# Note: the "clean_pks" version of this test lives in
# `test_sanitize.py::SanitizationExhaustiveTests`. Because these tests are slow, we want to
# reduce duplication, so we only use that one in that particular location.
@expect_models(EXHAUSTIVELY_TESTED, "__all__")
def test_exhaustive_dirty_pks(self, expected_models: list[type[Model]]) -> None:
self.create_exhaustive_instance(is_superadmin=True)
actual = self.import_export_then_validate(self._testMethodName, reset_pks=False)
verify_models_in_output(expected_models, actual)
@expect_models(UNIQUENESS_TESTED, "__all__")
def test_uniqueness(self, expected_models: list[type[Model]]) -> None:
self.create_exhaustive_instance(is_superadmin=True)
with tempfile.TemporaryDirectory() as tmp_dir:
# Export the data once.
tmp_expect = Path(tmp_dir).joinpath(f"{self._testMethodName}.expect.json")
export_to_file(tmp_expect, ExportScope.Global)
clear_database(reset_pks=False)
# Now import twice, so that all random values in the export (UUIDs etc) are identical,
# to test that these are properly replaced and handled.
with open(tmp_expect, "rb") as tmp_file:
import_in_global_scope(tmp_file, printer=NOOP_PRINTER)
with open(tmp_expect, "rb") as tmp_file:
# Back-to-back global scope imports are disallowed (global scope assume a clean
# database), so use organization and config scope instead.
import_in_organization_scope(tmp_file, printer=NOOP_PRINTER)
tmp_file.seek(0)
import_in_config_scope(tmp_file, printer=NOOP_PRINTER)
tmp_actual = Path(tmp_dir).joinpath(f"{self._testMethodName}.actual.json")
actual = export_to_file(tmp_actual, ExportScope.Global)
verify_models_in_output(expected_models, actual)
| ExhaustiveTests |
python | realpython__materials | queue/src/thread_safe_queues.py | {
"start": 1160,
"end": 1921
} | class ____(threading.Thread):
def __init__(self, speed, buffer):
super().__init__(daemon=True)
self.speed = speed
self.buffer = buffer
self.product = None
self.working = False
self.progress = 0
@property
def state(self):
if self.working:
return f"{self.product} ({self.progress}%)"
return ":zzz: Idle"
def simulate_idle(self):
self.product = None
self.working = False
self.progress = 0
sleep(randint(1, 3))
def simulate_work(self):
self.working = True
self.progress = 0
delay = randint(1, 1 + 15 // self.speed)
for _ in range(100):
sleep(delay / 100)
self.progress += 1
| Worker |
python | wandb__wandb | wandb/sdk/data_types/_dtypes.py | {
"start": 25940,
"end": 29988
} | class ____(Type):
"""Represents a dictionary object where each key can have a type."""
name = "typedDict"
legacy_names = ["dictionary"]
types: t.ClassVar[t.List[type]] = [dict]
def __init__(
self,
type_map: t.Optional[t.Dict[str, ConvertibleToType]] = None,
):
if type_map is None:
type_map = {}
self.params.update(
{
"type_map": {
key: TypeRegistry.type_from_dtype(type_map[key]) for key in type_map
}
}
)
@classmethod
def from_obj(cls, py_obj: t.Optional[t.Any] = None) -> "TypedDictType":
if not isinstance(py_obj, dict):
TypeError("TypedDictType.from_obj expects a dictionary")
assert isinstance(py_obj, dict) # helps mypy type checker
return cls({key: TypeRegistry.type_of(py_obj[key]) for key in py_obj})
def assign_type(self, wb_type: "Type") -> t.Union["TypedDictType", InvalidType]:
if (
isinstance(wb_type, TypedDictType)
and len(
set(wb_type.params["type_map"].keys())
- set(self.params["type_map"].keys())
)
== 0
):
type_map = {}
for key in self.params["type_map"]:
type_map[key] = self.params["type_map"][key].assign_type(
wb_type.params["type_map"].get(key, UnknownType())
)
if isinstance(type_map[key], InvalidType):
return InvalidType()
return TypedDictType(type_map)
return InvalidType()
def assign(
self, py_obj: t.Optional[t.Any] = None
) -> t.Union["TypedDictType", InvalidType]:
if (
isinstance(py_obj, dict)
and len(set(py_obj.keys()) - set(self.params["type_map"].keys())) == 0
):
type_map = {}
for key in self.params["type_map"]:
type_map[key] = self.params["type_map"][key].assign(
py_obj.get(key, None)
)
if isinstance(type_map[key], InvalidType):
return InvalidType()
return TypedDictType(type_map)
return InvalidType()
def explain(self, other: t.Any, depth=0) -> str:
exp = super().explain(other, depth)
gap = "".join(["\t"] * depth)
if isinstance(other, dict):
extra_keys = set(other.keys()) - set(self.params["type_map"].keys())
if len(extra_keys) > 0:
exp += "\n{}Found extra keys: {}".format(
gap, ",".join(list(extra_keys))
)
for key in self.params["type_map"]:
val = other.get(key, None)
if isinstance(self.params["type_map"][key].assign(val), InvalidType):
exp += "\n{}Key '{}':\n{}".format(
gap,
key,
self.params["type_map"][key].explain(val, depth=depth + 1),
)
return exp
def __repr__(self):
return "{}".format(self.params["type_map"])
# Special Types
TypeRegistry.add(InvalidType)
TypeRegistry.add(AnyType)
TypeRegistry.add(UnknownType)
# Types with default type mappings
TypeRegistry.add(NoneType)
TypeRegistry.add(StringType)
TypeRegistry.add(TimestampType)
TypeRegistry.add(NumberType)
TypeRegistry.add(BooleanType)
TypeRegistry.add(ListType)
TypeRegistry.add(TypedDictType)
# Types without default type mappings
TypeRegistry.add(UnionType)
TypeRegistry.add(PythonObjectType)
TypeRegistry.add(ConstType)
# Common Industry Types
TypeRegistry.add(NDArrayType)
__all__ = [
"TypeRegistry",
"InvalidType",
"UnknownType",
"AnyType",
"NoneType",
"StringType",
"NumberType",
"TimestampType",
"BooleanType",
"ListType",
"TypedDictType",
"UnionType",
"PythonObjectType",
"ConstType",
"OptionalType",
"Type",
"NDArrayType",
]
| TypedDictType |
python | python-poetry__poetry | src/poetry/pyproject/toml.py | {
"start": 338,
"end": 1740
} | class ____(BasePyProjectTOML):
"""
Enhanced version of poetry-core's PyProjectTOML
which is capable of writing pyproject.toml
The poetry-core class uses tomli to read the file,
here we use tomlkit to preserve comments and formatting when writing.
"""
def __init__(self, path: Path) -> None:
super().__init__(path)
self._toml_file = TOMLFile(path=path)
self._toml_document: TOMLDocument | None = None
@property
def file(self) -> TOMLFile:
return self._toml_file
@property
def data(self) -> TOMLDocument:
if self._toml_document is None:
if not self.file.exists():
self._toml_document = TOMLDocument()
else:
self._toml_document = self.file.read()
return self._toml_document
def save(self) -> None:
data = self.data
if self._build_system is not None:
if "build-system" not in data:
data["build-system"] = table()
build_system = data["build-system"]
assert isinstance(build_system, Table)
build_system["requires"] = self._build_system.requires
build_system["build-backend"] = self._build_system.build_backend
self.file.write(data=data)
def reload(self) -> None:
self._toml_document = None
self._build_system = None
| PyProjectTOML |
python | sqlalchemy__sqlalchemy | test/ext/test_associationproxy.py | {
"start": 28846,
"end": 28915
} | class ____(SetTest):
collection_class = SetCollection
| CustomSetTest |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Operators.py | {
"start": 2779,
"end": 3036
} | class ____(BinOpNode):
"""Returns A / B. Does not check input types."""
nodeName = 'Divide'
def __init__(self, name):
# try truediv first, followed by div
BinOpNode.__init__(self, name, ('__truediv__', '__div__'))
| DivideNode |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-slack/components.py | {
"start": 9262,
"end": 11923
} | class ____(HttpRequester):
"""
Redefines Custom API Budget to handle rate limits.
"""
url_match: str = None
# redefine this here to set up in InterpolatedRequestOptionsProvider in __post_init__
request_parameters: Dict[str, Any] = None
def __post_init__(self, parameters: Mapping[str, Any]) -> None:
self._url = InterpolatedString.create(self.url if self.url else EmptyString, parameters=parameters)
# deprecated
self._url_base = InterpolatedString.create(self.url_base if self.url_base else EmptyString, parameters=parameters)
# deprecated
self._path = InterpolatedString.create(self.path if self.path else EmptyString, parameters=parameters)
if self.request_options_provider is None:
self._request_options_provider = InterpolatedRequestOptionsProvider(
config=self.config,
parameters=parameters,
request_parameters=self.request_parameters,
)
elif isinstance(self.request_options_provider, dict):
self._request_options_provider = InterpolatedRequestOptionsProvider(config=self.config, **self.request_options_provider)
else:
self._request_options_provider = self.request_options_provider
self._authenticator = self.authenticator or NoAuth(parameters=parameters)
self._http_method = HttpMethod[self.http_method] if isinstance(self.http_method, str) else self.http_method
self.error_handler = self.error_handler
self._parameters = parameters
if self.error_handler is not None and hasattr(self.error_handler, "backoff_strategies"):
backoff_strategies = self.error_handler.backoff_strategies # type: ignore
else:
backoff_strategies = None
api_budget = (
MessagesAndThreadsApiBudget(
policies=[
UnlimitedCallRatePolicy(
matchers=[HttpRequestMatcher(url=self.url_match)],
)
]
)
if self.config.get("credentials", {}).get("option_title") == "Default OAuth2.0 authorization"
else None
)
self._http_client = HttpClient(
name=self.name,
logger=self.logger,
error_handler=self.error_handler,
api_budget=api_budget,
authenticator=self._authenticator,
use_cache=self.use_cache,
backoff_strategy=backoff_strategies,
disable_retries=self.disable_retries,
message_repository=self.message_repository,
)
| MessagesAndThreadsHttpRequester |
python | huggingface__transformers | src/transformers/models/mixtral/modeling_mixtral.py | {
"start": 18341,
"end": 19472
} | class ____(PreTrainedModel):
config: MixtralConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["MixtralDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
_supports_attention_backend = True
_can_record_outputs = {
"router_logits": OutputRecorder(MixtralTopKRouter, index=0),
"hidden_states": MixtralDecoderLayer,
"attentions": MixtralAttention,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
std = self.config.initializer_range
if isinstance(module, MixtralExperts):
init.normal_(module.gate_up_proj, mean=0.0, std=std)
init.normal_(module.down_proj, mean=0.0, std=std)
elif isinstance(module, MixtralTopKRouter):
init.normal_(module.weight, mean=0.0, std=std)
@auto_docstring
| MixtralPreTrainedModel |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/data_structures/lookup_ops_test.py | {
"start": 93547,
"end": 104002
} | class ____(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def test_string_index_table_from_file(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_string_index_table_from_multicolumn_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab1.txt", values=("brain\t300", "salad\t20", "surgery\t1"))
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file,
num_oov_buckets=1,
key_column_index=0,
value_column_index=lookup_ops.TextFileIndex.LINE_NUMBER)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_string_index_table_from_multicolumn_file_custom_delimiter(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab1.txt", values=("brain 300", "salad 20", "surgery 1"))
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file,
num_oov_buckets=1,
key_column_index=0,
value_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,
delimiter=" ")
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_string_index_table_from_file_tensor_filename(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
vocabulary_file = constant_op.constant(vocabulary_file)
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
if not context.executing_eagerly():
self.assertEqual(1,
len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
@test_util.run_v1_only("placeholder usage")
def test_string_index_table_from_file_placeholder_filename(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
vocabulary_placeholder = array_ops.placeholder(dtypes.string, [])
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_placeholder, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
feed_dict = {vocabulary_placeholder.name: vocabulary_file}
lookup_ops.tables_initializer().run(feed_dict=feed_dict)
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
self.assertEqual(0,
len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
def test_int32_index_table_from_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab2.txt", values=("42", "1", "-1000"))
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file,
num_oov_buckets=1,
key_dtype=dtypes.int32)
ids = table.lookup(constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_int64_index_table_from_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab3.txt", values=("42", "1", "-1000"))
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file,
num_oov_buckets=1,
key_dtype=dtypes.int64)
ids = table.lookup(constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_index_table_from_file_with_default_value(self):
default_value = -42
vocabulary_file = self._createVocabFile("f2i_vocab4.txt")
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, default_value), self.evaluate(ids))
def test_index_table_from_file_with_oov_buckets(self):
vocabulary_file = self._createVocabFile("f2i_vocab5.txt")
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1000)
ids = table.lookup(
constant_op.constant(["salad", "surgery", "tarkus", "toccata"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual(
(
1, # From vocabulary file.
2, # From vocabulary file.
867, # 3 + fingerprint("tarkus") mod 300.
860), # 3 + fingerprint("toccata") mod 300.
self.evaluate(ids))
def test_index_table_from_file_fails_with_empty_vocabulary_file_name(self):
self.assertRaises(
ValueError, lookup_ops.index_table_from_file, vocabulary_file="")
def test_index_table_from_file_fails_with_empty_vocabulary(self):
self.assertRaises(
ValueError, lookup_ops.index_table_from_file, vocabulary_file=None)
def test_index_table_from_file_str_fails_with_zero_size_vocabulary(self):
vocabulary_file = self._createVocabFile("zero_vocab_str.txt")
self.assertRaisesRegex(
ValueError, "`vocab_size` must be greater than 0, got 0 for "
"vocabulary_file: .*zero_vocab_str.txt",
lookup_ops.index_table_from_file,
vocabulary_file=vocabulary_file,
vocab_size=0)
def test_index_table_from_file_tensor_fails_with_zero_size_vocabulary(self):
vocabulary_file = constant_op.constant(
self._createVocabFile("zero_vocab_tensor.txt"))
self.assertRaisesRegex(
ValueError, "`vocab_size` must be greater than 0, got 0 for "
"vocabulary_file: .*zero_vocab_tensor.txt",
lookup_ops.index_table_from_file,
vocabulary_file=vocabulary_file,
vocab_size=0)
def test_index_table_from_file_with_vocab_size_too_small(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=2)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, -1, -1), self.evaluate(ids))
self.assertEqual(2, self.evaluate(table.size()))
def test_index_table_from_file_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"Invalid vocab_size"):
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
self.evaluate(table.initializer)
def test_index_table_from_file_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab8.txt")
self.assertRaises(
ValueError,
lookup_ops.index_table_from_file,
vocabulary_file=vocabulary_file,
vocab_size=0)
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(ids)
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual((1, 2, -1), self.evaluate(ids))
self.assertEqual(3, self.evaluate(table.size()))
def test_index_table_from_file_with_invalid_hashers(self):
vocabulary_file = self._createVocabFile("invalid_hasher.txt")
with self.assertRaises(TypeError):
lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=1)
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=lookup_ops.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
constant_op.constant(["salad", "surgery", "tarkus"]))
def test_index_table_from_file_table_ref_with_oov_buckets(self):
vocabulary_file = self._createVocabFile("f2i_vocab9.txt")
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
self.assertIsNotNone(table.resource_handle)
def test_index_table_from_file_table_ref_without_oov_buckets(self):
vocabulary_file = self._createVocabFile("f2i_vocab10.txt")
table = lookup_ops.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=0)
self.assertIsNotNone(table.resource_handle)
| IndexTableFromFile |
python | spyder-ide__spyder | spyder/plugins/shortcuts/widgets/table.py | {
"start": 17727,
"end": 18535
} | class ____(SpyderShortcutsMixin):
"""Shortcut convenience class for holding shortcut context, name,
original ordering index, key sequence for the shortcut and localized text.
"""
def __init__(self, context, name, key=None, plugin_name=None):
self.index = 0 # Sorted index. Populated when loading shortcuts
self.context = context
self.name = name
self.key = key
self.plugin_name = plugin_name
def __str__(self):
return "{0}/{1}: {2}".format(self.context, self.name, self.key)
def load(self):
self.key = self.get_shortcut(self.name, self.context, self.plugin_name)
def save(self):
self.set_shortcut(self.key, self.name, self.context, self.plugin_name)
CONTEXT, NAME, SEQUENCE, SEARCH_SCORE = [0, 1, 2, 3]
| Shortcut |
python | tornadoweb__tornado | tornado/test/httpclient_test.py | {
"start": 31110,
"end": 33362
} | class ____(unittest.TestCase):
def setUp(self):
self.server_ioloop = IOLoop(make_current=False)
event = threading.Event()
@gen.coroutine
def init_server():
sock, self.port = bind_unused_port()
app = Application([("/", HelloWorldHandler)])
self.server = HTTPServer(app)
self.server.add_socket(sock)
event.set()
def start():
self.server_ioloop.run_sync(init_server)
self.server_ioloop.start()
self.server_thread = threading.Thread(target=start)
self.server_thread.start()
event.wait()
self.http_client = HTTPClient()
def tearDown(self):
def stop_server():
self.server.stop()
# Delay the shutdown of the IOLoop by several iterations because
# the server may still have some cleanup work left when
# the client finishes with the response (this is noticeable
# with http/2, which leaves a Future with an unexamined
# StreamClosedError on the loop).
@gen.coroutine
def slow_stop():
yield self.server.close_all_connections()
# The number of iterations is difficult to predict. Typically,
# one is sufficient, although sometimes it needs more.
for i in range(5):
yield
self.server_ioloop.stop()
self.server_ioloop.add_callback(slow_stop)
self.server_ioloop.add_callback(stop_server)
self.server_thread.join()
self.http_client.close()
self.server_ioloop.close(all_fds=True)
def get_url(self, path):
return "http://127.0.0.1:%d%s" % (self.port, path)
def test_sync_client(self):
response = self.http_client.fetch(self.get_url("/"))
self.assertEqual(b"Hello world!", response.body)
def test_sync_client_error(self):
# Synchronous HTTPClient raises errors directly; no need for
# response.rethrow()
with self.assertRaises(HTTPError) as assertion:
self.http_client.fetch(self.get_url("/notfound"))
self.assertEqual(assertion.exception.code, 404)
| SyncHTTPClientTest |
python | scrapy__scrapy | tests/test_downloader_handler_twisted_http10.py | {
"start": 1296,
"end": 1432
} | class ____(TestHttp10):
is_secure = True
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
| TestHttps10 |
python | donnemartin__interactive-coding-challenges | graphs_trees/trie/trie.py | {
"start": 38,
"end": 232
} | class ____(object):
def __init__(self, key, parent=None, terminates=False):
self.key = key
self.terminates = False
self.parent = parent
self.children = {}
| Node |
python | sphinx-doc__sphinx | tests/test_ext_intersphinx/test_ext_intersphinx_cache.py | {
"start": 1869,
"end": 3422
} | class ____:
def __init__(
self,
*,
name: str = 'spam',
version: str | int = 1,
baseurl: str = '',
baseuri: str = '',
file: str | None = None,
) -> None:
#: The project name.
self.name = name
#: The escaped project name.
self.safe_name = re.sub(r'\\s+', ' ', name)
#: The project version as a string.
self.version = version = str(version)
#: The escaped project version.
self.safe_version = re.sub(r'\\s+', ' ', version)
#: The project base URL (e.g., http://localhost:9341).
self.baseurl = baseurl
#: The project base URI, relative to *baseurl* (e.g., 'spam').
self.uri = baseuri
#: The project URL, as specified in :confval:`intersphinx_mapping`.
self.url = posixpath.join(baseurl, baseuri)
#: The project local file, if any.
self.file = file
@property
def record(self) -> dict[str, tuple[str | None, str | None]]:
"""The :confval:`intersphinx_mapping` record for this project."""
return {self.name: (self.url, self.file)}
def normalise(self, entry: InventoryEntry) -> tuple[str, _InventoryItem]:
"""Format an inventory entry as if it were part of this project."""
return entry.name, _InventoryItem(
project_name=self.safe_name,
project_version=self.safe_version,
uri=posixpath.join(self.url, entry.uri),
display_name=entry.display_name,
)
| IntersphinxProject |
python | realpython__materials | tic-tac-toe-ai-python/source_code_bonus/tic-tac-toe/frontends/console/args.py | {
"start": 357,
"end": 1203
} | class ____(NamedTuple):
player1: Player
player2: Player
starting_mark: Mark
def parse_args() -> Args:
parser = argparse.ArgumentParser()
parser.add_argument(
"-X",
dest="player_x",
choices=PLAYER_CLASSES.keys(),
default="human",
)
parser.add_argument(
"-O",
dest="player_o",
choices=PLAYER_CLASSES.keys(),
default="minimax",
)
parser.add_argument(
"--starting",
dest="starting_mark",
choices=Mark,
type=Mark,
default="X",
)
args = parser.parse_args()
player1 = PLAYER_CLASSES[args.player_x](Mark("X"))
player2 = PLAYER_CLASSES[args.player_o](Mark("O"))
if args.starting_mark == "O":
player1, player2 = player2, player1
return Args(player1, player2, args.starting_mark)
| Args |
python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {
"start": 41732,
"end": 42213
} | class ____(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, d_model)`):
Hidden-state at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Hidden-states of the model at the output of each layer.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
| PatchTSMixerEncoderOutput |
python | doocs__leetcode | lcp/LCP 77. 符文储备/Solution.py | {
"start": 0,
"end": 298
} | class ____:
def runeReserve(self, runes: List[int]) -> int:
runes.sort()
ans = i = 0
for j, x in enumerate(runes):
if j and runes[j] - runes[j - 1] > 1:
i = j
else:
ans = max(ans, j - i + 1)
return ans
| Solution |
python | bokeh__bokeh | src/bokeh/models/expressions.py | {
"start": 7341,
"end": 7733
} | class ____(Expression):
""" Base class for coordinate transforms. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
@property
def x(self):
return XComponent(transform=self)
@property
def y(self):
return YComponent(transform=self)
| CoordinateTransform |
python | automl__auto-sklearn | autosklearn/pipeline/components/feature_preprocessing/__init__.py | {
"start": 849,
"end": 5180
} | class ____(AutoSklearnChoice):
@classmethod
def get_components(cls):
components = OrderedDict()
components.update(_preprocessors)
components.update(additional_components.components)
return components
def get_available_components(
self, dataset_properties=None, include=None, exclude=None
):
if dataset_properties is None:
dataset_properties = {}
if include is not None and exclude is not None:
raise ValueError(
"The argument include and exclude cannot be used together."
)
available_comp = self.get_components()
if include is not None:
for incl in include:
if incl not in available_comp:
raise ValueError(
"Trying to include unknown component: " "%s" % incl
)
# TODO check for task type classification and/or regression!
components_dict = OrderedDict()
for name in available_comp:
if include is not None and name not in include:
continue
elif exclude is not None and name in exclude:
continue
entry = available_comp[name]
# Exclude itself to avoid infinite loop
if entry == FeaturePreprocessorChoice or hasattr(entry, "get_components"):
continue
target_type = dataset_properties["target_type"]
if target_type == "classification":
if entry.get_properties()["handles_classification"] is False:
continue
if (
dataset_properties.get("multiclass") is True
and entry.get_properties()["handles_multiclass"] is False
):
continue
if (
dataset_properties.get("multilabel") is True
and entry.get_properties()["handles_multilabel"] is False
):
continue
elif target_type == "regression":
if entry.get_properties()["handles_regression"] is False:
continue
if (
dataset_properties.get("multioutput") is True
and entry.get_properties()["handles_multioutput"] is False
):
continue
else:
raise ValueError("Unknown target type %s" % target_type)
components_dict[name] = entry
return components_dict
def get_hyperparameter_search_space(
self,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
dataset_properties=None,
default=None,
include=None,
exclude=None,
):
cs = ConfigurationSpace()
if dataset_properties is None:
dataset_properties = {}
# Compile a list of legal preprocessors for this problem
available_preprocessors = self.get_available_components(
dataset_properties=dataset_properties, include=include, exclude=exclude
)
if len(available_preprocessors) == 0:
raise ValueError("No preprocessors found, please add NoPreprocessing")
if default is None:
defaults = ["no_preprocessing", "select_percentile", "pca", "truncatedSVD"]
for default_ in defaults:
if default_ in available_preprocessors:
default = default_
break
preprocessor = CategoricalHyperparameter(
"__choice__", list(available_preprocessors.keys()), default_value=default
)
cs.add_hyperparameter(preprocessor)
for name in available_preprocessors:
preprocessor_configuration_space = available_preprocessors[
name
].get_hyperparameter_search_space(dataset_properties=dataset_properties)
parent_hyperparameter = {"parent": preprocessor, "value": name}
cs.add_configuration_space(
name,
preprocessor_configuration_space,
parent_hyperparameter=parent_hyperparameter,
)
return cs
def transform(self, X):
return self.choice.transform(X)
| FeaturePreprocessorChoice |
python | automl__auto-sklearn | test/test_pipeline/components/data_preprocessing/test_balancing.py | {
"start": 1419,
"end": 8915
} | class ____(unittest.TestCase):
def test_balancing_get_weights_treed_single_label(self):
Y = np.array([0] * 80 + [1] * 20)
balancing = Balancing(strategy="weighting")
init_params, fit_params = balancing.get_weights(Y, "adaboost", None, None, None)
self.assertAlmostEqual(
np.mean(fit_params["classifier:sample_weight"]),
1,
)
np.testing.assert_allclose(
fit_params["classifier:sample_weight"],
np.array([0.625] * 80 + [2.5] * 20),
)
def test_balancing_get_weights_treed_multilabel(self):
Y = np.array(
[[0, 0, 0]] * 100
+ [[1, 0, 0]] * 100
+ [[0, 1, 0]] * 100
+ [[1, 1, 0]] * 100
+ [[0, 0, 1]] * 100
+ [[1, 0, 1]] * 10
)
balancing = Balancing(strategy="weighting")
init_params, fit_params = balancing.get_weights(Y, "adaboost", None, None, None)
print(fit_params["classifier:sample_weight"])
self.assertAlmostEqual(
np.mean(fit_params["classifier:sample_weight"]),
1,
)
np.testing.assert_allclose(
fit_params["classifier:sample_weight"],
np.array([0.85] * 500 + [8.5] * 10),
)
def test_balancing_get_weights_svm_sgd(self):
Y = np.array([0] * 80 + [1] * 20)
balancing = Balancing(strategy="weighting")
init_params, fit_params = balancing.get_weights(
Y, "libsvm_svc", None, None, None
)
self.assertEqual(
("classifier:class_weight", "balanced"), list(init_params.items())[0]
)
init_params, fit_params = balancing.get_weights(
Y, None, "liblinear_svc_preprocessor", None, None
)
self.assertEqual(
("feature_preprocessor:class_weight", "balanced"),
list(init_params.items())[0],
)
def test_weighting_effect(self):
data = sklearn.datasets.make_classification(
n_samples=200,
n_features=10,
n_redundant=2,
n_informative=2,
n_repeated=2,
n_clusters_per_class=2,
weights=[0.8, 0.2],
random_state=1,
)
for name, clf, acc_no_weighting, acc_weighting, places in [
("adaboost", AdaboostClassifier, 0.810, 0.735, 3),
("decision_tree", DecisionTree, 0.780, 0.643, 3),
("extra_trees", ExtraTreesClassifier, 0.78, 0.8, 3),
("random_forest", RandomForest, 0.75, 0.789, 3),
("libsvm_svc", LibSVM_SVC, 0.769, 0.72, 3),
("liblinear_svc", LibLinear_SVC, 0.762, 0.735, 3),
("passive_aggressive", PassiveAggressive, 0.16, 0.222, 3),
("sgd", SGD, 0.818, 0.567, 2),
("gradient_boosting", GradientBoostingClassifier, 0.666, 0.682, 2),
]:
for strategy, acc in [
("none", acc_no_weighting),
("weighting", acc_weighting),
]:
# Fit
data_ = copy.copy(data)
X_train = data_[0][:100]
Y_train = data_[1][:100]
X_test = data_[0][100:]
Y_test = data_[1][100:]
model_args = {
"random_state": 1,
"include": {
"classifier": [name],
"feature_preprocessor": ["no_preprocessing"],
},
}
classifier = SimpleClassificationPipeline(**model_args)
cs = classifier.get_hyperparameter_search_space()
default = cs.get_default_configuration()
default._values["balancing:strategy"] = strategy
classifier = SimpleClassificationPipeline(config=default, **model_args)
classifier.fit(X_train, Y_train)
predictions1 = classifier.predict(X_test)
self.assertAlmostEqual(
sklearn.metrics.f1_score(predictions1, Y_test),
acc,
places=places,
msg=(name, strategy),
)
# fit_transformer and fit_estimator
data_ = copy.copy(data)
X_train = data_[0][:100]
Y_train = data_[1][:100]
X_test = data_[0][100:]
Y_test = data_[1][100:]
classifier = SimpleClassificationPipeline(config=default, **model_args)
Xt, fit_params = classifier.fit_transformer(X_train, Y_train)
classifier.fit_estimator(Xt, Y_train, **fit_params)
predictions2 = classifier.predict(X_test)
np.testing.assert_allclose(
predictions1,
predictions2,
err_msg=f"name = {name}, strategy = {strategy}",
)
self.assertAlmostEqual(
sklearn.metrics.f1_score(predictions2, Y_test),
acc,
places=places,
msg=(name, strategy),
)
for name, pre, acc_no_weighting, acc_weighting in [
(
"extra_trees_preproc_for_classification",
ExtraTreesPreprocessorClassification,
0.810,
0.590,
),
("liblinear_svc_preprocessor", LibLinear_Preprocessor, 0.837, 0.562),
]:
for strategy, acc in [
("none", acc_no_weighting),
("weighting", acc_weighting),
]:
data_ = copy.copy(data)
X_train = data_[0][:100]
Y_train = data_[1][:100]
X_test = data_[0][100:]
Y_test = data_[1][100:]
include = {"classifier": ["sgd"], "feature_preprocessor": [name]}
classifier = SimpleClassificationPipeline(
random_state=1, include=include
)
cs = classifier.get_hyperparameter_search_space()
default = cs.get_default_configuration()
default._values["balancing:strategy"] = strategy
classifier.set_hyperparameters(default)
predictor = classifier.fit(X_train, Y_train)
predictions = predictor.predict(X_test)
self.assertAlmostEqual(
sklearn.metrics.f1_score(predictions, Y_test),
acc,
places=3,
msg=(name, strategy),
)
# fit_transformer and fit_estimator
data_ = copy.copy(data)
X_train = data_[0][:100]
Y_train = data_[1][:100]
X_test = data_[0][100:]
Y_test = data_[1][100:]
default._values["balancing:strategy"] = strategy
classifier = SimpleClassificationPipeline(
config=default, random_state=1, include=include
)
Xt, fit_params = classifier.fit_transformer(X_train, Y_train)
classifier.fit_estimator(Xt, Y_train, **fit_params)
predictions = classifier.predict(X_test)
self.assertAlmostEqual(
sklearn.metrics.f1_score(predictions, Y_test), acc, places=3
)
| BalancingComponentTest |
python | weaviate__weaviate-python-client | weaviate/collections/grpc/shared.py | {
"start": 27792,
"end": 28429
class ____:
    """Helpers that decode packed binary buffers into Python lists.

    All formats use platform-native byte order (no endianness prefix in the
    ``struct`` format strings).
    """

    @staticmethod
    def decode_float32s(byte_vector: bytes) -> List[float]:
        """Decode a buffer of 32-bit floats into a list of Python floats."""
        count = len(byte_vector) // UINT32_LEN
        return list(map(float, struct.unpack(f"{count}f", byte_vector)))

    @staticmethod
    def decode_float64s(byte_vector: bytes) -> List[float]:
        """Decode a buffer of 64-bit doubles into a list of Python floats."""
        count = len(byte_vector) // UINT64_LEN
        return list(map(float, struct.unpack(f"{count}d", byte_vector)))

    @staticmethod
    def decode_int64s(byte_vector: bytes) -> List[int]:
        """Decode a buffer of signed 64-bit integers into a list of ints."""
        count = len(byte_vector) // UINT64_LEN
        return list(map(int, struct.unpack(f"{count}q", byte_vector)))
@dataclass
| _ByteOps |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/dbt_project_manager.py | {
"start": 1793,
"end": 2301
class ____(DbtProjectManager):
    """Wraps a DbtProject that has already been fully instantiated. Used for cases where a
    user directly provides a DbtProject to the DbtProjectComponent.
    """

    # The pre-built project this manager wraps; never reconstructed here.
    project: "DbtProject"

    @property
    def defs_state_discriminator(self) -> str:
        # The project's name serves as the state discriminator.
        return self.project.name

    def sync(self, state_path: Path) -> None:
        # No-op: the wrapped project is already fully instantiated, so there
        # is nothing to synchronize into `state_path`.
        pass

    def get_project(self, state_path: Optional[Path]) -> DbtProject:
        # `state_path` is ignored; the pre-built project is returned as-is.
        return self.project
@dataclass
| NoopDbtProjectManager |
python | pytorch__pytorch | test/inductor/test_metrics.py | {
"start": 2089,
"end": 4495
} | class ____(TestCase):
def test_parse_proper_kernel_fn_code(self):
proper_kernel_fn_code = metrics._parse_proper_kernel_fn_code(example_kernel)
assert proper_kernel_fn_code.startswith("def ")
def test_count_args(self):
proper_kernel_fn_code = metrics._parse_proper_kernel_fn_code(example_kernel)
self.assertEqual(6, metrics._count_args(proper_kernel_fn_code))
def test_count_pattern(self):
proper_kernel_fn_code = metrics._parse_proper_kernel_fn_code(example_kernel)
self.assertEqual(2, metrics._count_pattern(proper_kernel_fn_code, "tl.load"))
self.assertEqual(1, metrics._count_pattern(proper_kernel_fn_code, "tl.store"))
self.assertEqual(1, metrics._count_pattern(proper_kernel_fn_code, "for "))
def test_parse_reduction_hint(self):
kernel_category = get_kernel_category_by_source_code(example_kernel)
self.assertEqual("reduction", kernel_category)
self.assertEqual(
"INNER", metrics._parse_reduction_hint(kernel_category, example_kernel)
)
@config.patch("fx_graph_remote_cache", False)
def test_atomic_add(self):
@torch.compile
def f(lhs, index, rhs):
return lhs.index_put_([index], rhs, accumulate=True)
lhs = torch.randn(1024, device=GPU_TYPE)
index = torch.randint(0, 1024, [32], device=GPU_TYPE, dtype=torch.int32)
rhs = torch.randn(32, device=GPU_TYPE)
kernel_list = []
with collect_defined_kernels(kernel_list):
f(lhs, index, rhs)
self.assertEqual(len(kernel_list), 1)
kernel_code = kernel_list[0]
self.assertEqual(metrics._count_pattern(kernel_code, "tl.atomic_add"), 1)
@largeTensorTest(25e7 * 2 * 4, device=GPU_TYPE, inductor=True)
@config.patch("fx_graph_remote_cache", False)
@config.patch("benchmark_kernel", True)
def test_kernel_args_num_gb(self):
@torch.compile
def f(x):
return x + 1
x = torch.randn(int(25e7), device=GPU_TYPE)
kernel_list = []
with collect_defined_kernels(kernel_list):
f(x)
self.assertEqual(len(kernel_list), 1)
kernel_code = kernel_list[0]
self.assertEqual(
metrics._parse_kernel_args_num_gb(kernel_code, "pointwise"), 2.0
)
if __name__ == "__main__":
if HAS_GPU:
run_tests()
| TestMetrics |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_qtcairo.py | {
"start": 165,
"end": 1696
class ____(FigureCanvasCairo, FigureCanvasQT):
    """Qt canvas that renders the figure with cairo and blits it as a QImage."""

    def draw(self):
        # Only render if a cairo context has already been installed by a
        # previous paintEvent; otherwise paintEvent performs the first render.
        if hasattr(self._renderer.gc, "ctx"):
            self._renderer.dpi = self.figure.dpi
            self.figure.draw(self._renderer)
        super().draw()

    def paintEvent(self, event):
        # Work in physical pixels to account for HiDPI scaling.
        width = int(self.device_pixel_ratio * self.width())
        height = int(self.device_pixel_ratio * self.height())
        if (width, height) != self._renderer.get_canvas_width_height():
            # Canvas size changed: rebuild the cairo surface and re-render.
            surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
            self._renderer.set_context(cairo.Context(surface))
            self._renderer.dpi = self.figure.dpi
            self.figure.draw(self._renderer)
        buf = self._renderer.gc.ctx.get_target().get_data()
        if QT_API == "PyQt6":
            # PyQt6's QImage constructor needs an integer pointer, not a
            # buffer object.
            from PyQt6 import sip
            ptr = int(sip.voidptr(buf))
        else:
            ptr = buf
        qimage = QtGui.QImage(
            ptr, width, height,
            QtGui.QImage.Format.Format_ARGB32_Premultiplied)
        # Adjust the buf reference count to work around a memory leak bug in
        # QImage under PySide.
        if QT_API == "PySide2" and QtCore.__version_info__ < (5, 12):
            ctypes.c_long.from_address(id(buf)).value = 1
        qimage.setDevicePixelRatio(self.device_pixel_ratio)
        painter = QtGui.QPainter(self)
        painter.eraseRect(event.rect())
        painter.drawImage(0, 0, qimage)
        self._draw_rect_callback(painter)
        painter.end()
@_BackendQT.export
| FigureCanvasQTCairo |
python | pyca__cryptography | src/cryptography/hazmat/primitives/hashes.py | {
"start": 1290,
"end": 2052
class ____(metaclass=abc.ABCMeta):
    """Abstract interface for an incremental message-digest computation."""

    @property
    @abc.abstractmethod
    def algorithm(self) -> HashAlgorithm:
        """
        A HashAlgorithm that will be used by this context.
        """

    @abc.abstractmethod
    def update(self, data: Buffer) -> None:
        """
        Processes the provided bytes through the hash.
        """

    @abc.abstractmethod
    def finalize(self) -> bytes:
        """
        Finalizes the hash context and returns the hash digest as bytes.
        """

    @abc.abstractmethod
    def copy(self) -> HashContext:
        """
        Return a HashContext that is a copy of the current context.
        """
Hash = rust_openssl.hashes.Hash
HashContext.register(Hash)
XOFHash = rust_openssl.hashes.XOFHash
| HashContext |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_format01.py | {
"start": 315,
"end": 1512
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_format01.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [46335872, 46365696]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | bokeh__bokeh | tests/unit/bokeh/util/test_strings.py | {
"start": 2198,
"end": 2999
class ____:
    """Unit tests for ``bus.format_url_query_arguments``."""

    def test_no_arguments(self) -> None:
        # Without an argument mapping the URL must pass through untouched.
        assert bus.format_url_query_arguments("url") == "url"

    @pytest.mark.parametrize('value', ["10", "10.2", "bar", "a b", "a&b", "'ab'", "a\"b", "a@b", "a?b", "a:b", "a/b", "a=b"])
    def test_one_argument(self, value: str) -> None:
        # Every value must be quoted exactly like urllib's quote_plus.
        assert bus.format_url_query_arguments("url", dict(foo=value)) == f"url?foo={quote_plus(value)}"

    def test_two_arguments(self) -> None:
        # Multiple arguments are joined with '&' in mapping order.
        assert bus.format_url_query_arguments("url", dict(foo="10", bar="a b")) == "url?foo=10&bar=a+b"

    def test_several_arguments(self) -> None:
        args = dict(foo="10.2", bar="a=b", baz="a?b", quux="a@@ b")
        assert bus.format_url_query_arguments("url", args) == "url?foo=10.2&bar=a%3Db&baz=a%3Fb&quux=a%40%40+b"
| Test_format_url_query_arguments |
python | astropy__astropy | astropy/utils/data_info.py | {
"start": 5898,
"end": 6371
class ____:
    """Data descriptor that proxies an attribute of ``instance._parent``.

    Reads and writes on the owning instance are forwarded to the attribute
    named ``attr`` on that instance's ``_parent`` object.  Accessing the
    descriptor on the class itself returns the descriptor, following the
    usual descriptor-protocol convention.
    """

    def __init__(self, attr):
        # Name of the attribute to mirror on the owner's ``_parent``.
        self.attr = attr

    def __get__(self, instance, owner_cls):
        if instance is None:
            # Class-level access: hand back the descriptor object itself.
            return self
        parent = instance._parent
        return getattr(parent, self.attr)

    def __set__(self, instance, value):
        if instance is None:
            # This is an unbound descriptor on the class
            raise ValueError("cannot set unbound descriptor")
        parent = instance._parent
        setattr(parent, self.attr, value)
| ParentAttribute |
python | huggingface__transformers | tests/models/instructblip/test_modeling_instructblip.py | {
"start": 7900,
"end": 10877
} | class ____:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
bos_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
qformer_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
qformer_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask, qformer_input_ids, qformer_attention_mask
def get_config(self):
return InstructBlipQFormerConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
bos_token_id=self.bos_token_id,
)
# this class is based on `OPTModelTester` found in tests/models/opt/test_modeling_opt.py
| InstructBlipQFormerModelTester |
python | ray-project__ray | rllib/models/torch/modules/relative_multi_head_attention.py | {
"start": 327,
"end": 1452
class ____(nn.Module):
    """Creates a [seq_length x seq_length] matrix for rel. pos encoding.

    Denoted as Phi in [2] and [3]. Phi is the standard sinusoid encoding
    matrix.

    Args:
        seq_length: The max. sequence length (time axis).
        out_dim: The number of nodes to go into the first Transformer
            layer with.

    Returns:
        torch.Tensor: The encoding matrix Phi.
    """

    def __init__(self, out_dim, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        # Inverse frequencies over the even feature indices, following the
        # standard sinusoidal encoding: 1 / 10000^(2i / d).
        even_dims = torch.arange(0, self.out_dim, 2.0)
        self.register_buffer(
            "inverse_freq", 1 / (10000 ** (even_dims / self.out_dim))
        )

    def forward(self, seq_length):
        # Positions counted backwards: seq_length-1, ..., 1, 0.
        positions = torch.arange(
            seq_length - 1, -1, -1.0, dtype=torch.float
        ).to(self.inverse_freq.device)
        # Outer product position x frequency -> phase matrix [T, d/2].
        phases = torch.einsum("i,j->ij", positions, self.inverse_freq)
        # Sin half followed by cos half along the feature axis -> [T, d].
        encodings = torch.cat([torch.sin(phases), torch.cos(phases)], dim=-1)
        # Insert a singleton middle axis: [seq_length, 1, out_dim].
        return encodings[:, None, :]
| RelativePositionEmbedding |
python | kamyu104__LeetCode-Solutions | Python/number-of-ways-to-stay-in-the-same-place-after-some-steps.py | {
"start": 61,
"end": 543
class ____(object):
    def numWays(self, steps, arrLen):
        """
        :type steps: int
        :type arrLen: int
        :rtype: int

        Count the ways to stand at index 0 again after exactly `steps`
        moves (left / right / stay) without leaving [0, arrLen - 1].
        The result is returned modulo 10^9 + 7.
        """
        MOD = int(1e9+7)
        # We can never wander further than steps // 2 cells to the right and
        # still make it back, so clamp the simulated width accordingly.
        l = min(1+steps//2, arrLen)
        # dp[i] = number of ways to currently stand at cell i - 1; the two
        # sentinel zeros at both ends remove the need for bounds checks.
        dp = [0]*(l+2)
        dp[1] = 1
        while steps > 0:
            steps -= 1
            new_dp = [0]*(l+2)
            # `range` replaces the Python-2-only `xrange`, which raises
            # NameError on Python 3.
            for i in range(1, l+1):
                new_dp[i] = (dp[i] + dp[i-1] + dp[i+1]) % MOD
            dp = new_dp
        return dp[1]
| Solution |
python | ray-project__ray | python/ray/serve/_private/utils.py | {
"start": 15789,
"end": 15935
class ____(ABC):
    """Minimal clock interface; concrete subclasses supply the time source."""

    @abstractmethod
    def time(self) -> float:
        """Return the current time.

        Subclasses must override this; the base implementation always raises.
        """
        raise NotImplementedError
| TimerBase |
python | bokeh__bokeh | src/bokeh/models/textures.py | {
"start": 1553,
"end": 1889
class ____(Model):
    ''' Base class for ``Texture`` models that represent fill patterns.

    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    # How the texture repeats when tiling; defaults to "repeat".
    # NOTE(review): the help string is empty — presumably intentional or
    # pending; confirm before publishing docs.
    repetition = Enum(TextureRepetition, default="repeat", help="""
    """)
| Texture |
python | getsentry__sentry | src/sentry/digests/backends/dummy.py | {
"start": 285,
"end": 968
class ____(Backend):
    """No-op digests backend: accepts every call, stores and schedules
    nothing, and reports itself as disabled for every project."""

    def add(
        self,
        key: str,
        record: "Record",
        increment_delay: int | None = None,
        maximum_delay: int | None = None,
        timestamp: float | None = None,
    ) -> bool:
        # Nothing is recorded; False presumably signals "no digest ready" —
        # confirm against the Backend contract.
        return False

    def enabled(self, project: "Project") -> bool:
        # Digests are never enabled on the dummy backend.
        return False

    @contextmanager
    def digest(self, key: str, minimum_delay: int | None = None) -> Any:
        # Yield an empty digest so callers can iterate safely.
        yield []

    def schedule(
        self, deadline: float, timestamp: float | None = None
    ) -> Iterable["ScheduleEntry"]:
        # Empty generator: there is never anything scheduled.
        yield from ()

    def maintenance(self, deadline: float, timestamp: float | None = None) -> None:
        # No state, so nothing to maintain.
        pass
| DummyBackend |
python | dask__distributed | distributed/comm/core.py | {
"start": 8695,
"end": 12721
class ____(ABC):
    """Abstract client-side transport: each comm backend implements
    ``connect`` to open a ``Comm`` for its address scheme."""

    @abstractmethod
    async def connect(self, address, deserialize=True):
        """
        Connect to the given address and return a Comm object.

        This function returns a coroutine. It may raise EnvironmentError
        if the other endpoint is unreachable or unavailable. It
        may raise ValueError if the address is malformed.
        """
async def connect(
addr, timeout=None, deserialize=True, handshake_overrides=None, **connection_args
):
"""
Connect to the given address (a URI such as ``tcp://127.0.0.1:1234``)
and yield a ``Comm`` object. If the connection attempt fails, it is
retried until the *timeout* is expired.
"""
if timeout is None:
timeout = dask.config.get("distributed.comm.timeouts.connect")
timeout = parse_timedelta(timeout, default="seconds")
scheme, loc = parse_address(addr)
backend = registry.get_backend(scheme)
connector = backend.get_connector()
comm = None
start = time()
def time_left():
deadline = start + timeout
return max(0, deadline - time())
backoff_base = 0.01
attempt = 0
logger.debug("Establishing connection to %s", loc)
# Prefer multiple small attempts than one long attempt. This should protect
# primarily from DNS race conditions
# gh3104, gh4176, gh4167
intermediate_cap = timeout / 5
active_exception = None
while time_left() > 0:
try:
comm = await wait_for(
connector.connect(loc, deserialize=deserialize, **connection_args),
timeout=min(intermediate_cap, time_left()),
)
break
except FatalCommClosedError:
raise
# Note: CommClosed inherits from OSError
except (asyncio.TimeoutError, OSError) as exc:
active_exception = exc
# As described above, the intermediate timeout is used to distributed
# initial, bulk connect attempts homogeneously. In particular with
# the jitter upon retries we should not be worred about overloading
# any more DNS servers
intermediate_cap = timeout
# FullJitter see https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
upper_cap = min(time_left(), backoff_base * (2**attempt))
backoff = random.uniform(0, upper_cap)
attempt += 1
logger.debug(
"Could not connect to %s, waiting for %s before retrying", loc, backoff
)
await asyncio.sleep(backoff)
else:
raise OSError(
f"Timed out trying to connect to {addr} after {timeout} s"
) from active_exception
local_info = {
**comm.handshake_info(),
**(handshake_overrides or {}),
}
await comm.write(local_info)
handshake = await comm.read()
comm.remote_info = handshake
comm.remote_info["address"] = comm._peer_addr
comm.local_info = local_info
comm.local_info["address"] = comm._local_addr
comm.handshake_options = comm.handshake_configuration(
comm.local_info, comm.remote_info
)
logger.debug("Connection to %s established", loc)
return comm
def listen(addr, handle_comm, deserialize=True, **kwargs):
    """
    Create a listener object with the given parameters. When its ``start()``
    method is called, the listener will listen on the given address
    (a URI such as ``tcp://0.0.0.0``) and call *handle_comm* with a
    ``Comm`` object for each incoming connection.

    *handle_comm* can be a regular function or a coroutine.
    """
    try:
        scheme, loc = parse_address(addr, strict=True)
    except ValueError:
        # Bare address without a scheme: assume TLS when an SSL context was
        # supplied, plain TCP otherwise, then re-parse with the scheme added.
        if kwargs.get("ssl_context"):
            addr = "tls://" + addr
        else:
            addr = "tcp://" + addr
        scheme, loc = parse_address(addr, strict=True)
    # Delegate listener construction to the backend registered for the scheme.
    backend = registry.get_backend(scheme)
    return backend.get_listener(loc, handle_comm, deserialize, **kwargs)
| Connector |
python | huggingface__transformers | tests/models/moshi/test_modeling_moshi.py | {
"start": 2301,
"end": 5085
} | class ____:
def __init__(
self,
parent,
batch_size=4, # need batch_size != num_hidden_layers
seq_length=7,
is_training=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="silu",
rms_norm_eps=0.001,
ffn_dim=32,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=100,
pad_token_id=25,
num_codebooks=4,
audio_encoder_type="mimi",
attn_implementation="eager",
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.rms_norm_eps = rms_norm_eps
self.ffn_dim = ffn_dim
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.pad_token_id = pad_token_id
self.num_codebooks = num_codebooks
self.audio_encoder_type = audio_encoder_type
self.attn_implementation = attn_implementation
def prepare_config_and_inputs(self, batch_size=None):
batch_size = self.batch_size if batch_size is None else batch_size
input_ids = ids_tensor([batch_size, self.seq_length], self.vocab_size)
config = self.get_config()
attention_mask = input_ids.ne(self.pad_token_id)
inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
def get_config(self):
config = MoshiConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
d_ff=self.intermediate_size,
num_codebooks=self.num_codebooks,
rms_norm_eps=self.rms_norm_eps,
tie_word_embeddings=False,
pad_token_id=self.pad_token_id,
ffn_dim=self.ffn_dim,
audio_encoder_config={"model_type": self.audio_encoder_type},
attn_implementation=self.attn_implementation,
)
return config
def prepare_config_and_inputs_for_common(self, batch_size=None):
config, inputs_dict = self.prepare_config_and_inputs(batch_size)
return config, inputs_dict
@require_torch
| MoshiDecoderTester |
python | patrick-kidger__equinox | equinox/nn/_pool.py | {
"start": 5880,
"end": 7407
} | class ____(Pool):
"""One-dimensional downsample using an average over a sliding window."""
def __init__(
self,
kernel_size: int | Sequence[int],
stride: int | Sequence[int] = 1,
padding: int | Sequence[int] | Sequence[tuple[int, int]] = 0,
use_ceil: bool = False,
):
"""**Arguments:**
- `kernel_size`: The size of the convolutional kernel.
- `stride`: The stride of the convolution.
- `padding`: The amount of padding to apply before and after each
spatial dimension.
- `use_ceil`: If `True`, then `ceil` is used to compute the final output
shape instead of `floor`. For `ceil`, if required, extra padding is added.
Defaults to `False`.
"""
super().__init__(
init=0,
operation=lax.add,
num_spatial_dims=1,
kernel_size=kernel_size,
stride=stride,
padding=padding,
use_ceil=use_ceil,
)
@named_scope("eqx.nn.AvgPool1d")
def __call__(self, x: Array, *, key: PRNGKeyArray | None = None) -> Array:
"""**Arguments:**
- `x`: The input. Should be a JAX array of shape `(channels, dim)`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
A JAX array of shape `(channels, new_dim)`.
"""
return super().__call__(x) / math.prod(self.kernel_size)
| AvgPool1d |
python | sympy__sympy | sympy/geometry/line.py | {
"start": 38759,
"end": 45194
} | class ____(LinearEntity):
"""A Ray is a semi-line in the space with a source point and a direction.
Parameters
==========
p1 : Point
The source of the Ray
p2 : Point or radian value
This point determines the direction in which the Ray propagates.
If given as an angle it is interpreted in radians with the positive
direction being ccw.
Attributes
==========
source
See Also
========
sympy.geometry.line.Ray2D
sympy.geometry.line.Ray3D
sympy.geometry.point.Point
sympy.geometry.line.Line
Notes
=====
`Ray` will automatically subclass to `Ray2D` or `Ray3D` based on the
dimension of `p1`.
Examples
========
>>> from sympy import Ray, Point, pi
>>> r = Ray(Point(2, 3), Point(3, 5))
>>> r
Ray2D(Point2D(2, 3), Point2D(3, 5))
>>> r.points
(Point2D(2, 3), Point2D(3, 5))
>>> r.source
Point2D(2, 3)
>>> r.xdirection
oo
>>> r.ydirection
oo
>>> r.slope
2
>>> Ray(Point(0, 0), angle=pi/4).slope
1
"""
def __new__(cls, p1, p2=None, **kwargs):
p1 = Point(p1)
if p2 is not None:
p1, p2 = Point._normalize_dimension(p1, Point(p2))
dim = len(p1)
if dim == 2:
return Ray2D(p1, p2, **kwargs)
elif dim == 3:
return Ray3D(p1, p2, **kwargs)
return LinearEntity.__new__(cls, p1, p2, **kwargs)
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG path element for the LinearEntity.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
verts = (N(self.p1), N(self.p2))
coords = ["{},{}".format(p.x, p.y) for p in verts]
path = "M {} L {}".format(coords[0], " L ".join(coords[1:]))
return (
'<path fill-rule="evenodd" fill="{2}" stroke="#555555" '
'stroke-width="{0}" opacity="0.6" d="{1}" '
'marker-start="url(#markerCircle)" marker-end="url(#markerArrow)"/>'
).format(2.*scale_factor, path, fill_color)
def contains(self, other):
"""
Is other GeometryEntity contained in this Ray?
Examples
========
>>> from sympy import Ray,Point,Segment
>>> p1, p2 = Point(0, 0), Point(4, 4)
>>> r = Ray(p1, p2)
>>> r.contains(p1)
True
>>> r.contains((1, 1))
True
>>> r.contains((1, 3))
False
>>> s = Segment((1, 1), (2, 2))
>>> r.contains(s)
True
>>> s = Segment((1, 2), (2, 5))
>>> r.contains(s)
False
>>> r1 = Ray((2, 2), (3, 3))
>>> r.contains(r1)
True
>>> r1 = Ray((2, 2), (3, 5))
>>> r.contains(r1)
False
"""
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if isinstance(other, Point):
if Point.is_collinear(self.p1, self.p2, other):
# if we're in the direction of the ray, our
# direction vector dot the ray's direction vector
# should be non-negative
return bool((self.p2 - self.p1).dot(other - self.p1) >= S.Zero)
return False
elif isinstance(other, Ray):
if Point.is_collinear(self.p1, self.p2, other.p1, other.p2):
return bool((self.p2 - self.p1).dot(other.p2 - other.p1) > S.Zero)
return False
elif isinstance(other, Segment):
return other.p1 in self and other.p2 in self
# No other known entity can be contained in a Ray
return False
def distance(self, other):
"""
Finds the shortest distance between the ray and a point.
Raises
======
NotImplementedError is raised if `other` is not a Point
Examples
========
>>> from sympy import Point, Ray
>>> p1, p2 = Point(0, 0), Point(1, 1)
>>> s = Ray(p1, p2)
>>> s.distance(Point(-1, -1))
sqrt(2)
>>> s.distance((-1, 2))
3*sqrt(2)/2
>>> p1, p2 = Point(0, 0, 0), Point(1, 1, 2)
>>> s = Ray(p1, p2)
>>> s
Ray3D(Point3D(0, 0, 0), Point3D(1, 1, 2))
>>> s.distance(Point(-1, -1, 2))
4*sqrt(3)/3
>>> s.distance((-1, -1, 2))
4*sqrt(3)/3
"""
if not isinstance(other, GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if self.contains(other):
return S.Zero
proj = Line(self.p1, self.p2).projection(other)
if self.contains(proj):
return abs(other - proj)
else:
return abs(other - self.source)
def equals(self, other):
"""Returns True if self and other are the same mathematical entities"""
if not isinstance(other, Ray):
return False
return self.source == other.source and other.p2 in self
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the Ray. Gives
values that will produce a ray that is 10 units long (where a unit is
the distance between the two points that define the ray).
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Ray, pi
>>> r = Ray((0, 0), angle=pi/4)
>>> r.plot_interval()
[t, 0, 10]
"""
t = _symbol(parameter, real=True)
return [t, 0, 10]
@property
def source(self):
"""The point from which the ray emanates.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Ray
>>> p1, p2 = Point(0, 0), Point(4, 1)
>>> r1 = Ray(p1, p2)
>>> r1.source
Point2D(0, 0)
>>> p1, p2 = Point(0, 0, 0), Point(4, 1, 5)
>>> r1 = Ray(p2, p1)
>>> r1.source
Point3D(4, 1, 5)
"""
return self.p1
| Ray |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 30002,
"end": 31516
class ____(MeanMetricWrapper):
  """Calculates how often predictions match integer labels.

  ```python
  acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1))
  ```

  You can provide logits of classes as `y_pred`, since argmax of
  logits and probabilities are same.

  This metric creates two local variables, `total` and `count` that are used to
  compute the frequency with which `y_pred` matches `y_true`. This frequency is
  ultimately returned as `sparse categorical accuracy`: an idempotent operation
  that simply divides `total` by `count`.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.SparseCategoricalAccuracy()
  >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
  >>> m.result().numpy()
  0.5

  >>> m.reset_state()
  >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
  ...                sample_weight=[0.7, 0.3])
  >>> m.result().numpy()
  0.3

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
  ```
  """

  def __init__(self, name='sparse_categorical_accuracy', dtype=None):
    # Wrap the functional `sparse_categorical_accuracy`; the inherited
    # MeanMetricWrapper machinery maintains the running `total` / `count`.
    super(SparseCategoricalAccuracy, self).__init__(
        sparse_categorical_accuracy, name, dtype=dtype)
| SparseCategoricalAccuracy |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/classes.py | {
"start": 203,
"end": 257
} | class ____:
def __new__(cls, x, y):
pass
| Baz |
python | pennersr__django-allauth | tests/apps/socialaccount/base.py | {
"start": 1300,
"end": 5207
} | class ____:
provider_id: str
def get_mocked_response(self):
pass
def get_expected_to_str(self):
raise NotImplementedError
def setUp(self):
super(OAuthTestsMixin, self).setUp()
self.app = setup_app(self.provider_id)
request = RequestFactory().get("/")
self.provider = self.app.get_provider(request)
@override_settings(SOCIALACCOUNT_AUTO_SIGNUP=False)
def test_login(self):
resp_mocks = self.get_mocked_response()
if resp_mocks is None:
warnings.warn("Cannot test provider %s, no oauth mock" % self.provider.id)
return
resp = self.login(resp_mocks)
self.assertRedirects(resp, reverse("socialaccount_signup"))
resp = self.client.get(reverse("socialaccount_signup"))
sociallogin = resp.context["form"].sociallogin
data = dict(
email=user_email(sociallogin.user),
username=str(random.randrange(1000, 10000000)),
)
resp = self.client.post(reverse("socialaccount_signup"), data=data)
self.assertRedirects(resp, "/accounts/profile/", fetch_redirect_response=False)
user = resp.context["user"]
self.assertFalse(user.has_usable_password())
account = SocialAccount.objects.get(user=user, provider=self.provider.id)
provider_account = account.get_provider_account()
self.assertEqual(provider_account.to_str(), self.get_expected_to_str())
# The following lines don't actually test that much, but at least
# we make sure that the code is hit.
provider_account.get_avatar_url()
provider_account.get_profile_url()
provider_account.get_brand()
@override_settings(
SOCIALACCOUNT_AUTO_SIGNUP=True,
SOCIALACCOUNT_EMAIL_REQUIRED=False,
ACCOUNT_EMAIL_REQUIRED=False,
)
def test_auto_signup(self):
resp_mocks = self.get_mocked_response()
if not resp_mocks:
warnings.warn("Cannot test provider %s, no oauth mock" % self.provider.id)
return
resp = self.login(resp_mocks)
self.assertRedirects(resp, "/accounts/profile/", fetch_redirect_response=False)
self.assertFalse(resp.context["user"].has_usable_password())
def login(self, resp_mocks, process="login"):
with mocked_response(
MockedResponse(
HTTPStatus.OK,
"oauth_token=token&oauth_token_secret=psst",
{"content-type": "text/html"},
)
):
resp = self.client.post(
reverse(self.provider.id + "_login")
+ "?"
+ urlencode(dict(process=process))
)
p = urlparse(resp["location"])
q = parse_qs(p.query)
complete_url = reverse(self.provider.id + "_callback")
self.assertGreater(q["oauth_callback"][0].find(complete_url), 0)
with mocked_response(self.get_access_token_response(), *resp_mocks):
resp = self.client.get(complete_url)
return resp
def get_access_token_response(self):
return MockedResponse(
HTTPStatus.OK,
"oauth_token=token&oauth_token_secret=psst",
{"content-type": "text/html"},
)
def test_authentication_error(self):
resp = self.client.get(reverse(self.provider.id + "_callback"))
self.assertTemplateUsed(
resp,
"socialaccount/authentication_error.%s"
% getattr(settings, "ACCOUNT_TEMPLATE_EXTENSION", "html"),
)
# For backward-compatibility with third-party provider tests that call
# create_oauth_tests() rather than using the mixin directly.
def create_oauth_tests(provider):
class Class(OAuthTestsMixin, TestCase):
provider_id = provider.id
Class.__name__ = "OAuthTests_" + provider.id
return Class
| OAuthTestsMixin |
python | pytorch__pytorch | test/test_multiprocessing.py | {
"start": 6285,
"end": 8493
} | class ____:
def __init__(self, test_case):
self.checked_pids = [os.getpid()]
self.test_case = test_case
def __enter__(self):
self.next_fds = self._get_next_fds(10)
return self
def __exit__(self, *args):
if torch.cuda.is_available():
torch.cuda.ipc_collect()
if args[0] is None:
# Check that the 10th available file-descriptor at the end of the
# test is no more than 4 higher than the 10th available at the
# start. This attempts to catch file descriptor leaks, but allows
# one-off initialization that may use up a file descriptor
# TODO: Disabled because this check is too flaky
# available_fds = self._get_next_fds(10)
# self.test_case.assertLessEqual(
# available_fds[-1] - self.next_fds[-1], 5)
self.test_case.assertFalse(self.has_shm_files())
return False
def check_pid(self, pid):
self.checked_pids.append(pid)
def _get_next_fds(self, n=1):
# dup uses the lowest-numbered unused descriptor for the new descriptor
fds = [os.dup(0) for i in range(n)]
for fd in fds:
os.close(fd)
return fds
def has_shm_files(self, wait=True):
if not HAS_SHM_FILES:
return False
result = self._has_shm_files()
if not result or mp.get_sharing_strategy() != "file_system" or not wait:
return result
total_waiting_time = 0
waiting_time = 0.5
while total_waiting_time <= MAX_WAITING_TIME_IN_SECONDS and result:
time.sleep(waiting_time)
total_waiting_time += waiting_time
result = self._has_shm_files()
return result
def _has_shm_files(self):
gc.collect()
names = ["torch_" + str(pid) for pid in self.checked_pids]
for filename in os.listdir("/dev/shm"):
for name in names:
if filename.startswith(name):
return True
return False
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
| leak_checker |
python | doocs__leetcode | solution/3600-3699/3628.Maximum Number of Subsequences After One Inserting/Solution.py | {
"start": 0,
"end": 566
} | class ____:
def numOfSubsequences(self, s: str) -> int:
def calc(t: str) -> int:
cnt = a = 0
for c in s:
if c == t[1]:
cnt += a
a += int(c == t[0])
return cnt
l, r = 0, s.count("T")
ans = mx = 0
for c in s:
r -= int(c == "T")
if c == "C":
ans += l * r
l += int(c == "L")
mx = max(mx, l * r)
mx = max(mx, calc("LC"), calc("CT"))
ans += mx
return ans
| Solution |
python | Textualize__textual | docs/examples/styles/align.py | {
"start": 64,
"end": 336
} | class ____(App):
CSS_PATH = "align.tcss"
def compose(self):
yield Label("Vertical alignment with [b]Textual[/]", classes="box")
yield Label("Take note, browsers.", classes="box")
if __name__ == "__main__":
app = AlignApp()
app.run()
| AlignApp |
python | sympy__sympy | sympy/solvers/diophantine/diophantine.py | {
"start": 24995,
"end": 29759
} | class ____(DiophantineEquationType):
"""
Representation of a homogeneous ternary quadratic diophantine equation.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.solvers.diophantine.diophantine import HomogeneousTernaryQuadratic
>>> HomogeneousTernaryQuadratic(x**2 + y**2 - 3*z**2 + x*y).solve()
{(-1, 2, 1)}
>>> HomogeneousTernaryQuadratic(3*x**2 + y**2 - 3*z**2 + 5*x*y + y*z).solve()
{(3, 12, 13)}
"""
name = 'homogeneous_ternary_quadratic'
def matches(self):
if not (self.total_degree == 2 and self.dimension == 3):
return False
if not self.homogeneous:
return False
if not self.homogeneous_order:
return False
nonzero = [k for k in self.coeff if self.coeff[k]]
return not (len(nonzero) == 3 and all(i**2 in nonzero for i in self.free_symbols))
def solve(self, parameters=None, limit=None):
self.pre_solve(parameters)
_var = self.free_symbols
coeff = self.coeff
x, y, z = _var
var = [x, y, z]
# Equations of the form B*x*y + C*z*x + E*y*z = 0 and At least two of the
# coefficients A, B, C are non-zero.
# There are infinitely many solutions for the equation.
# Ex: (0, 0, t), (0, t, 0), (t, 0, 0)
# Equation can be re-written as y*(B*x + E*z) = -C*x*z and we can find rather
# unobvious solutions. Set y = -C and B*x + E*z = x*z. The latter can be solved by
# using methods for binary quadratic diophantine equations. Let's select the
# solution which minimizes |x| + |z|
result = DiophantineSolutionSet(var, parameters=self.parameters)
def unpack_sol(sol):
if len(sol) > 0:
return list(sol)[0]
return None, None, None
if not any(coeff[i**2] for i in var):
if coeff[x*z]:
sols = diophantine(coeff[x*y]*x + coeff[y*z]*z - x*z)
s = min(sols, key=lambda r: abs(r[0]) + abs(r[1]))
result.add(_remove_gcd(s[0], -coeff[x*z], s[1]))
return result
var[0], var[1] = _var[1], _var[0]
y_0, x_0, z_0 = unpack_sol(_diop_ternary_quadratic(var, coeff))
if x_0 is not None:
result.add((x_0, y_0, z_0))
return result
if coeff[x**2] == 0:
# If the coefficient of x is zero change the variables
if coeff[y**2] == 0:
var[0], var[2] = _var[2], _var[0]
z_0, y_0, x_0 = unpack_sol(_diop_ternary_quadratic(var, coeff))
else:
var[0], var[1] = _var[1], _var[0]
y_0, x_0, z_0 = unpack_sol(_diop_ternary_quadratic(var, coeff))
else:
if coeff[x*y] or coeff[x*z]:
# Apply the transformation x --> X - (B*y + C*z)/(2*A)
A = coeff[x**2]
B = coeff[x*y]
C = coeff[x*z]
D = coeff[y**2]
E = coeff[y*z]
F = coeff[z**2]
_coeff = {}
_coeff[x**2] = 4*A**2
_coeff[y**2] = 4*A*D - B**2
_coeff[z**2] = 4*A*F - C**2
_coeff[y*z] = 4*A*E - 2*B*C
_coeff[x*y] = 0
_coeff[x*z] = 0
x_0, y_0, z_0 = unpack_sol(_diop_ternary_quadratic(var, _coeff))
if x_0 is None:
return result
p, q = _rational_pq(B*y_0 + C*z_0, 2*A)
x_0, y_0, z_0 = x_0*q - p, y_0*q, z_0*q
elif coeff[z*y] != 0:
if coeff[y**2] == 0:
if coeff[z**2] == 0:
# Equations of the form A*x**2 + E*yz = 0.
A = coeff[x**2]
E = coeff[y*z]
b, a = _rational_pq(-E, A)
x_0, y_0, z_0 = b, a, b
else:
# Ax**2 + E*y*z + F*z**2 = 0
var[0], var[2] = _var[2], _var[0]
z_0, y_0, x_0 = unpack_sol(_diop_ternary_quadratic(var, coeff))
else:
# A*x**2 + D*y**2 + E*y*z + F*z**2 = 0, C may be zero
var[0], var[1] = _var[1], _var[0]
y_0, x_0, z_0 = unpack_sol(_diop_ternary_quadratic(var, coeff))
else:
# Ax**2 + D*y**2 + F*z**2 = 0, C may be zero
x_0, y_0, z_0 = unpack_sol(_diop_ternary_quadratic_normal(var, coeff))
if x_0 is None:
return result
result.add(_remove_gcd(x_0, y_0, z_0))
return result
| HomogeneousTernaryQuadratic |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/assertsql.py | {
"start": 494,
"end": 818
} | class ____:
is_consumed = False
errormessage = None
consume_statement = True
def process_statement(self, execute_observed):
pass
def no_more_statements(self):
assert False, (
"All statements are complete, but pending "
"assertion rules remain"
)
| AssertRule |
python | django__django | tests/cache/tests.py | {
"start": 51427,
"end": 52375
} | class ____(TestCase):
databases = {"default", "other"}
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using="default"):
management.call_command("createcachetable", database="default", verbosity=0)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections["other"].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using="other"):
management.call_command("createcachetable", database="other", verbosity=0)
| CreateCacheTableForDBCacheTests |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/composite_tensor_ops_test.py | {
"start": 1702,
"end": 6338
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters([
('Ragged', lambda: ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]])),
('Sparse', lambda: sparse_ops.from_dense([[0, 0, 3, 0], [1, 2, 0, 0]])),
])
def testEncodeAndDecode(self, value_factory):
value = value_factory()
encoded = composite_tensor_ops.composite_tensor_to_variants(value)
self.assertEqual(encoded.dtype, dtypes.variant)
self.assertEqual(encoded.shape.rank, 0)
decoded = composite_tensor_ops.composite_tensor_from_variant(
encoded, value._type_spec)
self.assertTrue(value._type_spec.is_compatible_with(decoded._type_spec))
value_components = nest.flatten(value, expand_composites=True)
decoded_components = nest.flatten(decoded, expand_composites=True)
self.assertLen(value_components, len(decoded_components))
for v, d in zip(value_components, decoded_components):
self.assertAllEqual(v, d)
@parameterized.named_parameters([
('WrongType', lambda: ragged_factory_ops.constant([[1]]),
sparse_tensor.SparseTensorSpec([None, None], dtypes.int32),
r'Expected a SPARSE_TENSOR_SPEC \(based on `type_spec`\), but `encoded` '
'contains a RAGGED_TENSOR_SPEC'),
('WrongNumComponents', lambda: ragged_factory_ops.constant([[1]]),
ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.int32),
'Encoded value has 2 tensor components; expected 3 components'),
('WrongDType', lambda: ragged_factory_ops.constant([[1]]),
ragged_tensor.RaggedTensorSpec([None, None], dtypes.float32),
'Tensor component 0 had dtype DT_INT32; expected dtype DT_FLOAT'),
])
def testDecodingErrors(self, value, spec, message):
encoded = composite_tensor_ops.composite_tensor_to_variants(value())
with self.assertRaisesRegex(errors.InvalidArgumentError, message):
self.evaluate(
composite_tensor_ops.composite_tensor_from_variant(encoded, spec))
@parameterized.named_parameters([
('IncompatibleSpec', lambda: ragged_factory_ops.constant([[1]]),
ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.int32),
r'`type_spec` .* is not compatible with `value` .*'),
])
def testEncodingErrors(self, value, spec, message):
with self.assertRaisesRegex(ValueError, message):
composite_tensor_ops.composite_tensor_to_variants(value(), spec)
def testDecodingEmptyNonScalarTensorError(self):
if not context.executing_eagerly():
# Creating a variant tensor of an empty list is not allowed in eager mode.
return
with self.assertRaisesRegex(errors.InvalidArgumentError,
'must not be an empty variant tensor'):
gen_composite_tensor_ops.CompositeTensorVariantToComponents(
encoded=constant_op.constant([], dtype=dtypes.variant),
metadata='',
Tcomponents=[dtypes.int32])
def testDecodingInvalidEncodedInputError(self):
with self.assertRaisesRegex(errors.InvalidArgumentError,
'not a valid CompositeTensorVariant tensor'):
self.evaluate(
gen_composite_tensor_ops.CompositeTensorVariantToComponents(
encoded=gen_list_ops.EmptyTensorList(
element_dtype=dtypes.int32,
element_shape=[1, 2],
max_num_elements=2),
metadata='',
Tcomponents=[dtypes.int32]))
def testRoundTripThroughTensorProto(self):
value = ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]])
encoded = composite_tensor_ops.composite_tensor_to_variants(value)
proto = parsing_ops.SerializeTensor(tensor=encoded)
parsed = parsing_ops.ParseTensor(serialized=proto, out_type=dtypes.variant)
decoded = composite_tensor_ops.composite_tensor_from_variant(
parsed, value._type_spec)
self.assertAllEqual(value, decoded)
def testGradient(self):
def func(x):
x2 = composite_tensor_ops.composite_tensor_to_variants(x * 2)
x3 = composite_tensor_ops.composite_tensor_from_variant(x2, x._type_spec)
return x3.with_values(x3.values * math_ops.range(6.0))
x = ragged_factory_ops.constant([[1.0, 2.0, 3.0], [4.0], [5.0, 6.0]])
if context.executing_eagerly():
with backprop.GradientTape() as t:
t.watch(x.values)
y = func(x)
g = t.gradient(y.values, x.values)
else:
y = func(x)
g = gradients_impl.gradients(ys=y.values, xs=x.values)[0]
self.assertAllClose(g, [0.0, 2.0, 4.0, 6.0, 8.0, 10.0])
if __name__ == '__main__':
googletest.main()
| ExtensionTypeTest |
python | django-haystack__django-haystack | test_haystack/test_indexes.py | {
"start": 654,
"end": 1012
} | class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
content2 = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr="author")
pub_date = indexes.DateTimeField(model_attr="pub_date")
def get_model(self):
return MockModel
| BadSearchIndex2 |
python | PyCQA__pylint | tests/functional/c/class_members_py30.py | {
"start": 1081,
"end": 1277
} | class ____(metaclass=Metaclass):
""" empty """
TestMetaclass.register(int)
UsingMetaclass.test()
TestMetaclass().register(int) # [no-member]
UsingMetaclass().test() # [no-member]
| UsingMetaclass |
python | huggingface__transformers | tests/models/evolla/test_modeling_evolla.py | {
"start": 12927,
"end": 15015
} | class ____(TestCasePlus):
def _prepare_for_inputs(self):
aa_seq = "MLLEETLKSCPIVKRGKYHYFIHPISDGVPLVEPKLLREVATRIIKIGNFEGVNKIVTAEAMGIPLVTTLSLYTDIPYVIMRKREYKLPGEVPVFQSTGYSKGQLYLNGIEKGDKVIIIDDVISTGGTMIAIINALERAGAEIKDIICVIERGDGKKIVEEKTGYKIKTLVKIDVVDGEVVIL"
foldseek = "dvvvvqqqpfawdddppdtdgcgclapvpdpddpvvlvvllvlcvvpadpvqaqeeeeeddscpsnvvsncvvpvhyydywylddppdppkdwqwf######gitidpdqaaaheyeyeeaeqdqlrvvlsvvvrcvvrnyhhrayeyaeyhycnqvvccvvpvghyhynwywdqdpsgidtd"
question = "What is the function of this protein?"
protein_information = {
"aa_seq": aa_seq,
"foldseek": foldseek,
}
messages = [
{"role": "system", "content": "You are an AI expert that can answer any questions about protein."},
{"role": "user", "content": question},
]
return protein_information, messages
@cached_property
def default_processor(self):
return EvollaProcessor.from_pretrained("westlake-repl/Evolla-10B-hf")
@require_bitsandbytes
@slow
def test_inference_natural_language_protein_reasoning(self):
protein_information, messages = self._prepare_for_inputs()
processor = self.default_processor
inputs = processor(
messages_list=[messages], proteins=[protein_information], return_tensors="pt", padding="longest"
).to(torch_device)
# the CI gpu is small so using quantization to fit
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype="float16",
)
model = EvollaForProteinText2Text.from_pretrained(
"westlake-repl/Evolla-10B-hf",
quantization_config=quantization_config,
device_map=torch_device,
)
generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)
self.assertIn("This protein", generated_text[0])
self.assertIn("purine", generated_text[0])
| EvollaModelIntegrationTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec20.py | {
"start": 1031,
"end": 1393
} | class ____(Generic[P1]):
f: Callable[P1, int]
def z1(x: Z[[int, str, bool]]) -> str: ...
def z2(x: Z[int, str, bool]) -> str: ...
# This should generate an error.
def z3(x: Z[[int, [str], bool]]) -> str: ...
# This should generate an error.
def z4(x: Z[[[int, str, bool]]]) -> str: ...
# This should generate an error.
def z5(x: Z[[...]]) -> str: ...
| Z |
python | pennersr__django-allauth | allauth/socialaccount/providers/snapchat/views.py | {
"start": 380,
"end": 1945
} | class ____(OAuth2Adapter):
provider_id = PROVIDER_ID
access_token_url = "https://accounts.snapchat.com/accounts/oauth2/token" # nosec
authorize_url = "https://accounts.snapchat.com/accounts/oauth2/auth"
identity_url = "https://api.snapkit.com/v1/me"
def complete_login(self, request, app, token, **kwargs):
extra_data = self.get_data(token.token)
return self.get_provider().sociallogin_from_response(request, extra_data)
def get_data(self, token):
settings = app_settings.PROVIDERS.get(self.provider_id, {})
provider_scope = settings.get(
"SCOPE",
"['https://auth.snapchat.com/oauth2/api/user.external_id', 'https://auth.snapchat.com/oauth2/api/user.display_name']",
)
hed = {
"Authorization": "Bearer " + token,
"Content-Type": "application/json;charset=UTF-8",
}
if Scope.BITMOJI in provider_scope:
data = {"query": "{ me { externalId displayName bitmoji { avatar id } } }"}
else:
data = {"query": "{ me { externalId displayName } }"}
resp = (
get_adapter()
.get_requests_session()
.post(self.identity_url, headers=hed, json=data)
)
resp.raise_for_status()
resp = resp.json()
if not resp.get("data"):
raise OAuth2Error()
return resp
oauth2_login = OAuth2LoginView.adapter_view(SnapchatOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(SnapchatOAuth2Adapter)
| SnapchatOAuth2Adapter |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-microsoft-onedrive/source_microsoft_onedrive/spec.py | {
"start": 2574,
"end": 5296
} | class ____(AbstractFileBasedSpec, BaseModel):
"""
SourceMicrosoftOneDriveSpec class for Microsoft OneDrive Source Specification.
This class combines the authentication details with additional configuration for the OneDrive API.
"""
class Config:
title = "Microsoft OneDrive Source Spec"
# Union type for credentials, allowing for either OAuth or Service Key authentication
credentials: Union[OAuthCredentials, ServiceCredentials] = Field(
title="Authentication",
description="Credentials for connecting to the One Drive API",
discriminator="auth_type",
type="object",
order=0,
)
drive_name: Optional[str] = Field(
title="Drive Name", description="Name of the Microsoft OneDrive drive where the file(s) exist.", default="OneDrive", order=2
)
search_scope: str = Field(
title="Search Scope",
description="Specifies the location(s) to search for files. Valid options are 'ACCESSIBLE_DRIVES' to search in the selected OneDrive drive, 'SHARED_ITEMS' for shared items the user has access to, and 'ALL' to search both.",
default="ALL",
enum=["ACCESSIBLE_DRIVES", "SHARED_ITEMS", "ALL"],
order=3,
)
folder_path: str = Field(
title="Folder Path",
description="Path to a specific folder within the drives to search for files. Leave empty to search all folders of the drives. This does not apply to shared items.",
order=4,
default=".",
)
@classmethod
def documentation_url(cls) -> str:
"""Provides the URL to the documentation for this specific source."""
return "https://docs.airbyte.com/integrations/sources/one-drive"
@classmethod
def schema(cls, *args: Any, **kwargs: Any) -> Dict[str, Any]:
"""
Generates the schema mapping for configuration fields.
It also cleans up the schema by removing legacy settings and discriminators.
"""
schema = super().schema(*args, **kwargs)
# Remove legacy settings related to streams
dpath.util.delete(schema, "properties/streams/items/properties/legacy_prefix")
dpath.util.delete(schema, "properties/streams/items/properties/format/oneOf/*/properties/inference_type")
# Hide API processing option until https://github.com/airbytehq/airbyte-platform-internal/issues/10354 is fixed
processing_options = dpath.util.get(schema, "properties/streams/items/properties/format/oneOf/4/properties/processing/oneOf")
dpath.util.set(schema, "properties/streams/items/properties/format/oneOf/4/properties/processing/oneOf", processing_options[:1])
return schema
| SourceMicrosoftOneDriveSpec |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 125087,
"end": 125410
} | class ____:
xlExponential = 5 # from enum XlTrendlineType
xlLinear = -4132 # from enum XlTrendlineType
xlLogarithmic = -4133 # from enum XlTrendlineType
xlMovingAvg = 6 # from enum XlTrendlineType
xlPolynomial = 3 # from enum XlTrendlineType
xlPower = 4 # from enum XlTrendlineType
| TrendlineType |
python | huggingface__transformers | src/transformers/models/zamba2/modular_zamba2.py | {
"start": 7230,
"end": 12806
} | class ____(ZambaAttention):
"""
Multi-headed attention from 'Attention Is All You Need' paper.
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://huggingface.co/papers/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
Finally, this attention layer contributes to tied transformer blocks aimed to increasing compute without increasing model size. Because this
layer is tied, un-tied adapters (formally the same as LoRA but used in the base model) modules are added to the q, k, v projectors to increase
expressivity with a small memory overhead (see Fig. 2 of https://huggingface.co/papers/2411.15242).
"""
def __init__(
self,
config: Zamba2Config,
layer_idx: Optional[int] = None,
num_fwd_mem_blocks: Optional[int] = None,
block_id: Optional[int] = None,
):
super().__init__(config, layer_idx)
self.num_fwd_mem_blocks = num_fwd_mem_blocks
self.layer_block_map = config.hybrid_layer_ids
self.block_id = block_id
if config.use_shared_attention_adapter:
self.linear_q_adapter_list = nn.ModuleList([])
self.linear_k_adapter_list = nn.ModuleList([])
self.linear_v_adapter_list = nn.ModuleList([])
for i in range(self.num_fwd_mem_blocks):
if i % config.num_mem_blocks == block_id:
linear_q_adapter = nn.Sequential(
nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False),
nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False),
)
linear_k_adapter = nn.Sequential(
nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False),
nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False),
)
linear_v_adapter = nn.Sequential(
nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False),
nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False),
)
else:
linear_q_adapter = nn.Identity()
linear_k_adapter = nn.Identity()
linear_v_adapter = nn.Identity()
self.linear_q_adapter_list.append(linear_q_adapter)
self.linear_k_adapter_list.append(linear_k_adapter)
self.linear_v_adapter_list.append(linear_v_adapter)
self.layer_dic = {value: index for index, value in enumerate(self.layer_block_map)}
def forward(
self,
hidden_states: torch.Tensor,
layer_idx: int,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Zamba2HybridDynamicCache] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
position_ids: Optional[torch.Tensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
if self.config.use_shared_attention_adapter:
adapter_layer_idx = self.layer_dic[layer_idx]
query_states = query_states + self.linear_q_adapter_list[adapter_layer_idx](hidden_states)
key_states = key_states + self.linear_k_adapter_list[adapter_layer_idx](hidden_states)
value_states = value_states + self.linear_v_adapter_list[adapter_layer_idx](hidden_states)
query_states = query_states.view(hidden_shape).transpose(1, 2)
key_states = key_states.view(hidden_shape).transpose(1, 2)
value_states = value_states.view(hidden_shape).transpose(1, 2)
if self.config.use_mem_rope:
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
key_states, value_states = past_key_values.update(key_states, value_states, layer_idx)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Zamba2Attention |
python | coleifer__peewee | peewee.py | {
"start": 43866,
"end": 45484
} | class ____(ColumnBase):
def __init__(self, lhs, op, rhs, flat=False):
self.lhs = lhs
self.op = op
self.rhs = rhs
self.flat = flat
def __sql__(self, ctx):
overrides = {'parentheses': not self.flat, 'in_expr': True}
# First attempt to unwrap the node on the left-hand-side, so that we
# can get at the underlying Field if one is present.
node = raw_node = self.lhs
if isinstance(raw_node, WrappedNode):
node = raw_node.unwrap()
# Set up the appropriate converter if we have a field on the left side.
if isinstance(node, Field) and raw_node._coerce:
overrides['converter'] = node.db_value
overrides['is_fk_expr'] = isinstance(node, ForeignKeyField)
else:
overrides['converter'] = None
if ctx.state.operations:
op_sql = ctx.state.operations.get(self.op, self.op)
else:
op_sql = self.op
with ctx(**overrides):
# Postgresql reports an error for IN/NOT IN (), so convert to
# the equivalent boolean expression.
op_in = self.op == OP.IN or self.op == OP.NOT_IN
if op_in and ctx.as_new().parse(self.rhs)[0] == '()':
return ctx.literal('0 = 1' if self.op == OP.IN else '1 = 1')
rhs = self.rhs
if rhs is None and (self.op == OP.IS or self.op == OP.IS_NOT):
rhs = SQL('NULL')
return (ctx
.sql(self.lhs)
.literal(' %s ' % op_sql)
.sql(rhs))
| Expression |
python | kamyu104__LeetCode-Solutions | Python/moving-stones-until-consecutive.py | {
"start": 421,
"end": 1053
} | class ____(object):
def numMovesStones(self, a, b, c):
"""
:type a: int
:type b: int
:type c: int
:rtype: List[int]
"""
stones = [a, b, c]
stones.sort()
left, min_moves = 0, float("inf")
max_moves = (stones[-1]-stones[0]) - (len(stones)-1)
for right in xrange(len(stones)):
while stones[right]-stones[left]+1 > len(stones): # find window size <= len(stones)
left += 1
min_moves = min(min_moves, len(stones)-(right-left+1)) # move stones not in this window
return [min_moves, max_moves]
| Solution2 |
python | apache__airflow | providers/standard/tests/unit/standard/sensors/test_external_task_sensor.py | {
"start": 65251,
"end": 85304
} | class ____:
def test_serialized_fields(self):
assert {"recursion_depth"}.issubset(ExternalTaskMarker.get_serialized_fields())
def test_serialized_external_task_marker(self):
dag = DAG("test_serialized_external_task_marker", schedule=None, start_date=DEFAULT_DATE)
task = ExternalTaskMarker(
task_id="parent_task",
external_dag_id="external_task_marker_child",
external_task_id="child_task1",
dag=dag,
)
serialized_op = SerializedBaseOperator.serialize_operator(task)
deserialized_op = SerializedBaseOperator.deserialize_operator(serialized_op)
assert deserialized_op.task_type == "ExternalTaskMarker"
assert getattr(deserialized_op, "external_dag_id") == "external_task_marker_child"
assert getattr(deserialized_op, "external_task_id") == "child_task1"
@pytest.fixture
def dag_bag_ext():
"""
Create a DagBag with DAGs looking like this. The dotted lines represent external dependencies
set up using ExternalTaskMarker and ExternalTaskSensor.
dag_0: task_a_0 >> task_b_0
|
|
dag_1: ---> task_a_1 >> task_b_1
|
|
dag_2: ---> task_a_2 >> task_b_2
|
|
dag_3: ---> task_a_3 >> task_b_3
"""
clear_db_runs()
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule=None)
task_a_0 = EmptyOperator(task_id="task_a_0", dag=dag_0)
task_b_0 = ExternalTaskMarker(
task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3, dag=dag_0
)
task_a_0 >> task_b_0
dag_1 = DAG("dag_1", start_date=DEFAULT_DATE, schedule=None)
task_a_1 = ExternalTaskSensor(
task_id="task_a_1", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1
)
task_b_1 = ExternalTaskMarker(
task_id="task_b_1", external_dag_id="dag_2", external_task_id="task_a_2", recursion_depth=2, dag=dag_1
)
task_a_1 >> task_b_1
dag_2 = DAG("dag_2", start_date=DEFAULT_DATE, schedule=None)
task_a_2 = ExternalTaskSensor(
task_id="task_a_2", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2
)
task_b_2 = ExternalTaskMarker(
task_id="task_b_2", external_dag_id="dag_3", external_task_id="task_a_3", recursion_depth=1, dag=dag_2
)
task_a_2 >> task_b_2
dag_3 = DAG("dag_3", start_date=DEFAULT_DATE, schedule=None)
task_a_3 = ExternalTaskSensor(
task_id="task_a_3", external_dag_id=dag_2.dag_id, external_task_id=task_b_2.task_id, dag=dag_3
)
task_b_3 = EmptyOperator(task_id="task_b_3", dag=dag_3)
task_a_3 >> task_b_3
for dag in [dag_0, dag_1, dag_2, dag_3]:
if AIRFLOW_V_3_0_PLUS:
dag_bag.bag_dag(dag=dag)
else:
dag_bag.bag_dag(dag=dag, root_dag=dag)
yield dag_bag
clear_db_runs()
@pytest.fixture
def dag_bag_parent_child():
"""
Create a DagBag with two DAGs looking like this. task_1 of child_dag_1 on day 1 depends on
task_0 of parent_dag_0 on day 1. Therefore, when task_0 of parent_dag_0 on day 1 and day 2
are cleared, parent_dag_0 DagRuns need to be set to running on both days, but child_dag_1
only needs to be set to running on day 1.
day 1 day 2
parent_dag_0 task_0 task_0
|
|
v
child_dag_1 task_1 task_1
"""
clear_db_runs()
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
day_1 = DEFAULT_DATE
with DAG("parent_dag_0", start_date=day_1, schedule=None) as dag_0:
task_0 = ExternalTaskMarker(
task_id="task_0",
external_dag_id="child_dag_1",
external_task_id="task_1",
logical_date=day_1.isoformat(),
recursion_depth=3,
)
with DAG("child_dag_1", start_date=day_1, schedule=None) as dag_1:
ExternalTaskSensor(
task_id="task_1",
external_dag_id=dag_0.dag_id,
external_task_id=task_0.task_id,
execution_date_fn=lambda logical_date: day_1 if logical_date == day_1 else [],
mode="reschedule",
)
for dag in [dag_0, dag_1]:
if AIRFLOW_V_3_0_PLUS:
dag_bag.bag_dag(dag=dag)
else:
dag_bag.bag_dag(dag=dag, root_dag=dag)
yield dag_bag
clear_db_runs()
@provide_session
def run_tasks(
dag_bag: DagBag,
logical_date=DEFAULT_DATE,
session=NEW_SESSION,
) -> tuple[dict[str, DagRun], dict[str, TaskInstance]]:
"""
Run all tasks in the DAGs in the given dag_bag. Return the TaskInstance objects as a dict
keyed by task_id.
"""
runs: dict[str, DagRun] = {}
tis: dict[str, TaskInstance] = {}
for dag in dag_bag.dags.values():
data_interval = DataInterval(coerce_datetime(logical_date), coerce_datetime(logical_date))
if AIRFLOW_V_3_0_PLUS:
runs[dag.dag_id] = dagrun = create_scheduler_dag(dag).create_dagrun(
run_id=dag.timetable.generate_run_id(
run_type=DagRunType.MANUAL,
run_after=logical_date,
data_interval=data_interval,
),
logical_date=logical_date,
data_interval=data_interval,
run_after=logical_date,
run_type=DagRunType.MANUAL,
triggered_by=DagRunTriggeredByType.TEST,
state=DagRunState.RUNNING,
start_date=logical_date,
session=session,
)
else:
runs[dag.dag_id] = dagrun = dag.create_dagrun( # type: ignore[attr-defined,call-arg]
run_id=dag.timetable.generate_run_id( # type: ignore[call-arg]
run_type=DagRunType.MANUAL,
logical_date=logical_date,
data_interval=data_interval,
),
execution_date=logical_date,
data_interval=data_interval,
run_type=DagRunType.MANUAL,
state=DagRunState.RUNNING,
start_date=logical_date,
session=session,
)
# we use sorting by task_id here because for the test DAG structure of ours
# this is equivalent to topological sort. It would not work in general case
# but it works for our case because we specifically constructed test DAGS
# in the way that those two sort methods are equivalent
tasks = sorted(dagrun.task_instances, key=lambda ti: ti.task_id)
for ti in tasks:
ti.refresh_from_task(dag.get_task(ti.task_id))
tis[ti.task_id] = ti
ti.run(session=session)
session.flush()
session.merge(ti)
assert_ti_state_equal(ti, State.SUCCESS)
return runs, tis
def assert_ti_state_equal(task_instance, state):
"""
Assert state of task_instances equals the given state.
"""
task_instance.refresh_from_db()
assert task_instance.state == state
@provide_session
def clear_tasks(
dag_bag,
dag,
task,
session,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
dry_run=False,
):
"""
Clear the task and its downstream tasks recursively for the dag in the given dagbag.
"""
partial: DAG = dag.partial_subset(task_ids_or_regex=[task.task_id], include_downstream=True)
return partial.clear(
start_date=start_date,
end_date=end_date,
dag_bag=dag_bag,
dry_run=dry_run,
session=session,
)
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Different test for 3.0+")
def test_external_task_marker_transitive(dag_bag_ext):
"""
Test clearing tasks across DAGs.
"""
_, tis = run_tasks(dag_bag_ext)
dag_0 = dag_bag_ext.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
clear_tasks(dag_bag_ext, dag_0, task_a_0)
ti_a_0 = tis["task_a_0"]
ti_b_3 = tis["task_b_3"]
assert_ti_state_equal(ti_a_0, State.NONE)
assert_ti_state_equal(ti_b_3, State.NONE)
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Different test for 3.0+")
@provide_session
def test_external_task_marker_clear_activate(dag_bag_parent_child, session):
"""
Test clearing tasks across DAGs and make sure the right DagRuns are activated.
"""
dag_bag = dag_bag_parent_child
day_1 = DEFAULT_DATE
day_2 = DEFAULT_DATE + timedelta(days=1)
run_tasks(dag_bag, logical_date=day_1)
run_tasks(dag_bag, logical_date=day_2)
# Assert that dagruns of all the affected dags are set to SUCCESS before tasks are cleared.
for dag, execution_date in itertools.product(dag_bag.dags.values(), [day_1, day_2]):
dagrun = dag.get_dagrun(execution_date=execution_date, session=session)
dagrun.set_state(State.SUCCESS)
session.flush()
dag_0 = dag_bag.get_dag("parent_dag_0")
task_0 = dag_0.get_task("task_0")
clear_tasks(dag_bag, dag_0, task_0, start_date=day_1, end_date=day_2, session=session)
# Assert that dagruns of all the affected dags are set to QUEUED after tasks are cleared.
# Unaffected dagruns should be left as SUCCESS.
dagrun_0_1 = dag_bag.get_dag("parent_dag_0").get_dagrun(execution_date=day_1, session=session)
dagrun_0_2 = dag_bag.get_dag("parent_dag_0").get_dagrun(execution_date=day_2, session=session)
dagrun_1_1 = dag_bag.get_dag("child_dag_1").get_dagrun(execution_date=day_1, session=session)
dagrun_1_2 = dag_bag.get_dag("child_dag_1").get_dagrun(execution_date=day_2, session=session)
assert dagrun_0_1.state == State.QUEUED
assert dagrun_0_2.state == State.QUEUED
assert dagrun_1_1.state == State.QUEUED
assert dagrun_1_2.state == State.SUCCESS
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Different test for 3.0+")
def test_external_task_marker_future(dag_bag_ext):
"""
Test clearing tasks with no end_date. This is the case when users clear tasks with
Future, Downstream and Recursive selected.
"""
date_0 = DEFAULT_DATE
date_1 = DEFAULT_DATE + timedelta(days=1)
_, tis_date_0 = run_tasks(dag_bag_ext, logical_date=date_0)
_, tis_date_1 = run_tasks(dag_bag_ext, logical_date=date_1)
dag_0 = dag_bag_ext.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
# This should clear all tasks on dag_0 to dag_3 on both date_0 and date_1
clear_tasks(dag_bag_ext, dag_0, task_a_0, end_date=None)
ti_a_0_date_0 = tis_date_0["task_a_0"]
ti_b_3_date_0 = tis_date_0["task_b_3"]
ti_b_3_date_1 = tis_date_1["task_b_3"]
assert_ti_state_equal(ti_a_0_date_0, State.NONE)
assert_ti_state_equal(ti_b_3_date_0, State.NONE)
assert_ti_state_equal(ti_b_3_date_1, State.NONE)
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Different test for 3.0+")
def test_external_task_marker_exception(dag_bag_ext):
"""
Clearing across multiple DAGs should raise AirflowException if more levels are being cleared
than allowed by the recursion_depth of the first ExternalTaskMarker being cleared.
"""
run_tasks(dag_bag_ext)
dag_0 = dag_bag_ext.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
task_b_0 = dag_0.get_task("task_b_0")
task_b_0.recursion_depth = 2
with pytest.raises(AirflowException, match="Maximum recursion depth 2"):
clear_tasks(dag_bag_ext, dag_0, task_a_0)
@pytest.fixture
def dag_bag_cyclic():
"""
Create a DagBag with DAGs having cyclic dependencies set up by ExternalTaskMarker and
ExternalTaskSensor.
dag_0: task_a_0 >> task_b_0
^ |
| |
dag_1: | ---> task_a_1 >> task_b_1
| ^
| |
dag_n: | ---> task_a_n >> task_b_n
| |
-----------------------------------------------------
"""
def _factory(depth: int) -> DagBag:
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
dags = []
with DAG("dag_0", start_date=DEFAULT_DATE, schedule=None) as dag:
dags.append(dag)
task_a_0 = EmptyOperator(task_id="task_a_0")
task_b_0 = ExternalTaskMarker(
task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3
)
task_a_0 >> task_b_0
for n in range(1, depth):
with DAG(f"dag_{n}", start_date=DEFAULT_DATE, schedule=None) as dag:
dags.append(dag)
task_a = ExternalTaskSensor(
task_id=f"task_a_{n}",
external_dag_id=f"dag_{n - 1}",
external_task_id=f"task_b_{n - 1}",
)
task_b = ExternalTaskMarker(
task_id=f"task_b_{n}",
external_dag_id=f"dag_{n + 1}",
external_task_id=f"task_a_{n + 1}",
recursion_depth=3,
)
task_a >> task_b
# Create the last dag which loops back
with DAG(f"dag_{depth}", start_date=DEFAULT_DATE, schedule=None) as dag:
dags.append(dag)
task_a = ExternalTaskSensor(
task_id=f"task_a_{depth}",
external_dag_id=f"dag_{depth - 1}",
external_task_id=f"task_b_{depth - 1}",
)
task_b = ExternalTaskMarker(
task_id=f"task_b_{depth}",
external_dag_id="dag_0",
external_task_id="task_a_0",
recursion_depth=2,
)
task_a >> task_b
for dag in dags:
if AIRFLOW_V_3_0_PLUS:
sync_dag_to_db(dag)
dag_bag.bag_dag(dag=dag)
else:
dag_bag.bag_dag(dag=dag, root_dag=dag) # type: ignore[call-arg]
return dag_bag
return _factory
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Different test for 3.0+")
def test_external_task_marker_cyclic_deep(dag_bag_cyclic):
"""
Tests clearing across multiple DAGs that have cyclic dependencies. AirflowException should be
raised.
"""
dag_bag = dag_bag_cyclic(10)
run_tasks(dag_bag)
dag_0 = dag_bag.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
with pytest.raises(AirflowException, match="Maximum recursion depth 3"):
clear_tasks(dag_bag, dag_0, task_a_0)
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Different test for 3.0+")
def test_external_task_marker_cyclic_shallow(dag_bag_cyclic):
"""
Tests clearing across multiple DAGs that have cyclic dependencies shallower
than recursion_depth
"""
dag_bag = dag_bag_cyclic(2)
run_tasks(dag_bag)
dag_0 = dag_bag.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
tis = clear_tasks(dag_bag, dag_0, task_a_0, dry_run=True)
assert sorted((ti.dag_id, ti.task_id) for ti in tis) == [
("dag_0", "task_a_0"),
("dag_0", "task_b_0"),
("dag_1", "task_a_1"),
("dag_1", "task_b_1"),
("dag_2", "task_a_2"),
("dag_2", "task_b_2"),
]
@pytest.fixture
def dag_bag_multiple(session):
"""
Create a DagBag containing two DAGs, linked by multiple ExternalTaskMarker.
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
daily_dag = DAG("daily_dag", start_date=DEFAULT_DATE, schedule="@daily")
agg_dag = DAG("agg_dag", start_date=DEFAULT_DATE, schedule="@daily")
if AIRFLOW_V_3_0_PLUS:
dag_bag.bag_dag(dag=daily_dag)
dag_bag.bag_dag(dag=agg_dag)
else:
dag_bag.bag_dag(dag=daily_dag, root_dag=daily_dag)
dag_bag.bag_dag(dag=agg_dag, root_dag=agg_dag)
daily_task = EmptyOperator(task_id="daily_tas", dag=daily_dag)
begin = EmptyOperator(task_id="begin", dag=agg_dag)
for i in range(8):
task = ExternalTaskMarker(
task_id=f"{daily_task.task_id}_{i}",
external_dag_id=daily_dag.dag_id,
external_task_id=daily_task.task_id,
logical_date=f"{{{{ macros.ds_add(ds, -1 * {i}) }}}}",
dag=agg_dag,
)
begin >> task
if AIRFLOW_V_3_0_PLUS:
sync_dags_to_db([agg_dag, daily_dag])
return dag_bag
@pytest.fixture
def dag_bag_head_tail(session):
"""
Create a DagBag containing one DAG, with task "head" depending on task "tail" of the
previous logical_date.
20200501 20200502 20200510
+------+ +------+ +------+
| head | -->head | --> -->head |
| | | / | | | / / | | |
| v | / | v | / / | v |
| body | / | body | / ... / | body |
| | |/ | | |/ / | | |
| v / | v / / | v |
| tail/| | tail/| / | tail |
+------+ +------+ +------+
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
with DAG("head_tail", start_date=DEFAULT_DATE, schedule="@daily") as dag:
head = ExternalTaskSensor(
task_id="head",
external_dag_id=dag.dag_id,
external_task_id="tail",
execution_delta=timedelta(days=1),
mode="reschedule",
)
body = EmptyOperator(task_id="body")
tail = ExternalTaskMarker(
task_id="tail",
external_dag_id=dag.dag_id,
external_task_id=head.task_id,
logical_date="{{ macros.ds_add(ds, 1) }}",
)
head >> body >> tail
if AIRFLOW_V_3_0_PLUS:
dag_bag.bag_dag(dag)
sync_dag_to_db(dag)
else:
dag_bag.bag_dag(dag=dag, root_dag=dag)
return dag_bag
@pytest.fixture
def dag_bag_head_tail_mapped_tasks(session):
"""
Create a DagBag containing one DAG, with task "head" depending on task "tail" of the
previous logical_date.
20200501 20200502 20200510
+------+ +------+ +------+
| head | -->head | --> -->head |
| | | / | | | / / | | |
| v | / | v | / / | v |
| body | / | body | / ... / | body |
| | |/ | | |/ / | | |
| v / | v / / | v |
| tail/| | tail/| / | tail |
+------+ +------+ +------+
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
with DAG("head_tail", start_date=DEFAULT_DATE, schedule="@daily") as dag:
@task_deco
def dummy_task(x: int):
return x
head = ExternalTaskSensor(
task_id="head",
external_dag_id=dag.dag_id,
external_task_id="tail",
execution_delta=timedelta(days=1),
mode="reschedule",
)
body = dummy_task.expand(x=range(5))
tail = ExternalTaskMarker(
task_id="tail",
external_dag_id=dag.dag_id,
external_task_id=head.task_id,
logical_date="{{ macros.ds_add(ds, 1) }}",
)
head >> body >> tail
if AIRFLOW_V_3_0_PLUS:
sync_dag_to_db(dag)
else:
dag_bag.bag_dag(dag=dag, root_dag=dag)
return dag_bag
| TestExternalTaskMarker |
python | scipy__scipy | scipy/interpolate/_interpolate.py | {
"start": 32624,
"end": 49145
} | class ____(_PPolyBase):
"""Piecewise polynomial in the power basis.
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
local power basis::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial.
Parameters
----------
c : ndarray, shape (k+1, m, ...)
Polynomial coefficients, degree `k` and `m` intervals.
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-D array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self._c.reshape(self._c.shape[0], self._c.shape[1], -1),
self._x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e., compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - n representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self._c.copy()
else:
c2 = self._c[:-nu, :].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
c2 = self._asarray(c2)
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e., compute
the first integral. If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self._c.shape[0] + nu, self._c.shape[1]) + self._c.shape[2:],
dtype=self._c.dtype)
c[:-nu] = self._c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self._c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self._x, nu - 1)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
# construct a compatible polynomial
c = self._asarray(c)
return self.construct_fast(c, self.x, extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
range_int = np.empty((prod(self._c.shape[2:]),), dtype=self._c.dtype)
self._ensure_c_contiguous()
# Compute the integral.
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
xs, xe = self._x[0], self._x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
_ppoly.integrate(
self._c.reshape(self._c.shape[0], self._c.shape[1], -1),
self._x, xs, xe, False, out=range_int)
range_int *= n_periods
else:
range_int.fill(0)
# Map a to [xs, xe], b is always a + left.
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
remainder_int = np.empty_like(range_int)
if b <= xe:
_ppoly.integrate(
self._c.reshape(self._c.shape[0], self._c.shape[1], -1),
self._x, a, b, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self._c.reshape(self._c.shape[0], self._c.shape[1], -1),
self._x, a, xe, False, out=remainder_int)
range_int += remainder_int
_ppoly.integrate(
self._c.reshape(self._c.shape[0], self._c.shape[1], -1),
self._x, xs, xs + left + a - xe, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self._c.reshape(self._c.shape[0], self._c.shape[1], -1),
self._x, a, b, bool(extrapolate), out=range_int)
# Return
range_int *= sign
return self._asarray(range_int.reshape(self._c.shape[2:]))
def solve(self, y=0., discontinuity=True, extrapolate=None):
"""
Find real solutions of the equation ``pp(x) == y``.
Parameters
----------
y : float, optional
Right-hand side. Default is zero.
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
if the `discont` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> import numpy as np
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.solve()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self._c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
y = float(y)
r = _ppoly.real_roots(self._c.reshape(self._c.shape[0], self._c.shape[1], -1),
self._x, y, bool(discontinuity),
bool(extrapolate))
if self._c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self._c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in NumPy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self._c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep` or a BSpline object.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
Examples
--------
Construct an interpolating spline and convert it to a `PPoly` instance
>>> import numpy as np
>>> from scipy.interpolate import splrep, PPoly
>>> x = np.linspace(0, 1, 11)
>>> y = np.sin(2*np.pi*x)
>>> tck = splrep(x, y, s=0)
>>> p = PPoly.from_spline(tck)
>>> isinstance(p, PPoly)
True
Note that this function only supports 1D splines out of the box.
If the ``tck`` object represents a parametric spline (e.g. constructed
by `splprep` or a `BSpline` with ``c.ndim > 1``), you will need to loop
over the dimensions manually.
>>> from scipy.interpolate import splprep, splev
>>> t = np.linspace(0, 1, 11)
>>> x = np.sin(2*np.pi*t)
>>> y = np.cos(2*np.pi*t)
>>> (t, c, k), u = splprep([x, y], s=0)
Note that ``c`` is a list of two arrays of length 11.
>>> unew = np.arange(0, 1.01, 0.01)
>>> out = splev(unew, (t, c, k))
To convert this spline to the power basis, we convert each
component of the list of b-spline coefficients, ``c``, into the
corresponding cubic polynomial.
>>> polys = [PPoly.from_spline((t, cj, k)) for cj in c]
>>> polys[0].c.shape
(4, 14)
Note that the coefficients of the polynomials `polys` are in the
power basis and their dimensions reflect just that: here 4 is the order
(degree+1), and 14 is the number of intervals---which is nothing but
the length of the knot array of the original `tck` minus one.
Optionally, we can stack the components into a single `PPoly` along
the third dimension:
>>> cc = np.dstack([p.c for p in polys]) # has shape = (4, 14, 2)
>>> poly = PPoly(cc, polys[0].x)
>>> np.allclose(poly(unew).T, # note the transpose to match `splev`
... out, atol=1e-15)
True
"""
if isinstance(tck, BSpline):
t, c, k = tck._t, tck._c, tck.k
_asarray = tck._asarray
if extrapolate is None:
extrapolate = tck.extrapolate
else:
t, c, k = tck
_asarray = np.asarray
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in range(k, -1, -1):
y = _fitpack_py.splev(t[:-1], (t, c, k), der=m)
cvals[k - m, :] = y / spec.gamma(m+1)
return cls.construct_fast(_asarray(cvals), _asarray(t), extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
if not isinstance(bp, BPoly):
raise TypeError(f".from_bernstein_basis only accepts BPoly instances. "
f"Got {type(bp)} instead.")
dx = np.diff(bp._x)
k = bp._c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp._c)
for a in range(k+1):
factor = (-1)**a * comb(k, a) * bp._c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(bp._asarray(c), bp.x, extrapolate, bp.axis)
@xp_capabilities(
cpu_only=True, jax_jit=False,
skip_backends=[
("dask.array",
"https://github.com/data-apis/array-api-extra/issues/488")
]
)
| PPoly |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 60633,
"end": 61010
} | class ____(_ConfigBase):
type_: PQEncoderType
distribution: PQEncoderDistribution
def to_dict(self) -> Dict[str, Any]:
ret_dict = super().to_dict()
ret_dict["type"] = str(ret_dict.pop("type"))
ret_dict["distribution"] = str(ret_dict.pop("distribution"))
return ret_dict
PQEncoderConfig = _PQEncoderConfig
@dataclass
| _PQEncoderConfig |
python | bokeh__bokeh | src/bokeh/models/widgets/tables.py | {
"start": 22407,
"end": 23054
} | class ____(Widget):
''' Abstract base class for data table (data grid) widgets.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
source = Instance(DataSource, default=InstanceDefault(ColumnDataSource), help="""
The source of data for the widget.
""")
view = Instance(CDSView, default=InstanceDefault(CDSView), help="""
A view into the data source to use when rendering table rows. A default view
of the entire data source is created if a view is not passed in during
initialization.
""")
| TableWidget |
python | getsentry__sentry | src/sentry/models/pullrequest.py | {
"start": 1986,
"end": 6548
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
organization_id = BoundedBigIntegerField(db_index=True)
repository_id = BoundedPositiveIntegerField()
key = models.CharField(max_length=64) # example, 5131 on github
date_added = models.DateTimeField(default=timezone.now, db_index=True)
title = models.TextField(null=True)
message = models.TextField(null=True)
author = FlexibleForeignKey("sentry.CommitAuthor", null=True)
merge_commit_sha = models.CharField(max_length=64, null=True, db_index=True)
objects: ClassVar[PullRequestManager] = PullRequestManager()
class Meta:
app_label = "sentry"
db_table = "sentry_pull_request"
indexes = (
models.Index(fields=("repository_id", "date_added")),
models.Index(fields=("organization_id", "merge_commit_sha")),
)
unique_together = (("repository_id", "key"),)
__repr__ = sane_repr("organization_id", "repository_id", "key")
def find_referenced_groups(self) -> set[Group]:
text = f"{self.message} {self.title}"
return find_referenced_groups(text, self.organization_id)
def get_external_url(self) -> str | None:
from sentry.models.repository import Repository
from sentry.plugins.base import bindings
repository = Repository.objects.get(id=self.repository_id)
provider_id = repository.provider
if not provider_id or not provider_id.startswith("integrations:"):
return None
provider_cls = bindings.get("integration-repository.provider").get(provider_id)
provider = provider_cls(provider_id)
return provider.pull_request_url(repository, self)
def is_unused(self, cutoff_date: datetime) -> bool:
"""
Returns True if PR should be deleted, False if it should be kept.
"""
# Use the class method to get the filter for unused PRs
unused_filter = PullRequest.get_unused_filter(cutoff_date)
# Check if this PR matches the unused filter
return PullRequest.objects.filter(id=self.id).filter(unused_filter).exists()
@classmethod
def get_unused_filter(cls, cutoff_date: datetime) -> Q:
"""
Returns a Q object that filters for unused PRs.
This is the inverse of what makes a PR "in use".
"""
from sentry.models.grouplink import GroupLink
from sentry.models.releasecommit import ReleaseCommit
from sentry.models.releaseheadcommit import ReleaseHeadCommit
# Subquery for checking if there's a valid GroupLink
grouplink_exists = Exists(
GroupLink.objects.filter(
linked_type=GroupLink.LinkedType.pull_request,
linked_id=OuterRef("id"),
group__project__isnull=False,
)
)
# Subquery for checking if comment has valid group_ids
# Note: Django aliases the table as U0 in the EXISTS subquery
comment_has_valid_group = Exists(
PullRequestComment.objects.filter(
pull_request_id=OuterRef("id"),
group_ids__isnull=False,
)
.exclude(group_ids__len=0)
.extra(
where=[
"""EXISTS (
SELECT 1 FROM sentry_groupedmessage g
WHERE g.id = ANY(U0.group_ids)
)"""
]
)
)
recent_comment_exists = Exists(
PullRequestComment.objects.filter(
pull_request_id=OuterRef("id"),
).filter(Q(created_at__gte=cutoff_date) | Q(updated_at__gte=cutoff_date))
)
commit_in_release = Exists(ReleaseCommit.objects.filter(commit_id=OuterRef("commit_id")))
commit_in_head = Exists(ReleaseHeadCommit.objects.filter(commit_id=OuterRef("commit_id")))
commit_exists = Exists(
PullRequestCommit.objects.filter(
pull_request_id=OuterRef("id"),
).filter(Q(commit__date_added__gte=cutoff_date) | commit_in_release | commit_in_head)
)
# Define what makes a PR "in use" (should be kept)
keep_conditions = (
Q(date_added__gte=cutoff_date)
| recent_comment_exists
| commit_exists
| grouplink_exists
| comment_has_valid_group
)
# Return the inverse - we want PRs that DON'T meet any keep conditions
return ~keep_conditions
@region_silo_model
| PullRequest |
python | doocs__leetcode | solution/0600-0699/0693.Binary Number with Alternating Bits/Solution2.py | {
"start": 0,
"end": 120
} | class ____:
def hasAlternatingBits(self, n: int) -> bool:
n ^= n >> 1
return (n & (n + 1)) == 0
| Solution |
python | tiangolo__fastapi | tests/test_tuples.py | {
"start": 253,
"end": 12068
} | class ____(BaseModel):
x: float
y: float
@app.post("/model-with-tuple/")
def post_model_with_tuple(item_group: ItemGroup):
return item_group
@app.post("/tuple-of-models/")
def post_tuple_of_models(square: Tuple[Coordinate, Coordinate]):
return square
@app.post("/tuple-form/")
def hello(values: Tuple[int, int] = Form()):
return values
client = TestClient(app)
def test_model_with_tuple_valid():
data = {"items": [["foo", "bar"], ["baz", "whatelse"]]}
response = client.post("/model-with-tuple/", json=data)
assert response.status_code == 200, response.text
assert response.json() == data
def test_model_with_tuple_invalid():
data = {"items": [["foo", "bar"], ["baz", "whatelse", "too", "much"]]}
response = client.post("/model-with-tuple/", json=data)
assert response.status_code == 422, response.text
data = {"items": [["foo", "bar"], ["baz"]]}
response = client.post("/model-with-tuple/", json=data)
assert response.status_code == 422, response.text
def test_tuple_with_model_valid():
data = [{"x": 1, "y": 2}, {"x": 3, "y": 4}]
response = client.post("/tuple-of-models/", json=data)
assert response.status_code == 200, response.text
assert response.json() == data
def test_tuple_with_model_invalid():
data = [{"x": 1, "y": 2}, {"x": 3, "y": 4}, {"x": 5, "y": 6}]
response = client.post("/tuple-of-models/", json=data)
assert response.status_code == 422, response.text
data = [{"x": 1, "y": 2}]
response = client.post("/tuple-of-models/", json=data)
assert response.status_code == 422, response.text
def test_tuple_form_valid():
response = client.post("/tuple-form/", data={"values": ("1", "2")})
assert response.status_code == 200, response.text
assert response.json() == [1, 2]
def test_tuple_form_invalid():
response = client.post("/tuple-form/", data={"values": ("1", "2", "3")})
assert response.status_code == 422, response.text
response = client.post("/tuple-form/", data={"values": ("1")})
assert response.status_code == 422, response.text
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/model-with-tuple/": {
"post": {
"summary": "Post Model With Tuple",
"operationId": "post_model_with_tuple_model_with_tuple__post",
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/ItemGroup"}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/tuple-of-models/": {
"post": {
"summary": "Post Tuple Of Models",
"operationId": "post_tuple_of_models_tuple_of_models__post",
"requestBody": {
"content": {
"application/json": {
"schema": IsDict(
{
"title": "Square",
"maxItems": 2,
"minItems": 2,
"type": "array",
"prefixItems": [
{"$ref": "#/components/schemas/Coordinate"},
{"$ref": "#/components/schemas/Coordinate"},
],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{
"title": "Square",
"maxItems": 2,
"minItems": 2,
"type": "array",
"items": [
{"$ref": "#/components/schemas/Coordinate"},
{"$ref": "#/components/schemas/Coordinate"},
],
}
)
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/tuple-form/": {
"post": {
"summary": "Hello",
"operationId": "hello_tuple_form__post",
"requestBody": {
"content": {
"application/x-www-form-urlencoded": {
"schema": {
"$ref": "#/components/schemas/Body_hello_tuple_form__post"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_hello_tuple_form__post": {
"title": "Body_hello_tuple_form__post",
"required": ["values"],
"type": "object",
"properties": {
"values": IsDict(
{
"title": "Values",
"maxItems": 2,
"minItems": 2,
"type": "array",
"prefixItems": [
{"type": "integer"},
{"type": "integer"},
],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{
"title": "Values",
"maxItems": 2,
"minItems": 2,
"type": "array",
"items": [{"type": "integer"}, {"type": "integer"}],
}
)
},
},
"Coordinate": {
"title": "Coordinate",
"required": ["x", "y"],
"type": "object",
"properties": {
"x": {"title": "X", "type": "number"},
"y": {"title": "Y", "type": "number"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"ItemGroup": {
"title": "ItemGroup",
"required": ["items"],
"type": "object",
"properties": {
"items": {
"title": "Items",
"type": "array",
"items": IsDict(
{
"maxItems": 2,
"minItems": 2,
"type": "array",
"prefixItems": [
{"type": "string"},
{"type": "string"},
],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{
"maxItems": 2,
"minItems": 2,
"type": "array",
"items": [{"type": "string"}, {"type": "string"}],
}
),
}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
| Coordinate |
python | numba__numba | numba/core/types/common.py | {
"start": 266,
"end": 532
} | class ____(IterableType):
def __init__(self, name, iterator_type):
self._iterator_type = iterator_type
super(SimpleIterableType, self).__init__(name)
@property
def iterator_type(self):
return self._iterator_type
| SimpleIterableType |
python | jmcnamara__XlsxWriter | xlsxwriter/test/styles/test_write_cell_style_xfs.py | {
"start": 332,
"end": 944
} | class ____(unittest.TestCase):
"""
Test the Styles _write_cell_style_xfs() method.
"""
def setUp(self):
self.fh = StringIO()
self.styles = Styles()
self.styles._set_filehandle(self.fh)
def test_write_cell_style_xfs(self):
"""Test the _write_cell_style_xfs() method"""
self.styles.xf_formats = [Format()]
self.styles._write_cell_style_xfs()
exp = """<cellStyleXfs count="1"><xf numFmtId="0" fontId="0" fillId="0" borderId="0"/></cellStyleXfs>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteCellStyleXfs |
python | dask__distributed | distributed/system_monitor.py | {
"start": 358,
"end": 8971
} | class ____:
proc: psutil.Process
maxlen: int | None
count: int
last_time: float
quantities: dict[str, deque[float]]
monitor_net_io: bool
monitor_disk_io: bool
monitor_host_cpu: bool
monitor_gil_contention: bool
_last_net_io_counters: Any # psutil namedtuple
_last_disk_io_counters: Any # psutil namedtuple
_last_host_cpu_counters: Any # dynamically-defined psutil namedtuple
_last_gil_contention: float # 0-1 value
cumulative_gil_contention: float
gpu_name: str | None
gpu_memory_total: int
def __init__(
self,
maxlen: int | None | NoDefault = no_default,
monitor_disk_io: bool | None = None,
monitor_host_cpu: bool | None = None,
monitor_gil_contention: bool | None = None,
):
self.proc = psutil.Process()
self.count = 0
if maxlen is no_default:
maxlen = dask.config.get("distributed.admin.system-monitor.log-length")
if isinstance(maxlen, int):
maxlen = max(1, maxlen)
elif maxlen is not None: # pragma: nocover
raise TypeError(f"maxlen must be int or None; got {maxlen!r}")
self.maxlen = maxlen
self.last_time = monotonic()
self.quantities = {
"cpu": deque(maxlen=maxlen),
"memory": deque(maxlen=maxlen),
"time": deque(maxlen=maxlen),
}
try:
self._last_net_io_counters = psutil.net_io_counters()
except Exception:
# FIXME is this possible?
self.monitor_net_io = False # pragma: nocover
else:
self.monitor_net_io = True
self.quantities["host_net_io.read_bps"] = deque(maxlen=maxlen)
self.quantities["host_net_io.write_bps"] = deque(maxlen=maxlen)
if monitor_disk_io is None:
monitor_disk_io = dask.config.get("distributed.admin.system-monitor.disk")
if monitor_disk_io:
try:
disk_ioc = psutil.disk_io_counters()
except Exception:
# FIXME occurs when psutil version doesn't have handling for given platform / kernel;
# should we explicitly error in this case?
monitor_disk_io = False # pragma: nocover
else:
if disk_ioc is None: # pragma: nocover
# diskless machine
monitor_disk_io = False
else:
self._last_disk_io_counters = disk_ioc
self.quantities["host_disk_io.read_bps"] = deque(maxlen=maxlen)
self.quantities["host_disk_io.write_bps"] = deque(maxlen=maxlen)
self.monitor_disk_io = monitor_disk_io
if monitor_host_cpu is None:
monitor_host_cpu = dask.config.get(
"distributed.admin.system-monitor.host-cpu"
)
self.monitor_host_cpu = monitor_host_cpu
if monitor_host_cpu:
self._last_host_cpu_counters = hostcpu_c = psutil.cpu_times()
# This is a namedtuple whose fields change based on OS and kernel version
for k in hostcpu_c._fields:
self.quantities["host_cpu." + k] = deque(maxlen=maxlen)
if monitor_gil_contention is None:
monitor_gil_contention = dask.config.get(
"distributed.admin.system-monitor.gil.enabled"
)
self.monitor_gil_contention = monitor_gil_contention
if self.monitor_gil_contention:
try:
from gilknocker import KnockKnock
except ImportError:
self.monitor_gil_contention = False
else:
self.quantities["gil_contention"] = deque(maxlen=maxlen)
self.cumulative_gil_contention = 0.0
raw_interval = dask.config.get(
"distributed.admin.system-monitor.gil.interval",
)
interval = parse_timedelta(raw_interval) * 1e6
self._gilknocker = KnockKnock(polling_interval_micros=int(interval))
self._gilknocker.start()
if not WINDOWS:
self.quantities["num_fds"] = deque(maxlen=maxlen)
if nvml.device_get_count() > 0:
gpu_extra = nvml.one_time()
self.gpu_name = gpu_extra["name"]
self.gpu_memory_total = gpu_extra["memory-total"]
self.quantities["gpu-memory-total"] = deque(maxlen=1)
self.quantities["gpu_utilization"] = deque(maxlen=maxlen)
self.quantities["gpu_memory_used"] = deque(maxlen=maxlen)
else:
self.gpu_name = None
self.gpu_memory_total = -1
self.update()
def recent(self) -> dict[str, float]:
return {k: v[-1] for k, v in self.quantities.items()}
def get_process_memory(self) -> int:
"""Sample process memory, as reported by the OS.
This one-liner function exists so that it can be easily mocked in unit tests,
as the OS allocating and releasing memory is highly volatile and a constant
source of flakiness.
"""
return self.proc.memory_info().rss
def update(self) -> dict[str, Any]:
now = time()
now_mono = monotonic()
duration = (now_mono - self.last_time) or 0.001
self.last_time = now_mono
self.count += 1
with self.proc.oneshot():
result = {
"cpu": self.proc.cpu_percent(),
"memory": self.get_process_memory(),
"time": now,
}
if self.monitor_net_io:
net_ioc = psutil.net_io_counters()
last = self._last_net_io_counters
result["host_net_io.read_bps"] = (
net_ioc.bytes_recv - last.bytes_recv
) / duration
result["host_net_io.write_bps"] = (
net_ioc.bytes_sent - last.bytes_sent
) / duration
self._last_net_io_counters = net_ioc
if self.monitor_disk_io:
disk_ioc = psutil.disk_io_counters()
assert disk_ioc is not None
last_disk = self._last_disk_io_counters
result["host_disk_io.read_bps"] = (
disk_ioc.read_bytes - last_disk.read_bytes
) / duration
result["host_disk_io.write_bps"] = (
disk_ioc.write_bytes - last_disk.write_bytes
) / duration
self._last_disk_io_counters = disk_ioc
if self.monitor_host_cpu:
host_cpu = psutil.cpu_times()
last_cpu = self._last_host_cpu_counters
for k in host_cpu._fields:
delta = getattr(host_cpu, k) - getattr(last_cpu, k)
# cpu_times() has a precision of 2 decimals; suppress noise
result["host_cpu." + k] = round(delta / duration, 2)
self._last_host_cpu_counters = host_cpu
if self.monitor_gil_contention:
gil_contention = self._gilknocker.contention_metric
self._gilknocker.reset_contention_metric()
result["gil_contention"] = self._last_gil_contention = gil_contention
self.cumulative_gil_contention += duration * gil_contention
# Note: WINDOWS constant doesn't work with `mypy --platform win32`
if sys.platform != "win32":
result["num_fds"] = self.proc.num_fds()
if self.gpu_name:
gpu_metrics = nvml.real_time()
result["gpu-memory-total"] = self.gpu_memory_total
result["gpu_utilization"] = gpu_metrics["utilization"]
result["gpu_memory_used"] = gpu_metrics["memory-used"]
for name, v in result.items():
if name != "count":
self.quantities[name].append(v)
return result
def __repr__(self) -> str:
return "<SystemMonitor: cpu: %d memory: %d MB fds: %s>" % (
self.quantities["cpu"][-1],
self.quantities["memory"][-1] / 1e6,
"N/A" if WINDOWS else self.quantities["num_fds"][-1],
)
def range_query(self, start: int) -> dict[str, list[float | None]]:
if start >= self.count:
return {k: [] for k in self.quantities}
istart = min(-1, max(-len(self.quantities["cpu"]), start - self.count))
return {
k: [v[i] if -i <= len(v) else None for i in range(istart, 0)]
for k, v in self.quantities.items()
}
def close(self) -> None:
if self.monitor_gil_contention:
self._gilknocker.stop()
| SystemMonitor |
python | getsentry__sentry | src/sentry/api/endpoints/user_subscriptions.py | {
"start": 829,
"end": 4307
} | class ____(UserEndpoint):
owner = ApiOwner.UNOWNED
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
"PUT": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, user) -> Response:
"""
Retrieve Account Subscriptions
`````````````````````````````````````
Return list of subscriptions for an account
:auth: required
"""
# This returns a dict with `subscriber` and `subscriptions`
# Returns `None` if no subscriptions for user
sub = newsletter.backend.get_subscriptions(user)
if sub is None or not newsletter.backend.is_enabled():
return self.respond([])
return self.respond(
[
{
"listId": x.get("list_id"),
"listDescription": x.get("list_description"),
"listName": x.get("list_name"),
"email": x.get("email"),
"subscribed": x.get("subscribed"),
"subscribedDate": x.get("subscribed_date"),
"unsubscribedDate": x.get("unsubscribed_date"),
}
for x in sub["subscriptions"]
]
)
def put(self, request: Request, user) -> Response:
"""
Update Account Subscriptions
````````````````````````````
Update account subscriptions to newsletter
:param int listId: id of newsletter list
:param boolean subscribed: should be subscribed to newsletter
:auth: required
"""
validator = NewsletterValidator(data=request.data)
if not validator.is_valid():
return self.respond(validator.errors, status=400)
result = validator.validated_data
email = UserEmail.objects.get_primary_email(user)
kwargs = {
"list_id": result["listId"],
"subscribed": result["subscribed"],
"verified": email.is_verified,
}
if not result["subscribed"]:
kwargs["unsubscribed_date"] = timezone.now()
else:
kwargs["subscribed_date"] = timezone.now()
newsletter.backend.create_or_update_subscription(user, **kwargs)
return self.respond(status=204)
def post(self, request: Request, user) -> Response:
"""
Configure Newsletter Subscription
`````````````````````````````````
Update the default newsletter subscription.
:param boolean subscribed: should be subscribed to newsletter
:auth: required
"""
validator = DefaultNewsletterValidator(data=request.data)
if not validator.is_valid():
return self.respond(validator.errors, status=400)
result = validator.validated_data
email = UserEmail.objects.get_primary_email(user)
kwargs = {
"subscribed": result["subscribed"],
"verified": email.is_verified,
"list_ids": newsletter.backend.get_default_list_ids(),
}
if not result["subscribed"]:
kwargs["unsubscribed_date"] = timezone.now()
else:
kwargs["subscribed_date"] = timezone.now()
newsletter.backend.create_or_update_subscriptions(user, **kwargs)
user.update(flags=F("flags").bitand(~User.flags.newsletter_consent_prompt))
return self.respond(status=204)
| UserSubscriptionsEndpoint |
python | Pylons__pyramid | src/pyramid/config/testing.py | {
"start": 257,
"end": 6937
} | class ____:
# testing API
def testing_securitypolicy(
self,
userid=None,
identity=None,
permissive=True,
remember_result=None,
forget_result=None,
):
"""Unit/integration testing helper. Registers a faux :term:`security
policy`.
This function is most useful when testing code that uses the security
APIs, such as :meth:`pyramid.request.Request.identity`,
:attr:`pyramid.request.Request.authenticated_userid`, or
:meth:`pyramid.request.Request.has_permission`,
The behavior of the registered :term:`security policy` depends on the
arguments passed to this method.
:param userid: If provided, the policy's ``authenticated_userid``
method will return this value. As a result,
:attr:`pyramid.request.Request.authenticated_userid` will have this
value as well.
:type userid: str
:param identity: If provided, the policy's ``identity`` method will
return this value. As a result,
:attr:`pyramid.request.Request.identity`` will have this value.
:type identity: object
:param permissive: If true, the policy will allow access to any user
for any permission. If false, the policy will deny all access.
:type permissive: bool
:param remember_result: If provided, the policy's ``remember`` method
will return this value. Otherwise, ``remember`` will return an
empty list.
:type remember_result: list
:param forget_result: If provided, the policy's ``forget`` method will
return this value. Otherwise, ``forget`` will return an empty
list.
:type forget_result: list
.. versionadded:: 1.4
The ``remember_result`` argument.
.. versionadded:: 1.4
The ``forget_result`` argument.
.. versionchanged:: 2.0
Removed ``groupids`` argument and add `identity` argument.
"""
from pyramid.testing import DummySecurityPolicy
policy = DummySecurityPolicy(
userid, identity, permissive, remember_result, forget_result
)
self.registry.registerUtility(policy, ISecurityPolicy)
return policy
def testing_resources(self, resources):
"""Unit/integration testing helper: registers a dictionary of
:term:`resource` objects that can be resolved via the
:func:`pyramid.traversal.find_resource` API.
The :func:`pyramid.traversal.find_resource` API is called with
a path as one of its arguments. If the dictionary you
register when calling this method contains that path as a
string key (e.g. ``/foo/bar`` or ``foo/bar``), the
corresponding value will be returned to ``find_resource`` (and
thus to your code) when
:func:`pyramid.traversal.find_resource` is called with an
equivalent path string or tuple.
"""
class DummyTraverserFactory:
def __init__(self, context):
self.context = context
def __call__(self, request):
path = request.path_info
ob = resources[path]
traversed = split_path_info(path)
return {
'context': ob,
'view_name': '',
'subpath': (),
'traversed': traversed,
'virtual_root': ob,
'virtual_root_path': (),
'root': ob,
}
self.registry.registerAdapter(
DummyTraverserFactory, (Interface,), ITraverser
)
return resources
testing_models = testing_resources # b/w compat
@action_method
def testing_add_subscriber(self, event_iface=None):
"""Unit/integration testing helper: Registers a
:term:`subscriber` which listens for events of the type
``event_iface``. This method returns a list object which is
appended to by the subscriber whenever an event is captured.
When an event is dispatched that matches the value implied by
the ``event_iface`` argument, that event will be appended to
the list. You can then compare the values in the list to
expected event notifications. This method is useful when
testing code that wants to call
:meth:`pyramid.registry.Registry.notify`,
or :func:`zope.component.event.dispatch`.
The default value of ``event_iface`` (``None``) implies a
subscriber registered for *any* kind of event.
"""
event_iface = self.maybe_dotted(event_iface)
L = []
def subscriber(*event):
L.extend(event)
self.add_subscriber(subscriber, event_iface)
return L
def testing_add_renderer(self, path, renderer=None):
"""Unit/integration testing helper: register a renderer at
``path`` (usually a relative filename ala ``templates/foo.pt``
or an asset specification) and return the renderer object.
If the ``renderer`` argument is None, a 'dummy' renderer will
be used. This function is useful when testing code that calls
the :func:`pyramid.renderers.render` function or
:func:`pyramid.renderers.render_to_response` function or
any other ``render_*`` or ``get_*`` API of the
:mod:`pyramid.renderers` module.
Note that calling this method for with a ``path`` argument
representing a renderer factory type (e.g. for ``foo.pt``
usually implies the ``chameleon_zpt`` renderer factory)
clobbers any existing renderer factory registered for that
type.
.. note:: This method is also available under the alias
``testing_add_template`` (an older name for it).
"""
from pyramid.testing import DummyRendererFactory
helper = RendererHelper(name=path, registry=self.registry)
factory = self.registry.queryUtility(
IRendererFactory, name=helper.type
)
if not isinstance(factory, DummyRendererFactory):
factory = DummyRendererFactory(helper.type, factory)
self.registry.registerUtility(
factory, IRendererFactory, name=helper.type
)
from pyramid.testing import DummyTemplateRenderer
if renderer is None:
renderer = DummyTemplateRenderer()
factory.add(path, renderer)
return renderer
testing_add_template = testing_add_renderer
| TestingConfiguratorMixin |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/cache.py | {
"start": 2361,
"end": 2860
} | class ____(_TransformedFnCache):
"""A function cache based on unbound function objects.
Using the function for the cache key allows efficient handling of object
methods.
Unlike the _CodeObjectCache, this discriminates between different functions
even if they have the same code. This is needed for decorators that may
masquerade as another function.
"""
def _get_key(self, entity):
if inspect.ismethod(entity):
return entity.__func__
return entity
| UnboundInstanceCache |
python | getsentry__sentry | tests/sentry/db/test_router.py | {
"start": 5206,
"end": 8048
} | class ____(TestCase):
"""Isolated mode raises errors for the 'other' silo"""
@override_settings(SILO_MODE="CONTROL")
def test_for_control(self) -> None:
router = SiloRouter()
router.use_simulated(False)
assert "default" == router.db_for_read(User)
assert "default" == router.db_for_write(User)
assert router.allow_migrate("default", "sentry", User)
assert not router.allow_migrate("control", "sentry", User)
assert not router.allow_migrate(
"default", "sentry", model=None, tables=["jira_ac_tenant"]
), "Removed tables end up excluded from migrations"
with pytest.raises(ValueError):
router.db_for_read(Organization)
with pytest.raises(ValueError):
router.db_for_write(Organization)
with pytest.raises(ValueError):
router.allow_migrate("default", "sentry", Organization)
with pytest.raises(ValueError):
router.allow_migrate("default", "sentry", tables=["sentry_pagerdutyservice"])
@override_settings(SILO_MODE="REGION")
def test_for_region(self) -> None:
router = SiloRouter()
router.use_simulated(False)
assert "default" == router.db_for_read(Organization)
assert "default" == router.db_for_write(Organization)
assert router.allow_migrate("default", "sentry", Organization)
assert not router.allow_migrate("region", "sentry", Organization)
assert not router.allow_migrate(
"default", "sentry", model=None, tables=["jira_ac_tenant"]
), "Removed tables end up excluded from migrations"
assert router.allow_migrate(
"default", "sentry", hints={"tables": ["sentry_pagerdutyservice"]}
), "Historical silo mapped tables can be migrated"
with pytest.raises(ValueError):
router.db_for_read(User)
with pytest.raises(ValueError):
router.db_for_write(User)
# Can't migrate region/control in isolated silos
with pytest.raises(ValueError):
router.allow_migrate("control", "sentry", User)
@override_settings(SILO_MODE="MONOLITH")
def test_for_monolith(self) -> None:
router = SiloRouter()
router.use_simulated(False)
assert "default" == router.db_for_read(Organization)
assert "default" == router.db_for_read(User)
assert "default" == router.db_for_write(Organization)
assert "default" == router.db_for_write(User)
assert router.allow_migrate("default", "sentry", Organization)
assert router.allow_migrate("default", "sentry", User)
assert router.allow_migrate(
"default", "sentry", hints={"tables": ["sentry_pagerdutyservice"]}
), "Historical silo mapped tables can be migrated"
| SiloRouterIsolatedTest |
python | google__python-fire | fire/console/platforms.py | {
"start": 863,
"end": 955
} | class ____(Exception):
"""Base class for exceptions in the platforms module."""
pass
| Error |
python | falconry__falcon | tests/test_error_handlers.py | {
"start": 10026,
"end": 11202
} | class ____:
@pytest.fixture()
def body_client(self, asgi, util):
app = util.create_app(asgi=asgi)
app.add_route('/error', CustomErrorResource())
if asgi:
async def handle_zero_division(req, resp, ex, params):
assert await resp.render_body() is None
resp.status = falcon.HTTP_719
else:
def handle_zero_division(req, resp, ex, params):
assert resp.render_body() is None
resp.status = falcon.HTTP_719
app.add_error_handler(ZeroDivisionError, handle_zero_division)
return testing.TestClient(app)
def test_data_is_set(self, body_client):
res = body_client.simulate_get('/error')
assert res.status == falcon.HTTP_719
assert res.content == b''
def test_media_is_set(self, body_client):
res = body_client.simulate_post('/error')
assert res.status == falcon.HTTP_719
assert res.content == b''
def test_body_is_set(self, body_client):
res = body_client.simulate_put('/error')
assert res.status == falcon.HTTP_719
assert res.content == b''
| TestCustomError |
python | kamyu104__LeetCode-Solutions | Python/maximum-total-beauty-of-the-gardens.py | {
"start": 3796,
"end": 5152
} | class ____(object):
def maximumBeauty(self, flowers, newFlowers, target, full, partial):
"""
:type flowers: List[int]
:type newFlowers: int
:type target: int
:type full: int
:type partial: int
:rtype: int
"""
def check(prefix, total, x):
return (prefix[x]-prefix[x-1])*x-prefix[x] <= total
def binary_search_right(prefix, total, left, right):
while left <= right:
mid = left+(right-left)//2
if not check(prefix, total, mid):
right = mid-1
else:
left = mid+1
return right
flowers.sort()
n = bisect.bisect_left(flowers, target)
prefix = [0]*(n+1)
for i in xrange(n):
prefix[i+1] = prefix[i]+flowers[i]
result = suffix = 0
left = n
for right in reversed(xrange(n+1)):
if right != n:
suffix += flowers[right]
total = newFlowers-((n-right)*target-suffix)
if total < 0:
break
left = binary_search_right(prefix, total, 1, right)
mn = min((total+prefix[left])//left if left else 0, target-1)
result = max(result, mn*partial+(len(flowers)-right)*full)
return result
| Solution4 |
python | prabhupant__python-ds | data_structures/binary_trees/logical_and_tree.py | {
"start": 160,
"end": 466
} | class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def convert(root):
if root is None:
return
convert(root.left)
convert(root.right)
if root.left and root.right:
root.val = root.left.val & root.right.val
| Node |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_batch.py | {
"start": 5342,
"end": 7699
} | class ____(GoogleCloudBaseOperator):
"""
Deletes a job and wait for the operation to be completed.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param job_name: Required. The name of the job to be deleted.
:param timeout: The timeout for this request.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("project_id", "region", "gcp_conn_id", "impersonation_chain", "job_name")
def __init__(
self,
project_id: str,
region: str,
job_name: str,
timeout: float | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.job_name = job_name
self.timeout = timeout
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook: CloudBatchHook = CloudBatchHook(self.gcp_conn_id, self.impersonation_chain)
operation = hook.delete_job(job_name=self.job_name, region=self.region, project_id=self.project_id)
self._wait_for_operation(operation)
def _wait_for_operation(self, operation: operation.Operation):
try:
return operation.result(timeout=self.timeout)
except Exception:
error = operation.exception(timeout=self.timeout)
raise AirflowException(error)
| CloudBatchDeleteJobOperator |
python | sphinx-doc__sphinx | sphinx/domains/python/__init__.py | {
"start": 23631,
"end": 38798
} | class ____(Domain):
"""Python language domain."""
name = 'py'
label = 'Python'
object_types = {
'function': ObjType(_('function'), 'func', 'obj'),
'data': ObjType(_('data'), 'data', 'obj'),
'class': ObjType(_('class'), 'class', 'exc', 'obj'),
'exception': ObjType(_('exception'), 'exc', 'class', 'obj'),
'method': ObjType(_('method'), 'meth', 'obj'),
'classmethod': ObjType(_('class method'), 'meth', 'obj'),
'staticmethod': ObjType(_('static method'), 'meth', 'obj'),
'attribute': ObjType(_('attribute'), 'attr', 'obj'),
'property': ObjType(_('property'), 'attr', '_prop', 'obj'),
'type': ObjType(_('type alias'), 'type', 'class', 'obj'),
'module': ObjType(_('module'), 'mod', 'obj'),
}
directives = {
'function': PyFunction,
'data': PyVariable,
'class': PyClasslike,
'exception': PyClasslike,
'method': PyMethod,
'classmethod': PyClassMethod,
'staticmethod': PyStaticMethod,
'attribute': PyAttribute,
'property': PyProperty,
'type': PyTypeAlias,
'module': PyModule,
'currentmodule': PyCurrentModule,
'decorator': PyDecoratorFunction,
'decoratormethod': PyDecoratorMethod,
}
roles = {
'data': PyXRefRole(),
'exc': PyXRefRole(),
'func': PyXRefRole(fix_parens=True),
'deco': _PyDecoXRefRole(),
'class': PyXRefRole(),
'const': PyXRefRole(),
'attr': PyXRefRole(),
'type': PyXRefRole(),
'meth': PyXRefRole(fix_parens=True),
'mod': PyXRefRole(),
'obj': PyXRefRole(),
}
initial_data: ClassVar[dict[str, dict[str, tuple[Any]]]] = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
}
indices = [
PythonModuleIndex,
]
@property
def objects(self) -> dict[str, ObjectEntry]:
return self.data.setdefault('objects', {}) # fullname -> ObjectEntry
def note_object(
self,
name: str,
objtype: str,
node_id: str,
aliased: bool = False,
location: Any = None,
) -> None:
"""Note a python object for cross reference.
.. versionadded:: 2.1
"""
if name in self.objects:
other = self.objects[name]
if other.aliased and aliased is False:
# The original definition found. Override it!
pass
elif other.aliased is False and aliased:
# The original definition is already registered.
return
else:
# duplicated
logger.warning(
__(
'duplicate object description of %s, '
'other instance in %s, use :no-index: for one of them'
),
name,
other.docname,
location=location,
)
self.objects[name] = ObjectEntry(
self.env.current_document.docname, node_id, objtype, aliased
)
@property
def modules(self) -> dict[str, ModuleEntry]:
return self.data.setdefault('modules', {}) # modname -> ModuleEntry
def note_module(
self, name: str, node_id: str, synopsis: str, platform: str, deprecated: bool
) -> None:
"""Note a python module for cross reference.
.. versionadded:: 2.1
"""
self.modules[name] = ModuleEntry(
docname=self.env.current_document.docname,
node_id=node_id,
synopsis=synopsis,
platform=platform,
deprecated=deprecated,
)
def clear_doc(self, docname: str) -> None:
to_remove = [
fullname for fullname, obj in self.objects.items() if obj.docname == docname
]
for fullname in to_remove:
del self.objects[fullname]
to_remove = [
modname for modname, mod in self.modules.items() if mod.docname == docname
]
for fullname in to_remove:
del self.modules[fullname]
def merge_domaindata(self, docnames: Set[str], otherdata: dict[str, Any]) -> None:
# XXX check duplicates?
for fullname, obj in otherdata['objects'].items():
if obj.docname in docnames:
self.objects[fullname] = obj
for modname, mod in otherdata['modules'].items():
if mod.docname in docnames:
self.modules[modname] = mod
def find_obj(
self,
env: BuildEnvironment,
modname: str,
classname: str,
name: str,
type: str | None,
searchmode: int = 0,
) -> list[tuple[str, ObjectEntry]]:
"""Find a Python object for "name", perhaps using the given module
and/or classname. Returns a list of (name, object entry) tuples.
"""
# skip parens
name = name.removesuffix('()')
if not name:
return []
matches: list[tuple[str, ObjectEntry]] = []
newname = None
if searchmode == 1:
if type is None:
objtypes: list[str] | None = list(self.object_types)
else:
objtypes = self.objtypes_for_role(type)
if objtypes is not None:
if modname and classname:
fullname = modname + '.' + classname + '.' + name
if (
fullname in self.objects
and self.objects[fullname].objtype in objtypes
):
newname = fullname
if not newname:
if (
modname
and f'{modname}.{name}' in self.objects
and self.objects[f'{modname}.{name}'].objtype in objtypes
):
newname = f'{modname}.{name}'
elif (
name in self.objects and self.objects[name].objtype in objtypes
):
newname = name
else:
# "fuzzy" searching mode
searchname = f'.{name}'
matches = [
(oname, self.objects[oname])
for oname in self.objects
if oname.endswith(searchname)
and self.objects[oname].objtype in objtypes
]
else:
# NOTE: searching for exact match, object type is not considered
if name in self.objects:
newname = name
elif type == 'mod':
# only exact matches allowed for modules
return []
elif classname and classname + '.' + name in self.objects:
newname = classname + '.' + name
elif modname and modname + '.' + name in self.objects:
newname = modname + '.' + name
elif (
modname
and classname
and modname + '.' + classname + '.' + name in self.objects
):
newname = modname + '.' + classname + '.' + name
if newname is not None:
matches.append((newname, self.objects[newname]))
return matches
def resolve_xref(
self,
env: BuildEnvironment,
fromdocname: str,
builder: Builder,
type: str,
target: str,
node: pending_xref,
contnode: Element,
) -> nodes.reference | None:
modname = node.get('py:module')
clsname = node.get('py:class')
searchmode = 1 if node.hasattr('refspecific') else 0
matches = self.find_obj(env, modname, clsname, target, type, searchmode)
if not matches and type == 'class':
# fallback to data/attr (for type aliases)
# type aliases are documented as data/attr but referenced as class
matches = self.find_obj(env, modname, clsname, target, 'data', searchmode)
if not matches:
matches = self.find_obj(
env, modname, clsname, target, 'attr', searchmode
)
if not matches and type == 'attr':
# fallback to meth (for property; Sphinx 2.4.x)
# this ensures that `:attr:` role continues to refer to the old property entry
# that defined by ``method`` directive in old reST files.
matches = self.find_obj(env, modname, clsname, target, 'meth', searchmode)
if not matches and type == 'meth':
# fallback to attr (for property)
# this ensures that `:meth:` in the old reST files can refer to the property
# entry that defined by ``property`` directive.
#
# Note: _prop is a secret role only for internal look-up.
matches = self.find_obj(env, modname, clsname, target, '_prop', searchmode)
if not matches:
return None
elif len(matches) > 1:
canonicals = [m for m in matches if not m[1].aliased]
if len(canonicals) == 1:
matches = canonicals
else:
logger.warning(
__('more than one target found for cross-reference %r: %s'),
target,
', '.join(match[0] for match in matches),
type='ref',
subtype='python',
location=node,
)
name, obj = matches[0]
if obj[2] == 'module':
return self._make_module_refnode(builder, fromdocname, name, contnode)
else:
# determine the content of the reference by conditions
content = find_pending_xref_condition(node, 'resolved')
if content:
children = content.children
else:
# if not found, use contnode
children = [contnode]
return make_refnode(builder, fromdocname, obj[0], obj[1], children, name)
def resolve_any_xref(
self,
env: BuildEnvironment,
fromdocname: str,
builder: Builder,
target: str,
node: pending_xref,
contnode: Element,
) -> list[tuple[str, nodes.reference]]:
modname = node.get('py:module')
clsname = node.get('py:class')
results: list[tuple[str, nodes.reference]] = []
# always search in "refspecific" mode with the :any: role
matches = self.find_obj(env, modname, clsname, target, None, 1)
multiple_matches = len(matches) > 1
for name, obj in matches:
if multiple_matches and obj.aliased:
# Skip duplicated matches
continue
if obj[2] == 'module':
results.append((
'py:mod',
self._make_module_refnode(builder, fromdocname, name, contnode),
))
else:
# determine the content of the reference by conditions
content = find_pending_xref_condition(node, 'resolved')
if content:
children = content.children
else:
# if not found, use contnode
children = [contnode]
role = 'py:' + self.role_for_objtype(obj[2]) # type: ignore[operator]
results.append((
role,
make_refnode(builder, fromdocname, obj[0], obj[1], children, name),
))
return results
def _make_module_refnode(
self, builder: Builder, fromdocname: str, name: str, contnode: Node
) -> nodes.reference:
# get additional info for modules
module: ModuleEntry = self.modules[name]
title_parts = [name]
if module.synopsis:
title_parts.append(f': {module.synopsis}')
if module.deprecated:
title_parts.append(_(' (deprecated)'))
if module.platform:
title_parts.append(f' ({module.platform})')
title = ''.join(title_parts)
return make_refnode(
builder, fromdocname, module.docname, module.node_id, contnode, title
)
def get_objects(self) -> Iterator[tuple[str, str, str, str, str, int]]:
for modname, mod in self.modules.items():
yield modname, modname, 'module', mod.docname, mod.node_id, 0
for refname, obj in self.objects.items():
if obj.objtype != 'module': # modules are already handled
if obj.aliased:
# aliased names are not full-text searchable.
yield refname, refname, obj.objtype, obj.docname, obj.node_id, -1
else:
yield refname, refname, obj.objtype, obj.docname, obj.node_id, 1
def get_full_qualified_name(self, node: Element) -> str | None:
modname = node.get('py:module')
clsname = node.get('py:class')
target = node.get('reftarget')
if target is None:
return None
else:
return '.'.join(filter(None, [modname, clsname, target]))
def builtin_resolver(
app: Sphinx, env: BuildEnvironment, node: pending_xref, contnode: Element
) -> Element | None:
"""Do not emit nitpicky warnings for built-in types."""
if node.get('refdomain') != 'py':
return None
elif node.get('reftype') in {'class', 'obj'} and node.get('reftarget') == 'None':
return contnode
elif node.get('reftype') in {'class', 'obj', 'exc'}:
reftarget = node.get('reftarget')
if inspect.isclass(getattr(builtins, reftarget, None)):
# built-in class
return contnode
if _is_typing(reftarget):
# typing class
return contnode
return None
def _is_typing(s: str, /) -> bool:
return s.removeprefix('typing.') in _TYPING_ALL
def setup(app: Sphinx) -> ExtensionMetadata:
app.setup_extension('sphinx.directives')
app.add_domain(PythonDomain)
app.add_config_value(
'python_use_unqualified_type_names', False, 'env', types=frozenset({bool})
)
app.add_config_value(
'python_maximum_signature_line_length',
None,
'env',
types=frozenset({int, NoneType}),
)
app.add_config_value(
'python_trailing_comma_in_multi_line_signatures',
True,
'env',
types=frozenset({bool}),
)
app.add_config_value(
'python_display_short_literal_types', False, 'env', types=frozenset({bool})
)
app.connect('object-description-transform', filter_meta_fields)
app.connect('missing-reference', builtin_resolver, priority=900)
return {
'version': 'builtin',
'env_version': 4,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| PythonDomain |
python | PrefectHQ__prefect | tests/server/models/test_variables.py | {
"start": 3757,
"end": 6038
} | class ____:
async def test_no_filter(
self,
session,
variables,
):
res = await read_variables(session)
assert len(res) == 4
assert {r.id for r in res} == {v.id for v in variables}
async def test_filter_by_id(
self,
session,
variables,
):
variable = variables[0]
res = await read_variables(
session,
variable_filter=VariableFilter(id=VariableFilterId(any_=[variable.id])),
)
assert len(res) == 1
assert res[0].id == variable.id
async def test_filter_by_any_name(
self,
session,
variables,
):
res = await read_variables(
session,
variable_filter=VariableFilter(name=VariableFilterName(any_=["variable1"])),
)
assert len(res) == 1
assert {r.id for r in res} == {v.id for v in variables if "variable1" == v.name}
async def test_filter_by_like_name(
self,
session,
variables,
):
res = await read_variables(
session,
variable_filter=VariableFilter(name=VariableFilterName(like_="variable1%")),
)
assert len(res) == 2
assert {r.id for r in res} == {v.id for v in variables if "variable1" in v.name}
async def test_filter_by_tag(
self,
session,
variables,
):
res = await read_variables(
session,
variable_filter=VariableFilter(tags=VariableFilterTags(all_=["tag1"])),
)
assert len(res) == 2
assert {r.id for r in res} == {v.id for v in variables if "tag1" in v.tags}
async def test_sorted_by_name_asc(
self,
session,
variables,
):
res = await read_variables(session, sort=VariableSort.NAME_ASC)
assert len(res) == 4
assert [r.name for r in res] == sorted([v.name for v in variables])
async def test_sorted_by_name_desc(
self,
session,
variables,
):
res = await read_variables(session, sort=VariableSort.NAME_DESC)
assert len(res) == 4
assert [r.name for r in res] == sorted(
[v.name for v in variables], reverse=True
)
| TestReadVariables |
python | davidhalter__jedi | jedi/inference/value/klass.py | {
"start": 19275,
"end": 20483
} | class ____(ValueWrapper, FunctionMixin):
"""
A dataclass(-like) decorator with custom parameters.
.. code:: python
@dataclass(init=True) # this
class A: ...
@dataclass_transform
def create_model(*, init=False): pass
@create_model(init=False) # or this
class B: ...
"""
def __init__(self, function, arguments, default_init: bool = True):
"""
Args:
function: Decoratee | function
arguments: The parameters to the dataclass function decorator
default_init: Boolean to indicate the default init value
"""
super().__init__(function)
argument_init = self._init_param_value(arguments)
self.init_param_mode = (
argument_init if argument_init is not None else default_init
)
def _init_param_value(self, arguments) -> Optional[bool]:
if not arguments.argument_node:
return None
arg_nodes = (
arguments.argument_node.children
if arguments.argument_node.type == "arglist"
else [arguments.argument_node]
)
return init_param_value(arg_nodes)
| DataclassDecorator |
python | getsentry__sentry | tests/sentry/flags/endpoints/test_secrets.py | {
"start": 11493,
"end": 12877
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-flag-hooks-signing-secret"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.obj = FlagWebHookSigningSecretModel.objects.create(
created_by=self.user.id,
organization=self.organization,
provider="launchdarkly",
secret="123456123456",
)
self.url = reverse(self.endpoint, args=(self.organization.id, self.obj.id))
@property
def features(self) -> dict[str, bool]:
return {}
def test_delete(self) -> None:
with self.feature(self.features):
response = self.client.delete(self.url)
assert response.status_code == 204
def test_delete_other_organization(self) -> None:
"""Attempt to delete a secret outside your organization."""
org = self.create_organization()
obj = FlagWebHookSigningSecretModel.objects.create(
created_by=self.user.id,
organization=org,
provider="launchdarkly",
secret="123456123456",
)
url = reverse(self.endpoint, args=(self.organization.id, obj.id))
with self.feature(self.features):
response = self.client.delete(url)
assert response.status_code == 404
| OrganizationFlagsWebHookSigningSecretEndpointTestCase |
python | python-openxml__python-docx | tests/text/test_tabstops.py | {
"start": 5252,
"end": 11626
} | class ____:
def it_knows_its_length(self, len_fixture):
tab_stops, expected_value = len_fixture
assert len(tab_stops) == expected_value
def it_can_iterate_over_its_tab_stops(self, iter_fixture):
tab_stops, expected_count, tab_stop_, TabStop_, expected_calls = iter_fixture
count = 0
for tab_stop in tab_stops:
assert tab_stop is tab_stop_
count += 1
assert count == expected_count
assert TabStop_.call_args_list == expected_calls
def it_can_get_a_tab_stop_by_index(self, index_fixture):
tab_stops, idx, TabStop_, tab, tab_stop_ = index_fixture
tab_stop = tab_stops[idx]
TabStop_.assert_called_once_with(tab)
assert tab_stop is tab_stop_
def it_raises_on_indexed_access_when_empty(self):
tab_stops = TabStops(element("w:pPr"))
with pytest.raises(IndexError):
tab_stops[0]
def it_can_add_a_tab_stop(self, add_tab_fixture):
tab_stops, position, kwargs, expected_xml = add_tab_fixture
tab_stops.add_tab_stop(position, **kwargs)
assert tab_stops._element.xml == expected_xml
def it_can_delete_a_tab_stop(self, del_fixture):
tab_stops, idx, expected_xml = del_fixture
del tab_stops[idx]
assert tab_stops._element.xml == expected_xml
def it_raises_on_del_idx_invalid(self, del_raises_fixture):
tab_stops, idx = del_raises_fixture
with pytest.raises(IndexError) as exc:
del tab_stops[idx]
assert exc.value.args[0] == "tab index out of range"
def it_can_clear_all_its_tab_stops(self, clear_all_fixture):
tab_stops, expected_xml = clear_all_fixture
tab_stops.clear_all()
assert tab_stops._element.xml == expected_xml
# fixture --------------------------------------------------------
@pytest.fixture(
params=[
"w:pPr",
"w:pPr/w:tabs/w:tab{w:pos=42}",
"w:pPr/w:tabs/(w:tab{w:pos=24},w:tab{w:pos=42})",
]
)
def clear_all_fixture(self, request):
pPr_cxml = request.param
tab_stops = TabStops(element(pPr_cxml))
expected_xml = xml("w:pPr")
return tab_stops, expected_xml
@pytest.fixture(
params=[
("w:pPr/w:tabs/w:tab{w:pos=42}", 0, "w:pPr"),
(
"w:pPr/w:tabs/(w:tab{w:pos=24},w:tab{w:pos=42})",
0,
"w:pPr/w:tabs/w:tab{w:pos=42}",
),
(
"w:pPr/w:tabs/(w:tab{w:pos=24},w:tab{w:pos=42})",
1,
"w:pPr/w:tabs/w:tab{w:pos=24}",
),
]
)
def del_fixture(self, request):
pPr_cxml, idx, expected_cxml = request.param
tab_stops = TabStops(element(pPr_cxml))
expected_xml = xml(expected_cxml)
return tab_stops, idx, expected_xml
@pytest.fixture(
params=[
("w:pPr", 0),
("w:pPr/w:tabs/w:tab{w:pos=42}", 1),
]
)
def del_raises_fixture(self, request):
tab_stops_cxml, idx = request.param
tab_stops = TabStops(element(tab_stops_cxml))
return tab_stops, idx
@pytest.fixture(
params=[
("w:pPr", Twips(42), {}, "w:pPr/w:tabs/w:tab{w:pos=42,w:val=left}"),
(
"w:pPr",
Twips(72),
{"alignment": WD_TAB_ALIGNMENT.RIGHT},
"w:pPr/w:tabs/w:tab{w:pos=72,w:val=right}",
),
(
"w:pPr",
Twips(24),
{"alignment": WD_TAB_ALIGNMENT.CENTER, "leader": WD_TAB_LEADER.DOTS},
"w:pPr/w:tabs/w:tab{w:pos=24,w:val=center,w:leader=dot}",
),
(
"w:pPr/w:tabs/w:tab{w:pos=42}",
Twips(72),
{},
"w:pPr/w:tabs/(w:tab{w:pos=42},w:tab{w:pos=72,w:val=left})",
),
(
"w:pPr/w:tabs/w:tab{w:pos=42}",
Twips(24),
{},
"w:pPr/w:tabs/(w:tab{w:pos=24,w:val=left},w:tab{w:pos=42})",
),
(
"w:pPr/w:tabs/w:tab{w:pos=42}",
Twips(42),
{},
"w:pPr/w:tabs/(w:tab{w:pos=42},w:tab{w:pos=42,w:val=left})",
),
]
)
def add_tab_fixture(self, request):
pPr_cxml, position, kwargs, expected_cxml = request.param
tab_stops = TabStops(element(pPr_cxml))
expected_xml = xml(expected_cxml)
return tab_stops, position, kwargs, expected_xml
@pytest.fixture(
params=[
("w:pPr/w:tabs/w:tab{w:pos=0}", 0),
("w:pPr/w:tabs/(w:tab{w:pos=1},w:tab{w:pos=2},w:tab{w:pos=3})", 1),
("w:pPr/w:tabs/(w:tab{w:pos=4},w:tab{w:pos=5},w:tab{w:pos=6})", 2),
]
)
def index_fixture(self, request, TabStop_, tab_stop_):
pPr_cxml, idx = request.param
pPr = element(pPr_cxml)
tab = pPr.xpath("./w:tabs/w:tab")[idx]
tab_stops = TabStops(pPr)
return tab_stops, idx, TabStop_, tab, tab_stop_
@pytest.fixture(
params=[
("w:pPr", 0),
("w:pPr/w:tabs/w:tab{w:pos=2880}", 1),
("w:pPr/w:tabs/(w:tab{w:pos=2880},w:tab{w:pos=5760})", 2),
]
)
def iter_fixture(self, request, TabStop_, tab_stop_):
pPr_cxml, expected_count = request.param
pPr = element(pPr_cxml)
tab_elms = pPr.xpath("//w:tab")
tab_stops = TabStops(pPr)
expected_calls = [call(tab) for tab in tab_elms]
return tab_stops, expected_count, tab_stop_, TabStop_, expected_calls
@pytest.fixture(
params=[
("w:pPr", 0),
("w:pPr/w:tabs/w:tab{w:pos=2880}", 1),
]
)
def len_fixture(self, request):
tab_stops_cxml, expected_value = request.param
tab_stops = TabStops(element(tab_stops_cxml))
return tab_stops, expected_value
# fixture components ---------------------------------------------
@pytest.fixture
def TabStop_(self, request, tab_stop_):
return class_mock(request, "docx.text.tabstops.TabStop", return_value=tab_stop_)
@pytest.fixture
def tab_stop_(self, request):
return instance_mock(request, TabStop)
| DescribeTabStops |
python | realpython__materials | python-class/crafts.py | {
"start": 412,
"end": 495
} | class ____(Vehicle):
def drive(self):
print("Driving on the road...")
| Car |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 60035,
"end": 60422
} | class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('mode', _nvmlProcessMode_t),
('numProcArrayEntries', c_uint),
('procArray', POINTER(c_nvmlProcessDetail_v1_t)),
]
_fmt_ = {'numProcArrayEntries': "%d B"}
c_nvmlProcessDetailList_t = c_nvmlProcessDetailList_v1_t
nvmlProcessDetailList_v1 = 0x1000018
| c_nvmlProcessDetailList_v1_t |
python | zarr-developers__zarr-python | src/zarr/core/dtype/npy/time.py | {
"start": 18503,
"end": 27921
} | class ____(TimeDTypeBase[np.dtypes.DateTime64DType, np.datetime64], HasEndianness):
"""
A Zarr data type for arrays containing NumPy Datetime64 data.
Wraps the ``np.dtypes.TimeDelta64DType`` data type. Scalars for this data type
are instances of ``np.datetime64``.
Attributes
----------
dtype_cls : Type[np.dtypesTimeDelta64DType]
The numpy dtype class for this data type.
unit : DateTimeUnit
The unit of time for this data type.
scale_factor : int
The scale factor for the time unit.
References
----------
The Zarr V2 representation of this data type is defined in the Zarr V2
[specification document](https://github.com/zarr-developers/zarr-specs/blob/main/docs/v2/v2.0.rst#data-type-encoding).
The Zarr V3 representation of this data type is defined in the ``numpy.datetime64``
[specification document](https://github.com/zarr-developers/zarr-extensions/tree/main/data-types/numpy.datetime64)
"""
dtype_cls = np.dtypes.DateTime64DType # type: ignore[assignment]
_zarr_v3_name: ClassVar[Literal["numpy.datetime64"]] = "numpy.datetime64"
_zarr_v2_names: ClassVar[tuple[Literal[">M8"], Literal["<M8"]]] = (">M8", "<M8")
_numpy_name: ClassVar[Literal["datetime64"]] = "datetime64"
unit: DateTimeUnit = "generic"
scale_factor: int = 1
@classmethod
def _check_json_v2(cls, data: DTypeJSON) -> TypeGuard[DateTime64JSON_V2]:
"""
Check that the input is a valid JSON representation of this data type.
Parameters
----------
data : DTypeJSON
The JSON data to check.
Returns
-------
TypeGuard[DateTime64JSON_V2]
True if the input is a valid JSON representation of a NumPy datetime64 data type,
otherwise False.
"""
if not check_dtype_spec_v2(data):
return False
name = data["name"]
if not isinstance(name, str):
return False
if not name.startswith(cls._zarr_v2_names):
return False
if len(name) == 3:
# no unit, and
# we already checked that this string is either <M8 or >M8
return True
else:
return name[4:-1].endswith(DATETIME_UNIT) and name[-1] == "]"
@classmethod
def _check_json_v3(cls, data: DTypeJSON) -> TypeGuard[DateTime64JSON_V3]:
"""
Check that the input is a valid JSON representation of this class in Zarr V3.
Parameters
----------
data : DTypeJSON
The JSON data to check.
Returns
-------
TypeGuard[DateTime64JSON_V3]
True if the input is a valid JSON representation of a numpy datetime64 data type in Zarr V3, False otherwise.
"""
return (
isinstance(data, dict)
and set(data.keys()) == {"name", "configuration"}
and data["name"] == cls._zarr_v3_name
and isinstance(data["configuration"], dict)
and set(data["configuration"].keys()) == {"unit", "scale_factor"}
)
@classmethod
def _from_json_v2(cls, data: DTypeJSON) -> Self:
"""
Create an instance of this data type from a Zarr V2-flavored JSON representation.
This method checks if the provided JSON data is a valid representation of this class.
If valid, it creates an instance using the native NumPy dtype. Otherwise, it raises a
DataTypeValidationError.
Parameters
----------
data : DTypeJSON
The JSON data to parse.
Returns
-------
Self
An instance of this data type.
Raises
------
DataTypeValidationError
If the input JSON is not a valid representation of this class.
"""
if cls._check_json_v2(data):
name = data["name"]
return cls.from_native_dtype(np.dtype(name))
msg = (
f"Invalid JSON representation of {cls.__name__}. Got {data!r}, expected a string "
f"representation of an instance of {cls.dtype_cls}"
)
raise DataTypeValidationError(msg)
@classmethod
def _from_json_v3(cls, data: DTypeJSON) -> Self:
"""
Create an instance of this data type from a Zarr V3-flavored JSON representation.
This method checks if the provided JSON data is a valid representation of this class.
If valid, it creates an instance using the native NumPy dtype. Otherwise, it raises a
DataTypeValidationError.
Parameters
----------
data : DTypeJSON
The JSON data to parse.
Returns
-------
Self
An instance of this data type.
Raises
------
DataTypeValidationError
If the input JSON is not a valid representation of this class.
"""
if cls._check_json_v3(data):
unit = data["configuration"]["unit"]
scale_factor = data["configuration"]["scale_factor"]
return cls(unit=unit, scale_factor=scale_factor)
msg = (
f"Invalid JSON representation of {cls.__name__}. Got {data!r}, expected a dict "
f"with a 'name' key with the value 'numpy.datetime64', "
"and a 'configuration' key with a value of a dict with a 'unit' key and a "
"'scale_factor' key"
)
raise DataTypeValidationError(msg)
@overload
def to_json(self, zarr_format: Literal[2]) -> DateTime64JSON_V2: ...
@overload
def to_json(self, zarr_format: Literal[3]) -> DateTime64JSON_V3: ...
def to_json(self, zarr_format: ZarrFormat) -> DateTime64JSON_V2 | DateTime64JSON_V3:
"""
Serialize this data type to JSON.
Parameters
----------
zarr_format : ZarrFormat
The Zarr format version (2 or 3).
Returns
-------
DateTime64JSON_V2 | DateTime64JSON_V3
The JSON representation of the data type.
Raises
------
ValueError
If the zarr_format is not 2 or 3.
"""
if zarr_format == 2:
name = self.to_native_dtype().str
return {"name": name, "object_codec_id": None}
elif zarr_format == 3:
return {
"name": self._zarr_v3_name,
"configuration": {"unit": self.unit, "scale_factor": self.scale_factor},
}
raise ValueError(f"zarr_format must be 2 or 3, got {zarr_format}") # pragma: no cover
def _check_scalar(self, data: object) -> TypeGuard[DateTimeLike]:
"""
Check if the input is convertible to a scalar of this data type.
Parameters
----------
data : object
The object to check.
Returns
-------
TypeGuard[DateTimeLike]
True if the input is a scalar of this data type, False otherwise.
"""
if data is None:
return True
return isinstance(data, str | int | bytes | np.datetime64 | datetime)
def _cast_scalar_unchecked(self, data: DateTimeLike) -> np.datetime64:
"""
Cast the input to a scalar of this data type without any type checking.
Parameters
----------
data : DateTimeLike
The scalar data to cast.
Returns
-------
numpy.datetime64
The input cast to a NumPy datetime scalar.
"""
return self.to_native_dtype().type(data, f"{self.scale_factor}{self.unit}")
def cast_scalar(self, data: object) -> np.datetime64:
"""
Cast the input to a scalar of this data type after a type check.
Parameters
----------
data : object
The scalar value to cast.
Returns
-------
numpy.datetime64
The input cast to a NumPy datetime scalar.
Raises
------
TypeError
If the data cannot be converted to a numpy datetime scalar.
"""
if self._check_scalar(data):
return self._cast_scalar_unchecked(data)
msg = (
f"Cannot convert object {data!r} with type {type(data)} to a scalar compatible with the "
f"data type {self}."
)
raise TypeError(msg)
def default_scalar(self) -> np.datetime64:
"""
Return the default scalar value for this data type.
Returns
-------
numpy.datetime64
The default scalar value, which is a 'Not-a-Time' (NaT) value
"""
return np.datetime64("NaT")
def from_json_scalar(self, data: JSON, *, zarr_format: ZarrFormat) -> np.datetime64:
"""
Read a JSON-serializable value as a scalar.
Parameters
----------
data : JSON
The JSON-serializable value.
zarr_format : ZarrFormat
The zarr format version.
Returns
-------
numpy.datetime64
The numpy datetime scalar.
Raises
------
TypeError
If the input is not a valid integer type.
"""
if check_json_time(data):
return self._cast_scalar_unchecked(data)
raise TypeError(f"Invalid type: {data}. Expected an integer.") # pragma: no cover
| DateTime64 |
python | joke2k__faker | faker/providers/automotive/de_DE/__init__.py | {
"start": 63,
"end": 6445
} | class ____(AutomotiveProvider):
"""Implement automotive provider for ``de_DE`` locale.
Sources:
- http://berlin.de/daten/liste-der-kfz-kennzeichen/kfz-kennz-d.csv
"""
license_plate_prefix = (
"A",
"AA",
"AB",
"ABI",
"ABG",
"AC",
"AE",
"AIC",
"AK",
"AM",
"AN",
"AÖ",
"AP",
"AS",
"AUR",
"AW",
"AZ",
"B",
"BA",
"BAD",
"BAR",
"BB",
"BC",
"BD",
"BGL",
"BI",
"BIR",
"BIT",
"BK",
"BL",
"BLK",
"BM",
"BN",
"BO",
"BOR",
"BOT",
"BP",
"BRA",
"BRB",
"BS",
"BT",
"BTF",
"BÜS",
"BW",
"BWL",
"BYL",
"BZ",
"C",
"CB",
"CE",
"CHA",
"CO",
"COC",
"COE",
"CUX",
"CW",
"D",
"DA",
"DAH",
"DAN",
"DAU",
"DBR",
"DD",
"DE",
"DEG",
"DEL",
"DGF",
"DH",
"DL",
"DLG",
"DN",
"Do",
"DON",
"DU",
"DÜW",
"E",
"EA",
"EB",
"EBE",
"ED",
"EE",
"EF",
"EI",
"EIC",
"EL",
"EM",
"EMD",
"EMS",
"EN",
"ER",
"ERB",
"ERH",
"ERZ",
"ES",
"ESW",
"EU",
"F",
"FB",
"FD",
"FDS",
"FF",
"FFB",
"FG",
"FL",
"FN",
"FO",
"FR",
"FRG",
"FRI",
"FS",
"FT",
"FÜ",
"G",
"GAP",
"GE",
"GER",
"GF",
"GG",
"GI",
"GL",
"GM",
"GÖ",
"GP",
"GR",
"GRZ",
"GS",
"GT",
"GTH",
"GÜ",
"GZ",
"H",
"HA",
"HAL",
"HAM",
"HAS",
"HB",
"HBN",
"HD",
"HDH",
"HE",
"HEF",
"HEI",
"HEL",
"HER",
"HF",
"HG",
"HGW",
"HH",
"HI",
"HL",
"HM",
"HN",
"HO",
"HOL",
"HOM",
"HP",
"HR",
"HRO",
"HS",
"HSK",
"HST",
"HU",
"HVL",
"HWI",
"HX",
"HZ",
"IGB",
"IK",
"IN",
"IZ",
"J",
"JL",
"K",
"KA",
"KB",
"KC",
"KE",
"KEH",
"KF",
"KG",
"KH",
"KI",
"KIB",
"KL",
"KLE",
"KN",
"KO",
"KR",
"KS",
"KT",
"KU",
"KÜN",
"KUS",
"KYF",
"L",
"LA",
"LAU",
"LB",
"LD",
"LDK",
"LDS",
"LER",
"LEV",
"LG",
"LI",
"LIF",
"LIP",
"LL",
"LM",
"LÖ",
"LOS",
"LRO",
"LSA",
"LSN",
"LU",
"LWL",
"M",
"MA",
"MB",
"MD",
"ME",
"MEI",
"MG",
"MI",
"MIL",
"MK",
"MKK",
"MM",
"MN",
"MOL",
"MOS",
"MR",
"MS",
"MSH",
"MSP",
"MST",
"MTK",
"MÜ",
"MÜR",
"MVL",
"MYK",
"MZ",
"MZG",
"N",
"NB",
"ND",
"NDH",
"NE",
"NEA",
"NES",
"NEW",
"NF",
"NI",
"NK",
"NL",
"NM",
"NMS",
"NOH",
"NOM",
"NR",
"NU",
"NVP",
"NW",
"NWM",
"OA",
"OAL",
"OB",
"OD",
"OE",
"OF",
"OG",
"OH",
"OHA",
"OHV",
"OHZ",
"OL",
"OPR",
"OS",
"OSL",
"OVP",
"P",
"PA",
"PAF",
"PAN",
"PB",
"PCH",
"PE",
"PF",
"PI",
"PIR",
"PLÖ",
"PM",
"PR",
"PS",
"R",
"RA",
"RD",
"RE",
"REG",
"RO",
"ROS",
"ROW",
"RP",
"RPL",
"RS",
"RT",
"RÜD",
"RÜG",
"RV",
"RW",
"RZ",
"S",
"SAD",
"SAL",
"SAW",
"SB",
"SC",
"SDL",
"SE",
"SG",
"SH",
"SHA",
"SHG",
"SHK",
"SHL",
"SI",
"SIG",
"SIM",
"SK",
"SL",
"SLF",
"SLK",
"SLS",
"SM",
"SN",
"SO",
"SOK",
"SÖM",
"SON",
"SP",
"SPN",
"SR",
"ST",
"STA",
"STD",
"SU",
"SÜW",
"SW",
"SZ",
"TDO",
"TBB",
"TF",
"TG",
"THL",
"THW",
"TIR",
"TÖL",
"TR",
"TS",
"TÜ",
"TUT",
"UE",
"UL",
"UM",
"UN",
"V",
"VB",
"VEC",
"VER",
"VIE",
"VK",
"VR",
"VS",
"W",
"WAF",
"WAK",
"WB",
"WE",
"WEN",
"WES",
"WF",
"WHV",
"WI",
"WIL",
"WL",
"WM",
"WN",
"WND",
"WO",
"WOB",
"WST",
"WT",
"WTM",
"WÜ",
"WUG",
"WUN",
"WW",
"WZ",
"Y",
"Z",
"ZW",
)
license_plate_suffix = (
"-??-%@@@",
"-?-%@@@",
)
def license_plate(self) -> str:
"""Generate a license plate."""
prefix: str = self.random_element(self.license_plate_prefix)
suffix = self.bothify(
self.random_element(self.license_plate_suffix),
letters=string.ascii_uppercase,
)
return prefix + suffix
| Provider |
python | scrapy__scrapy | tests/test_command_parse.py | {
"start": 847,
"end": 943
} | class ____(scrapy.Spider):
custom_settings = {{
"DOWNLOAD_DELAY": 0,
}}
| BaseSpider |
python | apache__thrift | lib/py/src/TTornado.py | {
"start": 5587,
"end": 6908
} | class ____(tcpserver.TCPServer):
def __init__(self, processor, iprot_factory, oprot_factory=None,
*args, **kwargs):
super(TTornadoServer, self).__init__(*args, **kwargs)
self._processor = processor
self._iprot_factory = iprot_factory
self._oprot_factory = (oprot_factory if oprot_factory is not None
else iprot_factory)
@gen.coroutine
def handle_stream(self, stream, address):
host, port = address[:2]
trans = TTornadoStreamTransport(host=host, port=port, stream=stream)
oprot = self._oprot_factory.getProtocol(trans)
try:
while not trans.stream.closed():
try:
frame = yield trans.readFrame()
except TTransportException as e:
if e.type == TTransportException.END_OF_FILE:
break
else:
raise
tr = TMemoryBuffer(frame)
iprot = self._iprot_factory.getProtocol(tr)
yield self._processor.process(iprot, oprot)
except Exception:
logger.exception('thrift exception in handle_stream')
trans.close()
logger.info('client disconnected %s:%d', host, port)
| TTornadoServer |
python | huggingface__transformers | src/transformers/models/informer/modular_informer.py | {
"start": 2011,
"end": 2080
} | class ____(TimeSeriesFeatureEmbedder):
pass
| InformerFeatureEmbedder |
python | kamyu104__LeetCode-Solutions | Python/merge-operations-to-turn-array-into-a-palindrome.py | {
"start": 77,
"end": 702
} | class ____(object):
def minimumOperations(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 0
left, right = 0, len(nums)-1
l, r = nums[left], nums[right]
while left < right:
if l == r:
left += 1
right -= 1
l, r = nums[left], nums[right]
continue
if l < r:
left += 1
l += nums[left]
else:
right -= 1
r += nums[right]
result += 1
return result
| Solution |
python | astropy__astropy | astropy/table/tests/test_pprint.py | {
"start": 5197,
"end": 12156
} | class ____:
def _setup(self, table_type):
self.tb = table_type(BIG_WIDE_ARR)
self.tb["col0"].format = "e"
self.tb["col1"].format = ".6f"
self.tb["col0"].unit = "km**2"
self.tb["col19"].unit = "kg s m**-2"
self.ts = table_type(SMALL_ARR)
def test_empty_table(self, table_type):
t = table_type()
lines = t.pformat()
assert lines == ["<No columns>"]
c = repr(t)
masked = "masked=True " if t.masked else ""
assert c.splitlines() == [
f"<{table_type.__name__} {masked}length=0>",
"<No columns>",
]
def test_format0(self, table_type):
"""Try getting screen size but fail to defaults because testing doesn't
have access to screen (fcntl.ioctl fails).
"""
self._setup(table_type)
arr = np.arange(4000, dtype=np.float64).reshape(100, 40)
with conf.set_temp("max_width", None), conf.set_temp("max_lines", None):
lines = table_type(arr).pformat(max_lines=None, max_width=None)
width, nlines = get_terminal_size()
assert len(lines) == nlines
for line in lines[:-1]: # skip last "Length = .. rows" line
assert width - 10 < len(line) <= width
def test_format1(self, table_type):
"""Basic test of formatting, unit header row included"""
self._setup(table_type)
lines = self.tb.pformat(max_lines=8, max_width=40)
assert lines == [
" col0 col1 ... col19 ",
" km2 ... kg s / m2",
"------------ ----------- ... ---------",
"0.000000e+00 1.000000 ... 19.0",
" ... ... ... ...",
"1.960000e+03 1961.000000 ... 1979.0",
"1.980000e+03 1981.000000 ... 1999.0",
"Length = 100 rows",
]
def test_format2(self, table_type):
"""Basic test of formatting, unit header row excluded"""
self._setup(table_type)
lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=False)
assert lines == [
" col0 col1 ... col19 ",
"------------ ----------- ... ------",
"0.000000e+00 1.000000 ... 19.0",
"2.000000e+01 21.000000 ... 39.0",
" ... ... ... ...",
"1.960000e+03 1961.000000 ... 1979.0",
"1.980000e+03 1981.000000 ... 1999.0",
"Length = 100 rows",
]
def test_format3(self, table_type):
"""Include the unit header row"""
self._setup(table_type)
lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=True)
assert lines == [
" col0 col1 ... col19 ",
" km2 ... kg s / m2",
"------------ ----------- ... ---------",
"0.000000e+00 1.000000 ... 19.0",
" ... ... ... ...",
"1.960000e+03 1961.000000 ... 1979.0",
"1.980000e+03 1981.000000 ... 1999.0",
"Length = 100 rows",
]
def test_format4(self, table_type):
"""Do not include the name header row"""
self._setup(table_type)
lines = self.tb.pformat(max_lines=8, max_width=40, show_name=False)
assert lines == [
" km2 ... kg s / m2",
"------------ ----------- ... ---------",
"0.000000e+00 1.000000 ... 19.0",
"2.000000e+01 21.000000 ... 39.0",
" ... ... ... ...",
"1.960000e+03 1961.000000 ... 1979.0",
"1.980000e+03 1981.000000 ... 1999.0",
"Length = 100 rows",
]
def test_noclip(self, table_type):
"""Basic table print"""
self._setup(table_type)
lines = self.ts.pformat(max_lines=-1, max_width=-1)
assert lines == [
"col0 col1 col2",
"---- ---- ----",
" 0 1 2",
" 3 4 5",
" 6 7 8",
" 9 10 11",
" 12 13 14",
" 15 16 17",
]
def test_clip1(self, table_type):
"""max lines below hard limit of 8"""
self._setup(table_type)
lines = self.ts.pformat(max_lines=3, max_width=-1)
assert lines == [
"col0 col1 col2",
"---- ---- ----",
" 0 1 2",
" 3 4 5",
" 6 7 8",
" 9 10 11",
" 12 13 14",
" 15 16 17",
]
def test_clip2(self, table_type):
"""max lines below hard limit of 8 and output longer than 8"""
self._setup(table_type)
lines = self.ts.pformat(
max_lines=3, max_width=-1, show_unit=True, show_dtype=True
)
assert lines == [
" col0 col1 col2",
" ",
"int64 int64 int64",
"----- ----- -----",
" 0 1 2",
" ... ... ...",
" 15 16 17",
"Length = 6 rows",
]
def test_clip3(self, table_type):
"""Max lines below hard limit of 8 and max width below hard limit
of 10
"""
self._setup(table_type)
lines = self.ts.pformat(max_lines=3, max_width=1, show_unit=True)
assert lines == [
"col0 ...",
" ...",
"---- ...",
" 0 ...",
" ... ...",
" 12 ...",
" 15 ...",
"Length = 6 rows",
]
def test_clip4(self, table_type):
"""Test a range of max_lines"""
self._setup(table_type)
for max_lines in (0, 1, 4, 5, 6, 7, 8, 100, 101, 102, 103, 104, 130):
lines = self.tb.pformat(max_lines=max_lines, show_unit=False)
assert len(lines) == max(8, min(102, max_lines))
def test_pformat_all(self, table_type):
"""Test that all rows are printed by default"""
self._setup(table_type)
with pytest.warns(
AstropyDeprecationWarning,
match=(
r"The pformat_all function is deprecated "
r"and may be removed in a future version\."
),
):
lines = self.tb.pformat_all()
# +3 accounts for the three header lines in this table
assert len(lines) == BIG_WIDE_ARR.shape[0] + 3
def test_pprint_all(self, table_type, capsys):
"""Test that all rows are printed by default"""
self._setup(table_type)
self.tb.pprint_all()
(out, err) = capsys.readouterr()
# +3 accounts for the three header lines in this table
assert len(out.splitlines()) == BIG_WIDE_ARR.shape[0] + 3
| TestPprint |
python | pypa__warehouse | warehouse/manage/forms.py | {
"start": 11932,
"end": 12188
} | class ____(wtforms.Form):
response = wtforms.TextAreaField(
validators=[
wtforms.validators.InputRequired(
message=_("Provide your response to the request.")
)
]
)
| InformationRequestResponseForm |
python | Pylons__pyramid | tests/test_config/__init__.py | {
"start": 1016,
"end": 1166
} | class ____:
def __call__(self, config, discrim):
config.action(discrim, None, config.package)
dummy_callable = DummyCallable()
| DummyCallable |
python | kamyu104__LeetCode-Solutions | Python/design-search-autocomplete-system.py | {
"start": 179,
"end": 860
} | class ____(object):
def __init__(self):
self.__TOP_COUNT = 3
self.infos = []
self.leaves = {}
def insert(self, s, times):
cur = self
cur.add_info(s, times)
for c in s:
if c not in cur.leaves:
cur.leaves[c] = TrieNode()
cur = cur.leaves[c]
cur.add_info(s, times)
def add_info(self, s, times):
for p in self.infos:
if p[1] == s:
p[0] = -times
break
else:
self.infos.append([-times, s])
self.infos.sort()
if len(self.infos) > self.__TOP_COUNT:
self.infos.pop()
| TrieNode |
python | django__django | tests/admin_views/admin.py | {
"start": 3923,
"end": 4339
} | class ____(admin.ModelAdmin):
def get_urls(self):
urlpatterns = super().get_urls()
urlpatterns.append(
path(
"extra.json",
self.admin_site.admin_view(self.extra_json),
name="article_extra_json",
)
)
return urlpatterns
def extra_json(self, request):
return JsonResponse({})
| ArticleAdminWithExtraUrl |
python | readthedocs__readthedocs.org | readthedocs/allauth/providers/githubapp/views.py | {
"start": 293,
"end": 518
} | class ____(GitHubOAuth2Adapter):
provider_id = "githubapp"
oauth2_login = OAuth2LoginView.adapter_view(GitHubAppOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(GitHubAppOAuth2Adapter)
| GitHubAppOAuth2Adapter |
python | ray-project__ray | rllib/algorithms/impala/impala_learner.py | {
"start": 1848,
"end": 12425
} | class ____(Learner):
@override(Learner)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Ray metrics
self._metrics_learner_impala_update = Histogram(
name="rllib_learner_impala_update_time",
description="Time spent in the 'IMPALALearner.update()' method.",
boundaries=DEFAULT_HISTOGRAM_BOUNDARIES_SHORT_EVENTS,
tag_keys=("rllib",),
)
self._metrics_learner_impala_update.set_default_tags(
{"rllib": self.__class__.__name__}
)
self._metrics_learner_impala_update_solve_refs = Histogram(
name="rllib_learner_impala_update_solve_refs_time",
description="Time spent on resolving refs in the 'Learner.update()'",
boundaries=DEFAULT_HISTOGRAM_BOUNDARIES_SHORT_EVENTS,
tag_keys=("rllib",),
)
self._metrics_learner_impala_update_solve_refs.set_default_tags(
{"rllib": self.__class__.__name__}
)
self._metrics_learner_impala_update_make_batch_if_necessary = Histogram(
name="rllib_learner_impala_update_make_batch_if_necessary_time",
description="Time spent on making a batch in the 'Learner.update()'.",
boundaries=DEFAULT_HISTOGRAM_BOUNDARIES_SHORT_EVENTS,
tag_keys=("rllib",),
)
self._metrics_learner_impala_update_make_batch_if_necessary.set_default_tags(
{"rllib": self.__class__.__name__}
)
self._metrics_learner_impala_get_learner_state_time = Histogram(
name="rllib_learner_impala_get_learner_state_time",
description="Time spent on get_state() in IMPALALearner.update().",
boundaries=DEFAULT_HISTOGRAM_BOUNDARIES_SHORT_EVENTS,
tag_keys=("rllib",),
)
self._metrics_learner_impala_get_learner_state_time.set_default_tags(
{"rllib": self.__class__.__name__}
)
@override(Learner)
def build(self) -> None:
super().build()
# TODO (sven): We replace the dummy RLock here for APPO/IMPALA, b/c these algos
# require this for thread safety reasons.
# An RLock breaks our current OfflineData and OfflinePreLearner logic, in which
# the Learner (which contains a MetricsLogger) is serialized and deserialized.
# We will have to fix this offline RL logic first, then can remove this hack
# here and return to always using the RLock.
self.metrics._threading_lock = threading.RLock()
self._num_updates = 0
self._num_updates_lock = threading.Lock()
# Dict mapping module IDs to the respective entropy Scheduler instance.
self.entropy_coeff_schedulers_per_module: Dict[
ModuleID, Scheduler
] = LambdaDefaultDict(
lambda module_id: Scheduler(
fixed_value_or_schedule=(
self.config.get_config_for_module(module_id).entropy_coeff
),
framework=self.framework,
device=self._device,
)
)
# Create and start the GPU-loader thread. It picks up train-ready batches from
# the "GPU-loader queue" and loads them to the GPU, then places the GPU batches
# on the "update queue" for the actual RLModule forward pass and loss
# computations.
self._gpu_loader_in_queue = queue.Queue()
# Default is to have a learner thread.
if not hasattr(self, "_learner_thread_in_queue"):
self._learner_thread_in_queue = deque(maxlen=self.config.learner_queue_size)
# TODO (sven): Figure out a way to use a results queue instaad of the "reduce
# metrics each 20 updates" logic right now.
# # Results queue for reduced Learner metrics.
# # self._learner_thread_out_queue = deque(maxlen=1)
# Create and start the GPU loader thread(s).
if self.config.num_gpus_per_learner > 0:
self._gpu_loader_threads = [
_GPULoaderThread(
in_queue=self._gpu_loader_in_queue,
out_queue=self._learner_thread_in_queue,
device=self._device,
metrics_logger=self.metrics,
)
for _ in range(self.config.num_gpu_loader_threads)
]
for t in self._gpu_loader_threads:
t.start()
# Create and start the Learner thread.
self._learner_thread = _LearnerThread(
update_method=Learner.update,
in_queue=self._learner_thread_in_queue,
# TODO (sven): Figure out a way to use a results queue instaad of the "reduce
# metrics each 20 updates" logic right now.
# out_queue=self._learner_thread_out_queue,
learner=self,
)
self._learner_thread.start()
@override(Learner)
def update(
self,
training_data: TrainingData,
*,
timesteps: Dict[str, Any],
return_state: bool = False,
**kwargs,
) -> ResultDict:
"""
Args:
batch:
timesteps:
return_state: Whether to include one of the Learner worker's state from
after the update step in the returned results dict (under the
`_rl_module_state_after_update` key). Note that after an update, all
Learner workers' states should be identical, so we use the first
Learner's state here. Useful for avoiding an extra `get_weights()` call,
e.g. for synchronizing EnvRunner weights.
**kwargs:
Returns:
"""
global _CURRENT_GLOBAL_TIMESTEPS
_CURRENT_GLOBAL_TIMESTEPS = timesteps or {}
with TimerAndPrometheusLogger(self._metrics_learner_impala_update):
# Get the train batch from the object store.
with TimerAndPrometheusLogger(
self._metrics_learner_impala_update_solve_refs
):
training_data.solve_refs()
with TimerAndPrometheusLogger(
self._metrics_learner_impala_update_make_batch_if_necessary
):
batch = self._make_batch_if_necessary(training_data=training_data)
assert batch is not None
if self.config.num_gpus_per_learner > 0:
self._gpu_loader_in_queue.put(batch)
self.metrics.log_value(
(ALL_MODULES, QUEUE_SIZE_GPU_LOADER_QUEUE),
self._gpu_loader_in_queue.qsize(),
)
else:
if isinstance(self._learner_thread_in_queue, CircularBuffer):
ts_dropped = self._learner_thread_in_queue.add(batch)
self.metrics.log_value(
(ALL_MODULES, LEARNER_THREAD_ENV_STEPS_DROPPED),
ts_dropped,
reduce="sum",
)
else:
# Enqueue to Learner thread's in-queue.
_LearnerThread.enqueue(
self._learner_thread_in_queue, batch, self.metrics
)
# TODO (sven): Find a better way to limit the number of (mostly) unnecessary
# metrics reduces.
with self._num_updates_lock:
count = self._num_updates
result = {}
if count >= 20:
with self._num_updates_lock:
self._num_updates = 0
result = self.metrics.reduce()
if return_state:
with TimerAndPrometheusLogger(
self._metrics_learner_impala_get_learner_state_time
):
learner_state = self.get_state(
# Only return the state of those RLModules that are trainable.
components=[
COMPONENT_RL_MODULE + "/" + mid
for mid in self.module.keys()
if self.should_module_be_updated(mid)
],
inference_only=True,
)
learner_state[COMPONENT_RL_MODULE] = ray.put(
learner_state[COMPONENT_RL_MODULE]
)
result["_rl_module_state_after_update"] = learner_state
return result
# TODO (sven): Figure out a way to use a results queue instaad of the "reduce
# metrics each 20 updates" logic right now.
# try:
# result = self._learner_thread_out_queue.popleft()
# except IndexError:
# result = {}
# if return_state:
# learner_state = self.get_state(
# # Only return the state of those RLModules that are trainable.
# components=[
# COMPONENT_RL_MODULE + "/" + mid
# for mid in self.module.keys()
# if self.should_module_be_updated(mid)
# ],
# inference_only=True,
# )
# learner_state[COMPONENT_RL_MODULE] = ray.put(
# learner_state[COMPONENT_RL_MODULE]
# )
# result["_rl_module_state_after_update"] = learner_state
# return result
@OverrideToImplementCustomLogic_CallToSuperRecommended
def before_gradient_based_update(self, *, timesteps: Dict[str, Any]) -> None:
super().before_gradient_based_update(timesteps=timesteps)
for module_id in self.module.keys():
# Update entropy coefficient via our Scheduler.
new_entropy_coeff = self.entropy_coeff_schedulers_per_module[
module_id
].update(timestep=timesteps.get(NUM_ENV_STEPS_SAMPLED_LIFETIME, 0))
self.metrics.log_value(
(module_id, LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY),
new_entropy_coeff,
window=1,
)
@override(Learner)
def remove_module(self, module_id: str):
super().remove_module(module_id)
self.entropy_coeff_schedulers_per_module.pop(module_id)
@classmethod
@override(Learner)
def rl_module_required_apis(cls) -> list[type]:
# In order for a PPOLearner to update an RLModule, it must implement the
# following APIs:
return [ValueFunctionAPI]
ImpalaLearner = IMPALALearner
| IMPALALearner |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_convert_dtypes.py | {
"start": 145,
"end": 9314
} | class ____:
@pytest.mark.parametrize(
"convert_integer, expected", [(False, np.dtype("int32")), (True, "Int32")]
)
def test_convert_dtypes(self, convert_integer, expected, string_storage):
# Specific types are tested in tests/series/test_dtypes.py
# Just check that it works for DataFrame here
df = pd.DataFrame(
{
"a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
"b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
}
)
with pd.option_context("string_storage", string_storage):
result = df.convert_dtypes(True, True, convert_integer, False)
expected = pd.DataFrame(
{
"a": pd.Series([1, 2, 3], dtype=expected),
"b": pd.Series(["x", "y", "z"], dtype=f"string[{string_storage}]"),
}
)
tm.assert_frame_equal(result, expected)
def test_convert_empty(self):
# Empty DataFrame can pass convert_dtypes, see GH#40393
empty_df = pd.DataFrame()
tm.assert_frame_equal(empty_df, empty_df.convert_dtypes())
@td.skip_if_no("pyarrow")
def test_convert_empty_categorical_to_pyarrow(self):
# GH#59934
df = pd.DataFrame(
{
"A": pd.Categorical([None] * 5),
"B": pd.Categorical([None] * 5, categories=["B1", "B2"]),
}
)
converted = df.convert_dtypes(dtype_backend="pyarrow")
expected = df
tm.assert_frame_equal(converted, expected)
def test_convert_dtypes_retain_column_names(self):
# GH#41435
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
df.columns.name = "cols"
result = df.convert_dtypes()
tm.assert_index_equal(result.columns, df.columns)
assert result.columns.name == "cols"
def test_pyarrow_dtype_backend(self, using_nan_is_na):
pa = pytest.importorskip("pyarrow")
df = pd.DataFrame(
{
"a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
"b": pd.Series(["x", "y", None], dtype=np.dtype("O")),
"c": pd.Series([True, False, None], dtype=np.dtype("O")),
"d": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
"e": pd.Series(pd.date_range("2022", periods=3, unit="ns")),
"f": pd.Series(pd.date_range("2022", periods=3, tz="UTC").as_unit("s")),
"g": pd.Series(pd.timedelta_range("1D", periods=3)),
}
)
result = df.convert_dtypes(dtype_backend="pyarrow")
item = None if using_nan_is_na else np.nan
expected = pd.DataFrame(
{
"a": pd.arrays.ArrowExtensionArray(
pa.array([1, 2, 3], type=pa.int32())
),
"b": pd.arrays.ArrowExtensionArray(pa.array(["x", "y", None])),
"c": pd.arrays.ArrowExtensionArray(pa.array([True, False, None])),
"d": pd.arrays.ArrowExtensionArray(pa.array([item, 100.5, 200.0])),
"e": pd.arrays.ArrowExtensionArray(
pa.array(
[
datetime.datetime(2022, 1, 1),
datetime.datetime(2022, 1, 2),
datetime.datetime(2022, 1, 3),
],
type=pa.timestamp(unit="ns"),
)
),
"f": pd.arrays.ArrowExtensionArray(
pa.array(
[
datetime.datetime(2022, 1, 1),
datetime.datetime(2022, 1, 2),
datetime.datetime(2022, 1, 3),
],
type=pa.timestamp(unit="s", tz="UTC"),
)
),
"g": pd.arrays.ArrowExtensionArray(
pa.array(
[
datetime.timedelta(1),
datetime.timedelta(2),
datetime.timedelta(3),
],
type=pa.duration("ns"),
)
),
}
)
tm.assert_frame_equal(result, expected)
def test_pyarrow_dtype_backend_already_pyarrow(self):
pytest.importorskip("pyarrow")
expected = pd.DataFrame([1, 2, 3], dtype="int64[pyarrow]")
result = expected.convert_dtypes(dtype_backend="pyarrow")
tm.assert_frame_equal(result, expected)
def test_pyarrow_dtype_backend_from_pandas_nullable(self):
pa = pytest.importorskip("pyarrow")
df = pd.DataFrame(
{
"a": pd.Series([1, 2, None], dtype="Int32"),
"b": pd.Series(["x", "y", None], dtype="string[python]"),
"c": pd.Series([True, False, None], dtype="boolean"),
"d": pd.Series([None, 100.5, 200], dtype="Float64"),
}
)
result = df.convert_dtypes(dtype_backend="pyarrow")
expected = pd.DataFrame(
{
"a": pd.arrays.ArrowExtensionArray(
pa.array([1, 2, None], type=pa.int32())
),
"b": pd.arrays.ArrowExtensionArray(pa.array(["x", "y", None])),
"c": pd.arrays.ArrowExtensionArray(pa.array([True, False, None])),
"d": pd.arrays.ArrowExtensionArray(pa.array([None, 100.5, 200.0])),
}
)
tm.assert_frame_equal(result, expected)
def test_pyarrow_dtype_empty_object(self):
# GH 50970
pytest.importorskip("pyarrow")
expected = pd.DataFrame(columns=[0])
result = expected.convert_dtypes(dtype_backend="pyarrow")
tm.assert_frame_equal(result, expected)
def test_pyarrow_engine_lines_false(self):
# GH 48893
df = pd.DataFrame({"a": [1, 2, 3]})
msg = (
"dtype_backend numpy is invalid, only 'numpy_nullable' and "
"'pyarrow' are allowed."
)
with pytest.raises(ValueError, match=msg):
df.convert_dtypes(dtype_backend="numpy")
def test_pyarrow_backend_no_conversion(self):
# GH#52872
pytest.importorskip("pyarrow")
df = pd.DataFrame({"a": [1, 2], "b": 1.5, "c": True, "d": "x"})
expected = df.copy()
result = df.convert_dtypes(
convert_floating=False,
convert_integer=False,
convert_boolean=False,
convert_string=False,
dtype_backend="pyarrow",
)
tm.assert_frame_equal(result, expected)
def test_convert_dtypes_pyarrow_to_np_nullable(self):
# GH 53648
pytest.importorskip("pyarrow")
ser = pd.DataFrame(range(2), dtype="int32[pyarrow]")
result = ser.convert_dtypes(dtype_backend="numpy_nullable")
expected = pd.DataFrame(range(2), dtype="Int32")
tm.assert_frame_equal(result, expected)
def test_convert_dtypes_pyarrow_timestamp(self):
# GH 54191
pytest.importorskip("pyarrow")
ser = pd.Series(pd.date_range("2020-01-01", "2020-01-02", freq="1min"))
expected = ser.astype("timestamp[ms][pyarrow]")
result = expected.convert_dtypes(dtype_backend="pyarrow")
tm.assert_series_equal(result, expected)
def test_convert_dtypes_avoid_block_splitting(self):
# GH#55341
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": "a"})
result = df.convert_dtypes(convert_integer=False)
expected = pd.DataFrame(
{
"a": [1, 2, 3],
"b": [4, 5, 6],
"c": pd.Series(["a"] * 3, dtype="string"),
}
)
tm.assert_frame_equal(result, expected)
assert result._mgr.nblocks == 2
def test_convert_dtypes_from_arrow(self):
# GH#56581
df = pd.DataFrame([["a", datetime.time(18, 12)]], columns=["a", "b"])
result = df.convert_dtypes()
expected = df.astype({"a": "string"})
tm.assert_frame_equal(result, expected)
def test_convert_dtype_pyarrow_timezone_preserve(self):
# GH 60237
pytest.importorskip("pyarrow")
df = pd.DataFrame(
{
"timestamps": pd.Series(
pd.to_datetime(range(5), utc=True, unit="h"),
dtype="timestamp[ns, tz=UTC][pyarrow]",
)
}
)
result = df.convert_dtypes(dtype_backend="pyarrow")
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_convert_dtypes_complex(self):
# GH 60129
df = pd.DataFrame({"a": [1.0 + 5.0j, 1.5 - 3.0j], "b": [1, 2]})
expected = pd.DataFrame(
{
"a": pd.array([1.0 + 5.0j, 1.5 - 3.0j], dtype="complex128"),
"b": pd.array([1, 2], dtype="Int64"),
}
)
result = df.convert_dtypes()
tm.assert_frame_equal(result, expected)
| TestConvertDtypes |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.