language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6 to 201) | class_span (dict) | source (stringlengths, 21 to 2.38M) | target (stringlengths, 1 to 96)
|---|---|---|---|---|---|
python | django__django | django/templatetags/i18n.py | {"start": 385, "end": 659} | class ____(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = [
(k, translation.gettext(v)) for k, v in settings.LANGUAGES
]
return ""
| GetAvailableLanguagesNode |
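For context, the node in the sample above backs Django's `{% get_available_languages %}` template tag. A minimal, hedged sketch of rendering it (assumes Django settings are already configured with `USE_I18N` and `LANGUAGES`; the variable name `LANGS` is illustrative):
# Hedged sketch: renders the tag implemented by the node above.
# Assumes a configured Django settings module is already loaded.
from django.template import Context, Template

tmpl = Template(
    "{% load i18n %}"
    "{% get_available_languages as LANGS %}"
    "{% for code, name in LANGS %}{{ code }}:{{ name }} {% endfor %}"
)
print(tmpl.render(Context()))  # e.g. "en:English de:German ..."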
python | huggingface__transformers | src/transformers/models/bert/modeling_bert.py | {"start": 22166, "end": 22473} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
| BertOnlyNSPHead |
python | ray-project__ray | doc/source/serve/doc_code/tutorial_sklearn.py | {"start": 1332, "end": 2231} | class ____:
def __init__(self, model_path: str, label_path: str):
with open(model_path, "rb") as f:
self.model = pickle.load(f)
with open(label_path) as f:
self.label_list = json.load(f)
async def __call__(self, starlette_request: Request) -> Dict:
payload = await starlette_request.json()
print("Worker: received starlette request with data", payload)
input_vector = [
payload["sepal length"],
payload["sepal width"],
payload["petal length"],
payload["petal width"],
]
prediction = self.model.predict([input_vector])[0]
human_name = self.label_list[prediction]
return {"result": human_name}
# __doc_define_servable_end__
# __doc_deploy_begin__
boosting_model = BoostingModel.bind(MODEL_PATH, LABEL_PATH)
# __doc_deploy_end__
| BoostingModel |
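As a hedged illustration of how the servable in the sample above might be queried once deployed with Ray Serve (the endpoint URL, port, and feature values are assumptions, not part of the sample):
# Hypothetical client call against a locally running Serve deployment.
# "http://localhost:8000/" matches Serve defaults, but treat it as an assumption.
import requests

sample = {
    "sepal length": 5.1,
    "sepal width": 3.5,
    "petal length": 1.4,
    "petal width": 0.2,
}
resp = requests.post("http://localhost:8000/", json=sample)
print(resp.json())  # e.g. {"result": "<label name>"}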
python | python__mypy | mypy/plugin.py | {"start": 8717, "end": 9479} | class ____:
"""
A common plugin API (shared between semantic analysis and type checking phases)
that all plugin hooks get independently of the context.
"""
# Global mypy options.
# Per-file options can be only accessed on various
# XxxPluginInterface classes.
options: Options
@abstractmethod
def lookup_fully_qualified(self, fullname: str) -> SymbolTableNode | None:
"""Lookup a symbol by its full name (including module).
        This lookup function is available for all plugins. Return None if a name
is not found. This function doesn't support lookup from current scope.
Use SemanticAnalyzerPluginInterface.lookup_qualified() for this."""
raise NotImplementedError
@trait
| CommonPluginApi |
python | langchain-ai__langchain | libs/core/langchain_core/prompts/message.py | {"start": 393, "end": 2636} | class ____(Serializable, ABC):
"""Base class for message prompt templates."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return `True` as this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "prompts", "chat"]`
"""
return ["langchain", "prompts", "chat"]
@abstractmethod
def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
"""Format messages from kwargs. Should return a list of `BaseMessage` objects.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
List of `BaseMessage` objects.
"""
async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]:
"""Async format messages from kwargs.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
List of `BaseMessage` objects.
"""
return self.format_messages(**kwargs)
@property
@abstractmethod
def input_variables(self) -> list[str]:
"""Input variables for this prompt template.
Returns:
List of input variables.
"""
def pretty_repr(
self,
html: bool = False, # noqa: FBT001,FBT002
) -> str:
"""Human-readable representation.
Args:
html: Whether to format as HTML.
Returns:
Human-readable representation.
"""
raise NotImplementedError
def pretty_print(self) -> None:
"""Print a human-readable representation."""
print(self.pretty_repr(html=is_interactive_env())) # noqa: T201
def __add__(self, other: Any) -> ChatPromptTemplate:
"""Combine two prompt templates.
Args:
other: Another prompt template.
Returns:
Combined prompt template.
"""
# Import locally to avoid circular import.
from langchain_core.prompts.chat import ChatPromptTemplate # noqa: PLC0415
prompt = ChatPromptTemplate(messages=[self])
return prompt + other
| BaseMessagePromptTemplate |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_pretty.py | {"start": 3926, "end": 4002} | class ____:
def __repr__(self):
return "Breaking(\n)"
| BreakingRepr |
python | getsentry__sentry | src/sentry/taskworker/scheduler/schedules.py | {"start": 1037, "end": 3100} | class ____(Schedule):
"""
    Task schedules defined as `datetime.timedelta` intervals.
    If a timedelta interval loses its last_run state, it will assume
    that at least one interval has been missed, and it will become due immediately.
    After the first spawn, the schedule will align to the interval's duration.
"""
def __init__(self, delta: timedelta) -> None:
self._delta = delta
if delta.microseconds:
raise ValueError("microseconds are not supported")
if delta.total_seconds() < 0:
raise ValueError("interval must be at least one second")
def monitor_interval(self) -> tuple[int, MonitorConfigScheduleUnit]:
time_units: tuple[tuple[MonitorConfigScheduleUnit, float], ...] = (
("day", 60 * 60 * 24.0),
("hour", 60 * 60.0),
("minute", 60.0),
)
seconds = self._delta.total_seconds()
for unit, divider in time_units:
if seconds >= divider:
interval = int(seconds / divider)
return (interval, unit)
return (int(seconds), "second")
def is_due(self, last_run: datetime | None = None) -> bool:
"""Check if the schedule is due to run again based on last_run."""
if last_run is None:
return True
remaining = self.remaining_seconds(last_run)
return remaining <= 0
def remaining_seconds(self, last_run: datetime | None = None) -> int:
"""The number of seconds remaining until the next task should spawn"""
if last_run is None:
return 0
# floor to timestamp as microseconds are not relevant
now = int(timezone.now().timestamp())
last_run_ts = int(last_run.timestamp())
seconds_remaining = self._delta.total_seconds() - (now - last_run_ts)
return max(int(seconds_remaining), 0)
def runtime_after(self, start: datetime) -> datetime:
"""Get the next time a task should run after start"""
return start + self._delta
| TimedeltaSchedule |
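A small hedged sketch of the scheduling semantics described in the docstring above (the import path is taken from the sample's `path` column but is still an assumption; timestamps are illustrative):
# Hedged usage sketch of TimedeltaSchedule as defined in the sample above.
from datetime import timedelta

from django.utils import timezone

from sentry.taskworker.scheduler.schedules import TimedeltaSchedule

schedule = TimedeltaSchedule(timedelta(minutes=5))
print(schedule.monitor_interval())           # (5, "minute")
print(schedule.is_due(last_run=None))        # True: missing last_run state counts as overdue
last_run = timezone.now() - timedelta(minutes=2)
print(schedule.remaining_seconds(last_run))  # roughly 180 seconds remain of the 300-second interval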
python | tensorflow__tensorflow | tensorflow/python/ops/lookup_ops.py | {"start": 79931, "end": 93637} | class ____(LookupInterface):
"""A mutable hash table with faster lookups and higher memory usage.
Data can be inserted by calling the `insert` method and removed by calling the
`remove` method. It does not support initialization via the init method.
Compared to `MutableHashTable`, `DenseHashTable` offers generally faster
`insert`, `remove` and `lookup` operations, in exchange for a higher overall
memory footprint.
It uses "open addressing" with quadratic reprobing to resolve collisions. This
requires specifying two keys in the key space, `empty_key` and `deleted_key`,
  that can never be inserted into the table.
Unlike `MutableHashTable`, `DenseHashTable` does not require additional memory
for temporary tensors created during checkpointing and restore operations.
Example usage:
>>> table = tf.lookup.experimental.DenseHashTable(
... key_dtype=tf.string,
... value_dtype=tf.int64,
... default_value=-1,
... empty_key='',
... deleted_key='$')
>>> keys = tf.constant(['a', 'b', 'c'])
>>> values = tf.constant([0, 1, 2], dtype=tf.int64)
>>> table.insert(keys, values)
>>> table.remove(tf.constant(['c']))
>>> table.lookup(tf.constant(['a', 'b', 'c','d'])).numpy()
array([ 0, 1, -1, -1])
"""
# TODO(andreasst): consider extracting common code with MutableHashTable into
# a common superclass.
def __init__(self,
key_dtype,
value_dtype,
default_value,
empty_key,
deleted_key,
initial_num_buckets=None,
name="MutableDenseHashTable",
checkpoint=True,
experimental_is_anonymous=False):
"""Creates an empty `DenseHashTable` object.
Creates a table, the type of its keys and values are specified by key_dtype
and value_dtype, respectively.
Args:
key_dtype: the type of the key tensors.
value_dtype: the type of the value tensors.
default_value: The value to use if a key is missing in the table.
empty_key: the key to use to represent empty buckets internally. Must not
be used in insert, remove or lookup operations.
deleted_key: the key to use to represent deleted buckets internally. Must
not be used in insert, remove or lookup operations and be different from
the empty_key.
initial_num_buckets: the initial number of buckets (optional,
default to 2^17=131072). Note that the default value is
relatively large (~1MB), so if you are going to create many
tables (likely the case when `experimental_is_anonymous` is
`True`), you should set `initial_num_buckets` to a smaller
value to reduce memory usage.
name: A name for the operation (optional).
checkpoint: if True, the contents of the table are saved to and restored
from checkpoints. If `shared_name` is empty for a checkpointed table, it
is shared using the table node name.
experimental_is_anonymous: Whether to use anonymous mode for the
table (default is False). In anonymous mode, the table
resource can only be accessed via a resource handle. It can't
be looked up by a name. When all resource handles pointing to
that resource are gone, the resource will be deleted
automatically.
Returns:
A `DenseHashTable` object.
Raises:
ValueError: If checkpoint is True and no name was specified.
"""
self._default_value = ops.convert_to_tensor(
default_value, dtype=value_dtype, name="default_value")
self._key_dtype = key_dtype
self._value_dtype = value_dtype
# TODO(b/201578996): Pick a good default for initial_num_buckets
# other than 2^17.
self._initial_num_buckets = initial_num_buckets
self._value_shape = self._default_value.get_shape()
self._checkpoint = checkpoint
self._name = name
self._empty_key = empty_key
self._deleted_key = deleted_key
self._is_anonymous = experimental_is_anonymous
if not self._is_anonymous:
self._shared_name = None
if context.executing_eagerly():
# TODO(allenl): This will leak memory due to kernel caching by
# the shared_name attribute value (but is better than the
# alternative of sharing everything by default when executing
# eagerly; hopefully creating tables in a loop is uncommon).
self._shared_name = "table_%d" % (ops.uid(),)
super(DenseHashTable, self).__init__(key_dtype, value_dtype)
self._resource_handle = self._create_resource()
if checkpoint:
saveable = DenseHashTable._Saveable(self, name)
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
def _create_resource(self):
empty_key = ops.convert_to_tensor(
self._empty_key, dtype=self._key_dtype, name="empty_key")
deleted_key = ops.convert_to_tensor(
self._deleted_key, dtype=self._key_dtype, name="deleted_key")
if self._is_anonymous:
table_ref = gen_lookup_ops.anonymous_mutable_dense_hash_table(
empty_key=empty_key,
deleted_key=deleted_key,
value_dtype=self._value_dtype,
value_shape=self._value_shape,
initial_num_buckets=self._initial_num_buckets,
name=self._name)
else:
# The table must be shared if checkpointing is requested for multi-worker
# training to work correctly. Use the node name if no shared_name has been
# explicitly specified.
use_node_name_sharing = self._checkpoint and self._shared_name is None
table_ref = gen_lookup_ops.mutable_dense_hash_table_v2(
empty_key=empty_key,
deleted_key=deleted_key,
shared_name=self._shared_name,
use_node_name_sharing=use_node_name_sharing,
value_dtype=self._value_dtype,
value_shape=self._value_shape,
initial_num_buckets=self._initial_num_buckets,
name=self._name)
if context.executing_eagerly():
self._table_name = None
else:
self._table_name = table_ref.op.name.split("/")[-1]
return table_ref
@property
def name(self):
return self._table_name
def size(self, name=None):
"""Compute the number of elements in this table.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this table.
"""
with ops.name_scope(name, "%s_Size" % self.name, [self.resource_handle]):
with ops.colocate_with(self.resource_handle):
return gen_lookup_ops.lookup_table_size_v2(self.resource_handle)
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values.
The `default_value` is used for keys not present in the table.
Args:
keys: Keys to look up. Can be a tensor of any shape. Must match the
table's key_dtype.
name: A name for the operation (optional).
Returns:
A tensor containing the values in the same shape as `keys` using the
table's value type.
Raises:
TypeError: when `keys` do not match the table data types.
"""
with ops.name_scope(name, "%s_lookup_table_find" % self.name,
[self.resource_handle, keys]):
keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name="keys")
with ops.colocate_with(self.resource_handle):
values = gen_lookup_ops.lookup_table_find_v2(self.resource_handle, keys,
self._default_value)
return values
def insert_or_assign(self, keys, values, name=None):
"""Associates `keys` with `values`.
Args:
keys: Keys to insert. Can be a tensor of any shape. Must match the table's
key type.
values: Values to be associated with keys. Must be a tensor of the same
shape as `keys` and match the table's value type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` or `values` doesn't match the table data
types.
"""
with ops.name_scope(name, "%s_lookup_table_insert" % self.name,
[self.resource_handle, keys, values]):
keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name="keys")
values = ops.convert_to_tensor(
values, dtype=self._value_dtype, name="values")
with ops.colocate_with(self.resource_handle):
op = gen_lookup_ops.lookup_table_insert_v2(self.resource_handle, keys,
values)
return op
def insert(self, keys, values, name=None):
"""Associates `keys` with `values`.
Args:
keys: Keys to insert. Can be a tensor of any shape. Must match the table's
key type.
values: Values to be associated with keys. Must be a tensor of the same
shape as `keys` and match the table's value type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` or `values` doesn't match the table data
types.
"""
return self.insert_or_assign(keys, values, name)
def erase(self, keys, name=None):
"""Removes `keys` and its associated values from the table.
If a key is not present in the table, it is silently ignored.
Args:
keys: Keys to remove. Can be a tensor of any shape. Must match the table's
key type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` do not match the table data types.
"""
if keys.dtype != self._key_dtype:
raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
(self._key_dtype, keys.dtype))
with ops.name_scope(name, "%s_lookup_table_remove" % self.name,
(self.resource_handle, keys, self._default_value)):
# pylint: disable=protected-access
op = gen_lookup_ops.lookup_table_remove_v2(self.resource_handle, keys)
return op
def remove(self, keys, name=None):
"""Removes `keys` and its associated values from the table.
If a key is not present in the table, it is silently ignored.
Args:
keys: Keys to remove. Can be a tensor of any shape. Must match the table's
key type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` do not match the table data types.
"""
return self.erase(keys, name)
def export(self, name=None):
"""Returns tensors of all keys and values in the table.
Args:
name: A name for the operation (optional).
Returns:
A pair of tensors with the first tensor containing all keys and the
second tensors containing all values in the table.
"""
with ops.name_scope(name, "%s_lookup_table_export_values" % self.name,
[self.resource_handle]):
with ops.colocate_with(self.resource_handle):
exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(
self.resource_handle, self._key_dtype, self._value_dtype)
return exported_keys, exported_values
def _serialize_to_tensors(self):
"""Implements checkpointing interface in `Trackable`."""
tensors = self.export()
return {"-keys": tensors[0], "-values": tensors[1]}
def _restore_from_tensors(self, restored_tensors):
"""Implements checkpointing interface in `Trackable`."""
with ops.name_scope("%s_table_restore" % self._name):
with ops.colocate_with(self.resource_handle):
return gen_lookup_ops.lookup_table_import_v2(
self.resource_handle,
restored_tensors["-keys"],
restored_tensors["-values"])
def _copy_trackable_to_cpu(self, object_map):
"""Implements checkpointing protocols for `Trackable`."""
if self not in object_map:
# If self is not already populated in object map, instantiate the copy
object_map[self] = DenseHashTable(
self._key_dtype,
self._value_dtype,
self._default_value,
self._empty_key,
self._deleted_key,
self._initial_num_buckets,
self._name,
self._checkpoint,
self._is_anonymous
)
# Copy values from `self` to copy of `self`
serialized = self._serialize_to_tensors()
object_map[self]._restore_from_tensors(serialized) # pylint: disable=protected-access
# This class is needed for `DenseHashTable(checkpoint=True)`.
class _Saveable(BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for DenseHashTable."""
def __init__(self, table, name, table_name=None):
tensors = table.export()
specs = [
BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"),
BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values")
]
self.table_name = table_name or name
# pylint: disable=protected-access
super(DenseHashTable._Saveable, self).__init__(table, specs, name)
def restore(self, restored_tensors, restored_shapes):
del restored_shapes # unused
# pylint: disable=protected-access
with ops.name_scope("%s_table_restore" % self.table_name):
with ops.colocate_with(self.op.resource_handle):
return gen_lookup_ops.lookup_table_import_v2(self.op.resource_handle,
restored_tensors[0],
restored_tensors[1])
| DenseHashTable |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {"start": 202520, "end": 202624} | class ____(
_Int4MultiRangeTests, _MultiRangeTypeCompilation
):
pass
| Int4MultiRangeCompilationTest |
python | huggingface__transformers | src/transformers/models/smolvlm/modular_smolvlm.py | {"start": 6827, "end": 6899} | class ____(Idefics3ImageProcessorFast):
pass
| SmolVLMImageProcessorFast |
python | ipython__ipython | IPython/core/ultratb.py | {"start": 44962, "end": 46650} | class ____(ListTB):
"""Extension which holds some state: the last exception value"""
last_syntax_error: BaseException | None
def __init__(self, *, theme_name):
super().__init__(theme_name=theme_name)
self.last_syntax_error = None
def __call__(self, etype, value, elist):
self.last_syntax_error = value
super().__call__(etype, value, elist)
def structured_traceback(
self,
etype: type,
evalue: BaseException | None,
etb: TracebackType | None = None,
tb_offset: int | None = None,
context: int = 5,
) -> list[str]:
value = evalue
# If the source file has been edited, the line in the syntax error can
# be wrong (retrieved from an outdated cache). This replaces it with
# the current value.
if (
isinstance(value, SyntaxError)
and isinstance(value.filename, str)
and isinstance(value.lineno, int)
):
linecache.checkcache(value.filename)
newtext = linecache.getline(value.filename, value.lineno)
if newtext:
value.text = newtext
self.last_syntax_error = value
return super(SyntaxTB, self).structured_traceback(
etype, value, etb, tb_offset=tb_offset, context=context
)
def clear_err_state(self) -> Any | None:
"""Return the current error state and clear it"""
e = self.last_syntax_error
self.last_syntax_error = None
return e
def stb2text(self, stb: list[str]) -> str:
"""Convert a structured traceback (a list) to a string."""
return "".join(stb)
| SyntaxTB |
python | doocs__leetcode | solution/0600-0699/0628.Maximum Product of Three Numbers/Solution.py | {"start": 0, "end": 199} | class ____:
def maximumProduct(self, nums: List[int]) -> int:
nums.sort()
a = nums[-1] * nums[-2] * nums[-3]
b = nums[-1] * nums[0] * nums[1]
return max(a, b)
| Solution |
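A quick worked check of the two candidates the solution above compares (the input array is illustrative): after sorting, the answer is either the product of the three largest values or the product of the two smallest (possibly large negatives) with the largest.
# Worked example: with two large negatives, the best product pairs them with the maximum.
nums = sorted([-10, -10, 1, 3, 2])               # -> [-10, -10, 1, 2, 3]
top_three = nums[-1] * nums[-2] * nums[-3]       # 3 * 2 * 1 = 6
two_neg_and_max = nums[-1] * nums[0] * nums[1]   # 3 * -10 * -10 = 300
print(max(top_three, two_neg_and_max))           # 300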
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_bases.py | {"start": 1529, "end": 9722} | class ____:
@patch('bokeh.core.property.bases.Property.validate')
def test_is_valid_supresses_validation_detail(self, mock_validate: MagicMock) -> None:
p = bcpb.Property()
p.is_valid(None)
assert mock_validate.called
assert mock_validate.call_args[0] == (None, False)
def test_serialized_default(self) -> None:
class NormalProp(bcpb.Property[Any]):
pass
class ReadonlyProp(bcpb.Property[Any]):
_readonly = True
class NotSerializedProp(bcpb.Property[Any]):
_serialized = False
p0 = NormalProp()
assert p0.serialized is True
assert p0.readonly is False
p1 = ReadonlyProp()
assert p1.serialized is False
assert p1.readonly is True
p2 = NotSerializedProp()
assert p2.serialized is False
assert p2.readonly is False
def test_assert_bools(self) -> None:
hp = HasProps()
p = bcpb.Property()
p.asserts(True, "true")
assert p.prepare_value(hp, "foo", 10) == 10
p.asserts(False, "false")
with pytest.raises(ValueError) as e:
p.prepare_value(hp, "foo", 10)
assert str(e) == "false"
def test_assert_functions(self) -> None:
hp = HasProps()
p = bcpb.Property()
p.asserts(lambda obj, value: True, "true")
p.asserts(lambda obj, value: obj is hp, "true")
p.asserts(lambda obj, value: value==10, "true")
assert p.prepare_value(hp, "foo", 10) == 10
p.asserts(lambda obj, value: False, "false")
with pytest.raises(ValueError) as e:
p.prepare_value(hp, "foo", 10)
assert str(e) == "false"
def test_assert_msg_funcs(self) -> None:
hp = HasProps()
p = bcpb.Property()
def raise_(ex):
raise ex
p.asserts(False, lambda obj, name, value: raise_(ValueError(f"bad {hp==obj} {name} {value}")))
with pytest.raises(ValueError) as e:
p.prepare_value(hp, "foo", 10)
assert str(e) == "bad True name, 10"
def test_matches_basic_types(self, capsys: Capture) -> None:
p = bcpb.Property()
for x in [1, 1.2, "a", np.arange(4), None, False, True, {}, []]:
assert p.matches(x, x) is True
assert p.matches(x, "junk") is False
_, err = capsys.readouterr()
assert err == ""
def test_matches_compatible_arrays(self, capsys: Capture) -> None:
p = bcpb.Property()
a = np.arange(5)
b = np.arange(5)
assert p.matches(a, b) is True
assert p.matches(a, b+1) is False
for x in [1, 1.2, "a", np.arange(4), None, False]:
assert p.matches(a, x) is False
assert p.matches(x, b) is False
_, err = capsys.readouterr()
assert err == ""
def test_matches_incompatible_arrays(self, capsys: Capture) -> None:
p = bcpb.Property()
a = np.arange(5)
b = np.arange(5).astype(str)
assert p.matches(a, b) is False
_, _err = capsys.readouterr()
# no way to suppress FutureWarning in this case
# assert _err == ""
def test_matches_dicts_with_array_values(self, capsys: Capture) -> None:
p = bcpb.Property()
d1 = dict(foo=np.arange(10))
d2 = dict(foo=np.arange(10))
assert p.matches(d1, d1) is True
assert p.matches(d1, d2) is True
# XXX not sure if this is preferable to have match, or not
assert p.matches(d1, dict(foo=list(range(10)))) is True
assert p.matches(d1, dict(foo=np.arange(11))) is False
assert p.matches(d1, dict(bar=np.arange(10))) is False
assert p.matches(d1, dict(bar=10)) is False
_, err = capsys.readouterr()
assert err == ""
def test_matches_non_dict_containers_with_array_false(self, capsys: Capture) -> None:
p = bcpb.Property()
d1 = [np.arange(10)]
d2 = [np.arange(10)]
assert p.matches(d1, d1) is True # because object identity
assert p.matches(d1, d2) is False
t1 = (np.arange(10),)
t2 = (np.arange(10),)
assert p.matches(t1, t1) is True # because object identity
assert p.matches(t1, t2) is False
_, err = capsys.readouterr()
assert err == ""
def test_matches_dicts_with_series_values(self, capsys: Capture) -> None:
pd = pytest.importorskip("pandas")
p = bcpb.Property()
d1 = pd.DataFrame(dict(foo=np.arange(10)))
d2 = pd.DataFrame(dict(foo=np.arange(10)))
assert p.matches(d1.foo, d1.foo) is True
assert p.matches(d1.foo, d2.foo) is True
# XXX not sure if this is preferable to have match, or not
assert p.matches(d1.foo, (range(10))) is True
assert p.matches(d1.foo, np.arange(11)) is False
assert p.matches(d1.foo, np.arange(10)+1) is False
assert p.matches(d1.foo, 10) is False
_, err = capsys.readouterr()
assert err == ""
def test_matches_dicts_with_index_values(self, capsys: Capture) -> None:
pd = pytest.importorskip("pandas")
p = bcpb.Property()
d1 = pd.DataFrame(dict(foo=np.arange(10)))
d2 = pd.DataFrame(dict(foo=np.arange(10)))
assert p.matches(d1.index, d1.index) is True
assert p.matches(d1.index, d2.index) is True
# XXX not sure if this is preferable to have match, or not
assert p.matches(d1.index, list(range(10))) is True
assert p.matches(d1.index, np.arange(11)) is False
assert p.matches(d1.index, np.arange(10)+1) is False
assert p.matches(d1.index, 10) is False
_, err = capsys.readouterr()
assert err == ""
def test_validation_on(self) -> None:
assert bcpb.Property._should_validate is True
assert bcpb.validation_on()
bcpb.Property._should_validate = False
assert not bcpb.validation_on()
bcpb.Property._should_validate = True
assert bcpb.validation_on()
def test__hinted_value_is_identity(self) -> None:
p = bcpb.Property()
assert p._hinted_value(10, "hint") == 10
assert p._hinted_value(10, None) == 10
@patch('bokeh.core.property.bases.Property._hinted_value')
def test_prepare_value_uses__hinted_value(self, mock_hv: MagicMock) -> None:
hp = HasProps()
p = bcpb.Property()
p.prepare_value(hp, "foo", 10)
assert mock_hv.called
def test_pandas_na(self):
pd = pytest.importorskip("pandas")
# Property.matches handles this as False could change in the future.
# pd.NA raises a TypeError when bool(pd.NA == pd.NA)
assert bcpb.Property().matches(pd.NA, pd.NA) is False
assert bcpb.Property().matches({"name": pd.NA}, {"name": 1}) is False
def test_nan(self):
# Property.matches handles this as False could change in the future.
assert bcpb.Property().matches(np.nan, np.nan) is False
assert bcpb.Property().matches({"name": np.nan}, {"name": np.nan}) is False
assert bcpb.Property().matches(np.array([np.nan]), np.array([np.nan])) is False
def test_nat(self):
# Property.matches handles this as False could change in the future.
nat = np.datetime64("NAT")
assert np.isnat(nat)
assert bcpb.Property().matches(nat, nat) is False
assert bcpb.Property().matches({"name": nat}, {"name": nat}) is False
assert bcpb.Property().matches(np.array([nat]), np.array([nat])) is False
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcpb, ALL)
| TestProperty |
python | oauthlib__oauthlib | oauthlib/openid/connect/core/exceptions.py | {"start": 2506, "end": 2748} | class ____(OpenIDClientError):
"""
The request parameter contains an invalid Request Object.
"""
error = 'invalid_request_object'
description = 'The request parameter contains an invalid Request Object.'
| InvalidRequestObject |
python | pydata__xarray | xarray/tests/test_utils.py | {"start": 1816, "end": 6419} | class ____:
@pytest.fixture(autouse=True)
def setup(self):
self.x = {"a": "A", "b": "B"}
self.y = {"c": "C", "b": "B"}
self.z = {"a": "Z"}
def test_equivalent(self):
assert utils.equivalent(0, 0)
assert utils.equivalent(np.nan, np.nan)
assert utils.equivalent(0, np.array(0.0))
assert utils.equivalent([0], np.array([0]))
assert utils.equivalent(np.array([0]), [0])
assert utils.equivalent(np.arange(3), 1.0 * np.arange(3))
assert not utils.equivalent(0, np.zeros(3))
# Test NaN comparisons (issue #10833)
# Python float NaN
assert utils.equivalent(float("nan"), float("nan"))
# NumPy scalar NaN (various dtypes)
assert utils.equivalent(np.float64(np.nan), np.float64(np.nan))
assert utils.equivalent(np.float32(np.nan), np.float32(np.nan))
# Mixed: Python float NaN vs NumPy scalar NaN
assert utils.equivalent(float("nan"), np.float64(np.nan))
assert utils.equivalent(np.float64(np.nan), float("nan"))
def test_safe(self):
# should not raise exception:
utils.update_safety_check(self.x, self.y)
def test_unsafe(self):
with pytest.raises(ValueError):
utils.update_safety_check(self.x, self.z)
def test_compat_dict_intersection(self):
assert {"b": "B"} == utils.compat_dict_intersection(self.x, self.y)
assert {} == utils.compat_dict_intersection(self.x, self.z)
def test_compat_dict_union(self):
assert {"a": "A", "b": "B", "c": "C"} == utils.compat_dict_union(self.x, self.y)
with pytest.raises(
ValueError,
match=r"unsafe to merge dictionaries without "
"overriding values; conflicting key",
):
utils.compat_dict_union(self.x, self.z)
def test_dict_equiv(self):
x: dict = {}
x["a"] = 3
x["b"] = np.array([1, 2, 3])
y: dict = {}
y["b"] = np.array([1.0, 2.0, 3.0])
y["a"] = 3
assert utils.dict_equiv(x, y) # two nparrays are equal
y["b"] = [1, 2, 3] # np.array not the same as a list
assert utils.dict_equiv(x, y) # nparray == list
x["b"] = [1.0, 2.0, 3.0]
assert utils.dict_equiv(x, y) # list vs. list
x["c"] = None
assert not utils.dict_equiv(x, y) # new key in x
x["c"] = np.nan
y["c"] = np.nan
assert utils.dict_equiv(x, y) # as intended, nan is nan
x["c"] = np.inf
y["c"] = np.inf
assert utils.dict_equiv(x, y) # inf == inf
y = dict(y)
assert utils.dict_equiv(x, y) # different dictionary types are fine
y["b"] = 3 * np.arange(3)
assert not utils.dict_equiv(x, y) # not equal when arrays differ
def test_frozen(self):
x = utils.Frozen(self.x)
with pytest.raises(TypeError):
x["foo"] = "bar" # type: ignore[index]
with pytest.raises(TypeError):
del x["a"] # type: ignore[attr-defined]
with pytest.raises(AttributeError):
x.update(self.y) # type: ignore[attr-defined]
assert x.mapping == self.x
assert repr(x) in (
"Frozen({'a': 'A', 'b': 'B'})",
"Frozen({'b': 'B', 'a': 'A'})",
)
def test_filtered(self):
x = utils.FilteredMapping(keys={"a"}, mapping={"a": 1, "b": 2})
assert "a" in x
assert "b" not in x
assert x["a"] == 1
assert list(x) == ["a"]
assert len(x) == 1
assert repr(x) == "FilteredMapping(keys={'a'}, mapping={'a': 1, 'b': 2})"
assert dict(x) == {"a": 1}
def test_flat_items() -> None:
mapping = {"x": {"y": 1, "z": 2}, "x/y": 3}
actual = list(flat_items(mapping))
expected = [("x/y", 1), ("x/z", 2), ("x/y", 3)]
assert actual == expected
def test_repr_object():
obj = utils.ReprObject("foo")
assert repr(obj) == "foo"
assert isinstance(obj, Hashable)
assert not isinstance(obj, str)
def test_repr_object_magic_methods():
o1 = utils.ReprObject("foo")
o2 = utils.ReprObject("foo")
o3 = utils.ReprObject("bar")
o4 = "foo"
assert o1 == o2
assert o1 != o3
assert o1 != o4
assert hash(o1) == hash(o2)
assert hash(o1) != hash(o3)
assert hash(o1) != hash(o4)
def test_is_remote_uri():
assert utils.is_remote_uri("http://example.com")
assert utils.is_remote_uri("https://example.com")
assert not utils.is_remote_uri(" http://example.com")
assert not utils.is_remote_uri("example.nc")
| TestDictionaries |
python | ZoranPandovski__al-go-rithms | data_structures/heap/Python/BinaryHeaps/BinaryHeaps.py | {"start": 2953, "end": 4074} | class ____(Heap):
def __init__(self):
self.array = [-float("inf")]
self.size = 0
def insert(self, el):
self.array.append(el)
self.size += 1
ind = self.size
while el < self.array[self.parent(ind)]:
self.array[ind], self.array[self.parent(ind)] = self.array[self.parent(ind)], self.array[ind]
ind = self.parent(ind)
def extract_min(self):
root = self.get_root()
self.array[1] = self.array.pop()
self.size -= 1
self.min_heapify(1)
return root
def min_heapify(self, ind):
smallest_ind = ind
if self.child_left(ind) <= self.size and self.array[self.child_left(ind)] < self.array[smallest_ind]:
smallest_ind = self.child_left(ind)
if self.child_right(ind) <= self.size and self.array[self.child_right(ind)] < self.array[smallest_ind]:
smallest_ind = self.child_right(ind)
if smallest_ind != ind:
self.array[ind], self.array[smallest_ind] = self.array[smallest_ind], self.array[ind]
self.min_heapify(smallest_ind)
| MinHeap |
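A hedged usage sketch of the heap in the sample above (it assumes the `Heap` base class from the same file supplies `parent`, `child_left`, `child_right`, and `get_root`, which the methods above rely on):
# Assumes MinHeap and its Heap base class are importable from the sample's module.
heap = MinHeap()
for value in [5, 3, 8, 1]:
    heap.insert(value)
print(heap.extract_min())  # 1 -- smallest element comes out first
print(heap.extract_min())  # 3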
python | scrapy__scrapy | tests/test_request_cb_kwargs.py | {"start": 1656, "end": 5746} | class ____(MockServerSpider):
name = "kwargs"
custom_settings = {
"DOWNLOADER_MIDDLEWARES": {
InjectArgumentsDownloaderMiddleware: 750,
},
"SPIDER_MIDDLEWARES": {
InjectArgumentsSpiderMiddleware: 750,
},
}
checks: list[bool] = []
async def start(self):
data = {"key": "value", "number": 123, "callback": "some_callback"}
yield Request(self.mockserver.url("/first"), self.parse_first, cb_kwargs=data)
yield Request(
self.mockserver.url("/general_with"), self.parse_general, cb_kwargs=data
)
yield Request(self.mockserver.url("/general_without"), self.parse_general)
yield Request(self.mockserver.url("/no_kwargs"), self.parse_no_kwargs)
yield Request(
self.mockserver.url("/default"), self.parse_default, cb_kwargs=data
)
yield Request(
self.mockserver.url("/takes_less"), self.parse_takes_less, cb_kwargs=data
)
yield Request(
self.mockserver.url("/takes_more"), self.parse_takes_more, cb_kwargs=data
)
yield Request(self.mockserver.url("/downloader_mw"), self.parse_downloader_mw)
yield Request(self.mockserver.url("/spider_mw"), self.parse_spider_mw)
def parse_first(self, response, key, number):
self.checks.append(key == "value")
self.checks.append(number == 123)
self.crawler.stats.inc_value("boolean_checks", 2)
yield response.follow(
self.mockserver.url("/two"),
self.parse_second,
cb_kwargs={"new_key": "new_value"},
)
def parse_second(self, response, new_key):
self.checks.append(new_key == "new_value")
self.crawler.stats.inc_value("boolean_checks")
def parse_general(self, response, **kwargs):
if response.url.endswith("/general_with"):
self.checks.append(kwargs["key"] == "value")
self.checks.append(kwargs["number"] == 123)
self.checks.append(kwargs["callback"] == "some_callback")
self.crawler.stats.inc_value("boolean_checks", 3)
elif response.url.endswith("/general_without"):
self.checks.append(
kwargs == {} # pylint: disable=use-implicit-booleaness-not-comparison
)
self.crawler.stats.inc_value("boolean_checks")
def parse_no_kwargs(self, response):
self.checks.append(response.url.endswith("/no_kwargs"))
self.crawler.stats.inc_value("boolean_checks")
def parse_default(self, response, key, number=None, default=99):
self.checks.append(response.url.endswith("/default"))
self.checks.append(key == "value")
self.checks.append(number == 123)
self.checks.append(default == 99)
self.crawler.stats.inc_value("boolean_checks", 4)
def parse_takes_less(self, response, key, callback):
"""
Should raise
TypeError: parse_takes_less() got an unexpected keyword argument 'number'
"""
def parse_takes_more(self, response, key, number, callback, other):
"""
Should raise
TypeError: parse_takes_more() missing 1 required positional argument: 'other'
"""
def parse_downloader_mw(
self, response, from_process_request, from_process_response
):
self.checks.append(bool(from_process_request))
self.checks.append(bool(from_process_response))
self.crawler.stats.inc_value("boolean_checks", 2)
def parse_spider_mw(self, response, from_process_spider_input, from_process_start):
self.checks.append(bool(from_process_spider_input))
self.checks.append(bool(from_process_start))
self.crawler.stats.inc_value("boolean_checks", 2)
return Request(self.mockserver.url("/spider_mw_2"), self.parse_spider_mw_2)
def parse_spider_mw_2(self, response, from_process_spider_output):
self.checks.append(bool(from_process_spider_output))
self.crawler.stats.inc_value("boolean_checks", 1)
| KeywordArgumentsSpider |
python | gevent__gevent | src/gevent/tests/test__pool.py | {"start": 260, "end": 4194} | class ____(unittest.TestCase):
klass = gevent.pool.Pool
def test_apply_async(self):
done = Event()
def some_work(_):
done.set()
pool = self.klass(2)
pool.apply_async(some_work, ('x', ))
done.wait()
def test_apply(self):
value = 'return value'
def some_work():
return value
pool = self.klass(2)
result = pool.apply(some_work)
self.assertEqual(value, result)
def test_apply_raises(self):
pool = self.klass(1)
def raiser():
raise ExpectedException()
try:
pool.apply(raiser)
except ExpectedException:
pass
else:
self.fail("Should have raised ExpectedException")
# Don't let the metaclass automatically force any error
# that reaches the hub from a spawned greenlet to become
# fatal; that defeats the point of the test.
test_apply_raises.error_fatal = False
def test_multiple_coros(self):
evt = Event()
results = []
def producer():
gevent.sleep(0.001)
results.append('prod')
evt.set()
def consumer():
results.append('cons1')
evt.wait()
results.append('cons2')
pool = self.klass(2)
done = pool.spawn(consumer)
pool.apply_async(producer)
done.get()
self.assertEqual(['cons1', 'prod', 'cons2'], results)
def dont_test_timer_cancel(self):
timer_fired = []
def fire_timer():
timer_fired.append(True)
def some_work():
gevent.timer(0, fire_timer) # pylint:disable=no-member
pool = self.klass(2)
pool.apply(some_work)
gevent.sleep(0)
self.assertEqual(timer_fired, [])
def test_reentrant(self):
pool = self.klass(1)
result = pool.apply(pool.apply, (lambda a: a + 1, (5, )))
self.assertEqual(result, 6)
evt = Event()
pool.apply_async(evt.set)
evt.wait()
@greentest.skipOnPyPy("Does not work on PyPy") # Why?
def test_stderr_raising(self):
# testing that really egregious errors in the error handling code
# (that prints tracebacks to stderr) don't cause the pool to lose
# any members
import sys
pool = self.klass(size=1)
# we're going to do this by causing the traceback.print_exc in
# safe_apply to raise an exception and thus exit _main_loop
normal_err = sys.stderr
try:
sys.stderr = FakeFile()
waiter = pool.spawn(crash)
with gevent.Timeout(2):
# Without the timeout, we can get caught...doing something?
# If we call PyErr_WriteUnraisable at a certain point,
# we appear to switch back to the hub and do nothing,
# meaning we sit forever. The timeout at least keeps us from
# doing that and fails the test if we mess up error handling.
self.assertRaises(RuntimeError, waiter.get)
# the pool should have something free at this point since the
# waiter returned
# pool.Pool change: if an exception is raised during execution of a link,
# the rest of the links are scheduled to be executed on the next hub iteration
# this introduces a delay in updating pool.sem which makes pool.free_count() report 0
# therefore, sleep:
gevent.sleep(0)
self.assertEqual(pool.free_count(), 1)
# shouldn't block when trying to get
with gevent.Timeout.start_new(0.1):
pool.apply(gevent.sleep, (0, ))
finally:
sys.stderr = normal_err
pool.join()
def crash(*_args, **_kw):
raise RuntimeError("Raising an error from the crash() function")
| TestCoroutinePool |
python | pypa__setuptools | setuptools/_vendor/more_itertools/more.py | {"start": 88959, "end": 101134} | class ____:
"""
:func:`run_length.encode` compresses an iterable with run-length encoding.
It yields groups of repeated items with the count of how many times they
were repeated:
>>> uncompressed = 'abbcccdddd'
>>> list(run_length.encode(uncompressed))
[('a', 1), ('b', 2), ('c', 3), ('d', 4)]
:func:`run_length.decode` decompresses an iterable that was previously
compressed with run-length encoding. It yields the items of the
decompressed iterable:
>>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
>>> list(run_length.decode(compressed))
['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
"""
@staticmethod
def encode(iterable):
return ((k, ilen(g)) for k, g in groupby(iterable))
@staticmethod
def decode(iterable):
return chain.from_iterable(repeat(k, n) for k, n in iterable)
def exactly_n(iterable, n, predicate=bool):
"""Return ``True`` if exactly ``n`` items in the iterable are ``True``
according to the *predicate* function.
>>> exactly_n([True, True, False], 2)
True
>>> exactly_n([True, True, False], 1)
False
>>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
True
The iterable will be advanced until ``n + 1`` truthy items are encountered,
so avoid calling it on infinite iterables.
"""
return len(take(n + 1, filter(predicate, iterable))) == n
def circular_shifts(iterable):
"""Return a list of circular shifts of *iterable*.
>>> circular_shifts(range(4))
[(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
"""
lst = list(iterable)
return take(len(lst), windowed(cycle(lst), len(lst)))
def make_decorator(wrapping_func, result_index=0):
"""Return a decorator version of *wrapping_func*, which is a function that
modifies an iterable. *result_index* is the position in that function's
signature where the iterable goes.
This lets you use itertools on the "production end," i.e. at function
definition. This can augment what the function returns without changing the
function's code.
For example, to produce a decorator version of :func:`chunked`:
>>> from more_itertools import chunked
>>> chunker = make_decorator(chunked, result_index=0)
>>> @chunker(3)
... def iter_range(n):
... return iter(range(n))
...
>>> list(iter_range(9))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
To only allow truthy items to be returned:
>>> truth_serum = make_decorator(filter, result_index=1)
>>> @truth_serum(bool)
... def boolean_test():
... return [0, 1, '', ' ', False, True]
...
>>> list(boolean_test())
[1, ' ', True]
The :func:`peekable` and :func:`seekable` wrappers make for practical
decorators:
>>> from more_itertools import peekable
>>> peekable_function = make_decorator(peekable)
>>> @peekable_function()
... def str_range(*args):
... return (str(x) for x in range(*args))
...
>>> it = str_range(1, 20, 2)
>>> next(it), next(it), next(it)
('1', '3', '5')
>>> it.peek()
'7'
>>> next(it)
'7'
"""
# See https://sites.google.com/site/bbayles/index/decorator_factory for
# notes on how this works.
def decorator(*wrapping_args, **wrapping_kwargs):
def outer_wrapper(f):
def inner_wrapper(*args, **kwargs):
result = f(*args, **kwargs)
wrapping_args_ = list(wrapping_args)
wrapping_args_.insert(result_index, result)
return wrapping_func(*wrapping_args_, **wrapping_kwargs)
return inner_wrapper
return outer_wrapper
return decorator
def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
"""Return a dictionary that maps the items in *iterable* to categories
defined by *keyfunc*, transforms them with *valuefunc*, and
then summarizes them by category with *reducefunc*.
*valuefunc* defaults to the identity function if it is unspecified.
If *reducefunc* is unspecified, no summarization takes place:
>>> keyfunc = lambda x: x.upper()
>>> result = map_reduce('abbccc', keyfunc)
>>> sorted(result.items())
[('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]
Specifying *valuefunc* transforms the categorized items:
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: 1
>>> result = map_reduce('abbccc', keyfunc, valuefunc)
>>> sorted(result.items())
[('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]
Specifying *reducefunc* summarizes the categorized items:
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: 1
>>> reducefunc = sum
>>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
>>> sorted(result.items())
[('A', 1), ('B', 2), ('C', 3)]
You may want to filter the input iterable before applying the map/reduce
procedure:
>>> all_items = range(30)
>>> items = [x for x in all_items if 10 <= x <= 20] # Filter
>>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1
>>> categories = map_reduce(items, keyfunc=keyfunc)
>>> sorted(categories.items())
[(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
>>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
>>> sorted(summaries.items())
[(0, 90), (1, 75)]
Note that all items in the iterable are gathered into a list before the
summarization step, which may require significant storage.
The returned object is a :obj:`collections.defaultdict` with the
``default_factory`` set to ``None``, such that it behaves like a normal
dictionary.
"""
valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
ret = defaultdict(list)
for item in iterable:
key = keyfunc(item)
value = valuefunc(item)
ret[key].append(value)
if reducefunc is not None:
for key, value_list in ret.items():
ret[key] = reducefunc(value_list)
ret.default_factory = None
return ret
def rlocate(iterable, pred=bool, window_size=None):
"""Yield the index of each item in *iterable* for which *pred* returns
``True``, starting from the right and moving left.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4
[4, 2, 1]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item:
>>> iterable = iter('abcb')
>>> pred = lambda x: x == 'b'
>>> list(rlocate(iterable, pred))
[3, 1]
If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> pred = lambda *args: args == (1, 2, 3)
>>> list(rlocate(iterable, pred=pred, window_size=3))
[9, 5, 1]
Beware, this function won't return anything for infinite iterables.
If *iterable* is reversible, ``rlocate`` will reverse it and search from
the right. Otherwise, it will search from the left and return the results
in reverse order.
See :func:`locate` to for other example applications.
"""
if window_size is None:
try:
len_iter = len(iterable)
return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
except TypeError:
pass
return reversed(list(locate(iterable, pred, window_size)))
def replace(iterable, pred, substitutes, count=None, window_size=1):
"""Yield the items from *iterable*, replacing the items for which *pred*
returns ``True`` with the items from the iterable *substitutes*.
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
>>> pred = lambda x: x == 0
>>> substitutes = (2, 3)
>>> list(replace(iterable, pred, substitutes))
[1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
If *count* is given, the number of replacements will be limited:
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
>>> pred = lambda x: x == 0
>>> substitutes = [None]
>>> list(replace(iterable, pred, substitutes, count=2))
[1, 1, None, 1, 1, None, 1, 1, 0]
Use *window_size* to control the number of items passed as arguments to
*pred*. This allows for locating and replacing subsequences.
>>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
>>> window_size = 3
>>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred
>>> substitutes = [3, 4] # Splice in these items
>>> list(replace(iterable, pred, substitutes, window_size=window_size))
[3, 4, 5, 3, 4, 5]
"""
if window_size < 1:
raise ValueError('window_size must be at least 1')
# Save the substitutes iterable, since it's used more than once
substitutes = tuple(substitutes)
# Add padding such that the number of windows matches the length of the
# iterable
it = chain(iterable, [_marker] * (window_size - 1))
windows = windowed(it, window_size)
n = 0
for w in windows:
# If the current window matches our predicate (and we haven't hit
# our maximum number of replacements), splice in the substitutes
# and then consume the following windows that overlap with this one.
# For example, if the iterable is (0, 1, 2, 3, 4...)
# and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
# If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
if pred(*w):
if (count is None) or (n < count):
n += 1
yield from substitutes
consume(windows, window_size - 1)
continue
# If there was no match (or we've reached the replacement limit),
# yield the first item from the window.
if w and (w[0] is not _marker):
yield w[0]
def partitions(iterable):
"""Yield all possible order-preserving partitions of *iterable*.
>>> iterable = 'abc'
>>> for part in partitions(iterable):
... print([''.join(p) for p in part])
['abc']
['a', 'bc']
['ab', 'c']
['a', 'b', 'c']
This is unrelated to :func:`partition`.
"""
sequence = list(iterable)
n = len(sequence)
for i in powerset(range(1, n)):
yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
def set_partitions(iterable, k=None):
"""
Yield the set partitions of *iterable* into *k* parts. Set partitions are
not order-preserving.
>>> iterable = 'abc'
>>> for part in set_partitions(iterable, 2):
... print([''.join(p) for p in part])
['a', 'bc']
['ab', 'c']
['b', 'ac']
If *k* is not given, every set partition is generated.
>>> iterable = 'abc'
>>> for part in set_partitions(iterable):
... print([''.join(p) for p in part])
['abc']
['a', 'bc']
['ab', 'c']
['b', 'ac']
['a', 'b', 'c']
"""
L = list(iterable)
n = len(L)
if k is not None:
if k < 1:
raise ValueError(
"Can't partition in a negative or zero number of groups"
)
elif k > n:
return
def set_partitions_helper(L, k):
n = len(L)
if k == 1:
yield [L]
elif n == k:
yield [[s] for s in L]
else:
e, *M = L
for p in set_partitions_helper(M, k - 1):
yield [[e], *p]
for p in set_partitions_helper(M, k):
for i in range(len(p)):
yield p[:i] + [[e] + p[i]] + p[i + 1 :]
if k is None:
for k in range(1, n + 1):
yield from set_partitions_helper(L, k)
else:
yield from set_partitions_helper(L, k)
| run_length |
python | huggingface__transformers | src/transformers/generation/configuration_utils.py | {"start": 74064, "end": 76461} | class ____:
"""
Class that holds arguments relative to `torch.compile` behavior, when using automatic compilation in `generate`.
See [`torch.compile`](https://pytorch.org/docs/stable/generated/torch.compile.html) for more details on the arguments.
Args:
fullgraph (`bool`, *optional*, defaults to `False`):
If False (default), attempts to discover compileable regions that will be optimized. If True, then require
that the entire function be capturable into a single graph. If this is not possible (that is, if there are
graph breaks), then an error will be raised.
dynamic (`bool` or `None`, *optional*):
Whether to try to use dynamic shape graphs.
backend (`str` or `Callable`, *optional*, defaults to `"inductor"`):
Backend to be used.
mode (`str`, *optional*, defaults to `"reduce-overhead"`):
Controls balance between performance and overhead.
options (`dict`, *optional*):
A dictionary of options to pass to the backend.
Examples:
```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, CompileConfig
>>> tokenizer = AutoTokenizer.from_pretrained('google/gemma-2-2b')
>>> model = AutoModelForCausalLM.from_pretrained('google/gemma-2-2b').cuda()
>>> # Automatic compile configuration, used with static cache
>>> compile_config = CompileConfig(dynamic=True)
>>> # Generation with static cache and compile config
>>> input = tokenizer.encode("Hello there, how", return_tensors="pt").cuda()
>>> output = model.generate(
... input, do_sample=False, max_new_tokens=300, cache_implementation="static", compile_config=compile_config
... )
>>> output_text = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
```
"""
fullgraph: bool = False
dynamic: bool | None = None
backend: str | Callable = "inductor"
mode: str = "reduce-overhead"
options: dict | None = None
# Used to flag our `generate` call to compile on e.g. CPU. Often not optimal, but useful for testing purposes.
_compile_all_devices = None
def to_dict(self) -> dict[str, Any]:
"""Serializes this instance to a Python dictionary."""
return copy.deepcopy({key: value for key, value in self.__dict__.items() if key != "_compile_all_devices"})
| CompileConfig |
python | google__python-fire | fire/test_components.py | {"start": 4378, "end": 4455} | class ____:
def as_bool(self, arg=False):
return bool(arg)
| BoolConverter |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/random/stateful_random_ops_test.py | {"start": 1883, "end": 28763} | class ____(test.TestCase, parameterized.TestCase):
def setUp(self):
super(StatefulRandomOpsTest, self).setUp()
physical_devices = config.list_physical_devices("CPU")
config.set_logical_device_configuration(
physical_devices[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
def testCreateRNGStateIntSeed(self):
"""Tests `create_rng_state` when `seed` is int."""
# using leading 'F' to test overflow tolerance
state = random.create_rng_state(0xFFFF222233334444FFAA666677778888,
random.RNG_ALG_PHILOX)
self.assertAllEqual(
list(map(random._uint_to_int,
[0xFFAA666677778888, 0xFFFF222233334444] +
[0] * (random.PHILOX_STATE_SIZE - 2))),
state)
def assertAllDifferent(self, tensors):
"""Checks that there are no duplicate elements anywhere among the tensors.
Args:
tensors: a list of tensors. They can have different shapes.
"""
tensors = [array_ops.reshape(t, shape=[-1]) for t in tensors]
ls = array_ops.concat(tensors, axis=0).numpy().tolist()
self.assertAllEqual(len(ls), len(set(ls)))
@test_util.run_v2_only
def testNonDeterministicInts(self):
"""Tests that non_deterministic_ints returns different results every time.
This test is flaky, but with very low probability of failing.
"""
shape = [2, 3]
dtype = dtypes.int64
a = random.non_deterministic_ints(shape=shape, dtype=dtype)
self.assertAllEqual(shape, a.shape)
self.assertEqual(dtype, a.dtype)
b = random.non_deterministic_ints(shape, dtype=dtype)
self.assertAllDifferent([a, b])
def assertRegex(self, pattern, text):
self.assertTrue(
re.search(pattern, text),
"Can't find pattern '%s' in text '%s'" % (pattern, text))
@parameterized.parameters(list(random_ops_util.Algorithm))
@test_util.run_v2_only
def testBatchSeeds(self, alg):
"""Test for batch seeds."""
shape = [2, 3]
count = 6
gen = random.Generator.from_seed(1234, alg=alg)
if alg == random_ops_util.Algorithm.THREEFRY:
# We don't have CPU/GPU kernels for ThreeFry yet.
return
keys1 = gen._make_int64_keys(shape=shape)
keys2 = gen._make_int64_keys(shape=shape)
self.assertAllDifferent([keys1, keys2])
seeds1 = gen.make_seeds(count=count)
seeds2 = gen.make_seeds(count=count)
self.assertAllDifferent([seeds1[0, :], seeds2[0, :]])
gens = gen.split(count=count)
self.assertAllEqual(count, len(gens))
randoms = [g.uniform_full_int(shape=shape, dtype=dtypes.int32)
for g in gens]
self.assertAllDifferent(randoms)
# Tests graph mode.
@def_function.function
def f():
return gen.make_seeds(count=count)
for _ in range(3):
f()
@parameterized.parameters(list(random_ops_util.Algorithm))
@test_util.run_v2_only
@test_util.run_cuda_only
def testCrossDeviceSplit(self, alg):
"""Tests that a CPU RNG can split into RNGs on GPU."""
with ops.device("/device:CPU:0"):
gen = random.Generator.from_seed(1234, alg=alg) # gen is on CPU
self.assertRegex("CPU", gen.state.device)
if alg == random_ops_util.Algorithm.THREEFRY:
# We don't have CPU/GPU kernels for ThreeFry yet.
return
with ops.device(test_util.gpu_device_name()):
gens = gen.split(count=10) # gens are on GPU
self.assertRegex("GPU", gens[0].state.device)
@parameterized.parameters(list(random_ops_util.Algorithm))
@test_util.run_v2_only
def testSplitInFunction(self, alg):
g = random.Generator.from_seed(1, alg=alg)
if alg == random_ops_util.Algorithm.THREEFRY:
# We don't have CPU/GPU kernels for ThreeFry yet.
return
new_g = [None] # using list as mutable cells
@def_function.function
def f():
if new_g[0] is None: # avoid creating variable in 2nd trace
new_g[0] = g.split(2)
return [new_g[0][i].normal([]) for i in range(2)]
f()
def testFnVars(self):
"""Tests that RNG variable is added to ConcreteFunction.variables."""
rng = random.Generator.from_seed(0)
@def_function.function
def f():
return rng.normal([])
concrete = f.get_concrete_function()
self.assertIn(rng.state, concrete.variables)
@test_util.run_v2_only
def testReset(self):
shape = [2, 3]
gen = random.Generator.from_seed(0)
for resetter in [
lambda g: g.reset(state=[1, 2, 3]),
lambda g: g.reset_from_seed(1234),
lambda g: g.reset_from_key_counter(key=1, counter=[2, 3]),
]:
resetter(gen)
expected_normal = gen.normal(shape)
@def_function.function
def f(resetter):
resetter(gen)
return gen.normal(shape)
def check_results(expected_normal, v):
self.assertAllEqual(expected_normal, v)
check_results(expected_normal, f(resetter))
check_results(expected_normal, f(resetter))
@test_util.run_v2_only
def testGeneratorCreation(self):
"""Tests generator creation, in both eager and tf.function.
The interaction between Generator creation and defun should be the same as
tf.Variable.
"""
shape = [2, 3]
alg = random.RNG_ALG_PHILOX
for constructor in [
lambda: random.Generator(state=[1, 2, 3], alg=alg),
lambda: random.Generator.from_seed(1234),
lambda: random.Generator.from_key_counter( # pylint: disable=g-long-lambda
key=1, counter=[2, 3], alg=alg),
]:
gen = constructor()
# Tests tf.function
expected_normal1 = gen.normal(shape)
expected_normal2 = gen.normal(shape)
global g_seeded
g_seeded = None
@def_function.function
def f(constructor):
global g_seeded
# defun'ed function should only create variables once
if g_seeded is None:
g_seeded = constructor()
return g_seeded.normal(shape)
def check_results(expected_normal, v):
self.assertAllEqual(expected_normal, v)
check_results(expected_normal1, f(constructor))
check_results(expected_normal2, f(constructor))
@test_util.run_v2_only
def testCreateGeneratorFromSymbolic(self):
g = [None, None, None] # using list as mutable cells
@def_function.function
def f(scalar, vector2, vector3):
if g[0] is None: # avoid creating variable in 2nd trace
g[0] = random.Generator.from_seed(scalar)
g[0].reset_from_seed(scalar) # also test reset
g[1] = random.Generator.from_state(vector3, random.RNG_ALG_PHILOX)
g[1].reset(vector3)
g[2] = random.Generator.from_key_counter(
scalar, vector2, random.RNG_ALG_PHILOX)
g[2].reset_from_key_counter(scalar, vector2)
return [g[i].normal([]) for i in range(3)]
args = (1, [2, 2], [3, 3, 3])
args = [constant_op.constant(v) for v in args]
f(*args)
@parameterized.parameters([
(
"philox",
random_ops_util.Algorithm.PHILOX.value,
random_ops_util.Algorithm.PHILOX,
),
(
"threefry",
random_ops_util.Algorithm.THREEFRY.value,
random_ops_util.Algorithm.THREEFRY,
),
(
"auto_select",
random_ops_util.Algorithm.AUTO_SELECT.value,
random_ops_util.Algorithm.AUTO_SELECT,
),
])
@test_util.run_v2_only
def testAlg(self, name, int_id, enum_id):
g_by_name = random.Generator.from_seed(1234, name)
g_by_int = random.Generator.from_seed(1234, int_id)
g_by_enum = random.Generator.from_seed(1234, enum_id)
self.assertEqual(g_by_name.algorithm, g_by_int.algorithm)
self.assertEqual(g_by_name.algorithm, g_by_enum.algorithm)
if enum_id == random_ops_util.Algorithm.THREEFRY:
# We don't have CPU/GPU kernels for ThreeFry yet.
return
shape = [3]
output_by_name = g_by_name.normal(shape)
output_by_int = g_by_int.normal(shape)
output_by_enum = g_by_enum.normal(shape)
self.assertAllEqual(output_by_name, output_by_int)
self.assertAllEqual(output_by_name, output_by_enum)
@test_util.run_v2_only
def testGeneratorCreationWithVar(self):
"""Tests creating generator with a variable.
"""
alg = random.RNG_ALG_PHILOX
state = [1, 2, 3]
var = variables.Variable(state, dtype=random.STATE_TYPE)
g = random.Generator(state=state, alg=alg)
g_var = random.Generator(state=var, alg=alg)
shape = [2, 3]
g.normal(shape)
g_var.normal(shape)
self.assertAllEqual(g.state.read_value(), var.read_value())
@test_util.run_v2_only
def testGeneratorCreationUnseeded(self):
"""Tests generator creation, the unseeded case."""
shape = [2, 3]
global g_unseeded
g_unseeded = None
@def_function.function
def f():
global g_unseeded
# defun'ed function should only create variables once
if g_unseeded is None:
g_unseeded = random.Generator.from_non_deterministic_state()
return g_unseeded.normal(shape)
self.assertAllEqual(shape, f().shape)
@test_util.run_v2_only
def testGeneratorCopy(self):
"""Tests copying a generator."""
g = random.Generator.from_seed(0)
g_copy = random.Generator(g)
self.assertAllEqual(g.algorithm, g_copy.algorithm)
self.assertAllEqual(g.state.read_value(), g_copy.state.read_value())
# Tests tf.function
global g_seeded
g_seeded = None
# Do the same in tf.function
@def_function.function
def f():
global g_seeded
# defun'ed function should only create variables once
if g_seeded is None:
g_seeded = random.Generator(g)
self.assertAllEqual(g.algorithm, g_seeded.algorithm)
self.assertAllEqual(g.state.read_value(), g_seeded.state.read_value())
f()
@test_util.run_v1_only(
("This test is specifically for checking TF1 compatibility. "
"It cannot run under TF2."))
def testTF1(self):
seed = 1234
shape = [2, 3]
expected_normal1 = constant_op.constant(
[[0.9356609, 1.0854305, -0.93788373],
[-0.50615472, 1.31697023, 0.71375787]], dtype=dtypes.float32)
expected_normal2 = constant_op.constant(
[[-0.3964749, 0.8369565, -0.30946946],
[1.1206646, 1.00852597, -0.10185789]], dtype=dtypes.float32)
with self.cached_session() as sess:
gen1 = random.Generator.from_seed(seed)
gen2 = random.Generator.from_non_deterministic_state()
sess.run((gen1.state.initializer, gen2.state.initializer))
r1 = gen1.normal(shape, dtype=dtypes.float32)
r2 = gen2.normal(shape, dtype=dtypes.float32)
def f():
return sess.run((r1, r2))
def check_results(expected_normal, v1, v2):
self.assertAllClose(expected_normal, v1, rtol=1e-5, atol=1e-5)
self.assertAllEqual(shape, v2.shape)
check_results(expected_normal1, *f())
check_results(expected_normal2, *f())
@test_util.run_v2_only
@test_util.also_run_as_tf_function
def testEagerAndDefun(self):
"""A simple test to make sure the op works in eager and defunned mode."""
random.get_global_generator().normal((3,))
@test_util.run_v2_only
def testOpSeedSelectionAfterSetSeed(self):
"""Tests that op-seed selection is reset after reseting global generator.
Fixing GitHub issue 9171:
https://github.com/tensorflow/tensorflow/issues/9171
"""
shape = (3,)
random.get_global_generator().reset_from_seed(1)
a = random.get_global_generator().normal(shape)
random.get_global_generator().reset_from_seed(1)
b = random.get_global_generator().normal(shape)
self.assertAllEqual(a, b)
# Now do the above again using accelerated ('defun'ed) computation
@def_function.function
def f():
return random.get_global_generator().normal(shape)
random.get_global_generator().reset_from_seed(1)
c = f()
random.get_global_generator().reset_from_seed(1)
d = f()
self.assertAllEqual(c, d)
self.assertAllEqual(a, c)
@test_util.run_v2_only
def testOpSeedSelectionNotSensitive(self):
"""Test that op-seed selection is not sensitive to trivial changes.
Test that op-seed selection is not sensitive to trivial computation
(i.e. graph) changes.
Fixing b/32087099
"""
def f(include_print):
shape = constant_op.constant([5])
if include_print:
shape = logging_ops.Print(shape, [shape])
return random.get_global_generator().normal(shape)
def compare(fst_includes_print, snd_includes_print):
random.get_global_generator().reset_from_seed(50)
fst = f(fst_includes_print)
random.get_global_generator().reset_from_seed(50)
snd = f(snd_includes_print)
self.assertAllEqual(fst, snd)
# Now do the above again using accelerated (defunned) 'f'.
# Running 'f' with two different Boolean arguments should cause
# two different graphs to be generated, hence demonstrating the
# insensitivity to graph changes.
f_acc = def_function.function(f)
random.get_global_generator().reset_from_seed(50)
fst = f_acc(fst_includes_print)
random.get_global_generator().reset_from_seed(50)
snd = f_acc(snd_includes_print)
self.assertAllEqual(fst, snd)
compare(False, False)
compare(True, True)
compare(True, False)
@parameterized.parameters(list(random_ops_util.Algorithm))
@test_util.run_v2_only
def testKey(self, alg):
key = 1234
gen = random.Generator(
state=[0] * random._get_counter_size(alg.value) + [key], alg=alg)
got = gen.key
self.assertAllEqual(key, got)
@def_function.function
def f():
return gen.key
got = f()
self.assertAllEqual(key, got)
@test_util.run_v2_only
def testSkip(self):
key = 1234
counter = 5678
gen = random.Generator(state=[counter, 0, key], alg=random.RNG_ALG_PHILOX)
delta = 432
gen.skip(delta)
new_counter = gen.state[0]
self.assertAllEqual(counter + delta * 256, new_counter)
def _sameAsOldRandomOps(self, device, floats):
def compare(dtype, old, new):
seed1, seed2 = 79, 25
# note how the two seeds for the old op correspond to the seed for the new
# op
with ops.device(device):
gen = random.Generator(state=[0, seed2, seed1],
alg=random.RNG_ALG_PHILOX)
# create a graph for the old op in order to call it many times
@def_function.function
def run_old():
with ops.device(device):
return old(dtype, seed1, seed2)
def run_new():
with ops.device(device):
return new(dtype, gen)
for _ in range(5):
self.assertAllEqual(run_old(), run_new())
shape = constant_op.constant([4, 7])
minval = 128
maxval = 256
# passing `dtype` around to compress go/gpylint-faq#cell-var-from-loop and
# go/gpylint-faq#undefined-loop-variable
def old_normal(dtype, seed1, seed2):
return gen_random_ops.random_standard_normal(
shape, dtype=dtype, seed=seed1, seed2=seed2)
def new_normal(dtype, gen):
return gen._standard_normal(shape, dtype=dtype)
def old_truncated_normal(dtype, seed1, seed2):
return gen_random_ops.truncated_normal(
shape, dtype=dtype, seed=seed1, seed2=seed2)
def new_truncated_normal(dtype, gen):
return gen._truncated_normal(shape, dtype=dtype)
def old_uniform_int(dtype, seed1, seed2):
minval2 = constant_op.constant(minval, dtype=dtype)
maxval2 = constant_op.constant(maxval, dtype=dtype)
return gen_random_ops.random_uniform_int(
shape, minval=minval2, maxval=maxval2, seed=seed1, seed2=seed2)
def new_uniform_int(dtype, gen):
return gen.uniform(shape, minval=minval, maxval=maxval, dtype=dtype)
def old_uniform(dtype, seed1, seed2):
return gen_random_ops.random_uniform(
shape, dtype=dtype, seed=seed1, seed2=seed2)
def new_uniform(dtype, gen):
return gen._uniform(shape, dtype=dtype)
for dtype in floats:
compare(dtype, old_normal, new_normal)
compare(dtype, old_truncated_normal, new_truncated_normal)
compare(dtype, old_uniform, new_uniform)
for dtype in INTS:
compare(dtype, old_uniform_int, new_uniform_int)
@test_util.run_v2_only
def testSameAsOldRandomOpsCPU(self):
"""Tests that the generated numbers are the same as the old random_ops.py.
The CPU version.
"""
self._sameAsOldRandomOps("/device:CPU:0", FLOATS)
@test_util.run_v2_only
@test_util.run_cuda_only
def testSameAsOldRandomOpsGPU(self):
"""Tests that the generated numbers are the same as the old random_ops.py.
The GPU version.
"""
floats = [dtypes.float16, dtypes.float32, dtypes.float64]
if test_util.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(8, 0)):
floats += [dtypes.bfloat16]
self._sameAsOldRandomOps(test_util.gpu_device_name(), floats)
@parameterized.parameters(INTS + [dtypes.uint32, dtypes.uint64])
@test_util.run_v2_only
@test_util.run_cuda_only
def testGPUEqualsCPU(self, dtype):
"""Tests that GPU and CPU generate the same integer outputs."""
seed = 1234
shape = [315, 49]
with ops.device("/device:CPU:0"):
cpu = random.Generator.from_seed(seed).uniform_full_int(
shape=shape, dtype=dtype)
with ops.device(test_util.gpu_device_name()):
gpu = random.Generator.from_seed(seed).uniform_full_int(
shape=shape, dtype=dtype)
self.assertAllEqual(cpu, gpu)
@parameterized.parameters(FLOATS + INTS)
@test_util.run_v2_only
def testUniformIsInRange(self, dtype):
if dtype == dtypes.bfloat16 and not test_util.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(8, 0)):
self.skipTest("Bfloat16 requires compute capability 8.0")
minval = 2
maxval = 33
size = 1000
gen = random.Generator.from_seed(1234)
x = gen.uniform(
shape=[size], dtype=dtype, minval=minval, maxval=maxval).numpy()
self.assertTrue(np.all(x >= minval))
self.assertTrue(np.all(x < maxval))
@parameterized.parameters(FLOATS)
@test_util.run_v2_only
def testNormalIsFinite(self, dtype):
if dtype == dtypes.bfloat16 and not test_util.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(8, 0)):
self.skipTest("Bfloat16 requires compute capability 8.0")
gen = random.Generator.from_seed(1234)
x = gen.normal(shape=[10000], dtype=dtype).numpy()
self.assertTrue(np.all(np.isfinite(x)))
@parameterized.parameters(FLOATS + INTS)
@test_util.run_v2_only
def testDistributionOfUniform(self, dtype):
"""Use Pearson's Chi-squared test to test for uniformity."""
if dtype == dtypes.bfloat16 and not test_util.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(8, 0)):
self.skipTest("Bfloat16 requires compute capability 8.0")
n = 1000
seed = 123
gen = random.Generator.from_seed(seed)
maxval = 1
if dtype.is_integer:
maxval = 100
x = gen.uniform(shape=[n], maxval=maxval, dtype=dtype).numpy()
if maxval > 1:
      # Normalize x to range [0, 1).
x = x.astype(float) / maxval
# Tests that the values are distributed amongst 10 bins with equal
# probability. 16.92 is the Chi^2 value for 9 degrees of freedom with
# p=0.05. This test is probabilistic and would be flaky if the random
# seed were not fixed.
val = random_test_util.chi_squared(x, 10)
self.assertLess(val, 16.92)
@parameterized.parameters(FLOATS)
@test_util.run_v2_only
def testDistributionOfNormal(self, dtype):
"""Use Anderson-Darling test to test distribution appears normal."""
if dtype == dtypes.bfloat16 and not test_util.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(8, 0)):
self.skipTest("Bfloat16 requires compute capability 8.0")
n = 1000
gen = random.Generator.from_seed(1234)
x = gen.normal(shape=[n], dtype=dtype).numpy()
# The constant 2.492 is the 5% critical value for the Anderson-Darling
# test where the mean and variance are known. This test is probabilistic
# so to avoid flakiness the seed is fixed.
self.assertLess(
random_test_util.anderson_darling(x.astype(float)), 2.492)
@test_util.run_v2_only
def testErrors(self):
"""Tests that proper errors are raised.
"""
shape = [2, 3]
gen = random.Generator.from_seed(1234)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
r"must have shape \[\], not"):
gen_stateful_random_ops.stateful_standard_normal_v2(
gen.state.handle, [0, 0], shape)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
r"must have shape \[\], not"):
gen_stateful_random_ops.rng_skip(
gen.state.handle, gen.algorithm, [0, 0])
with self.assertRaisesWithPredicateMatch(
TypeError, "EagerTensor of dtype int64"):
gen_stateful_random_ops.stateful_standard_normal_v2(
gen.state.handle, 1.1, shape)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"Unsupported algorithm id"):
gen_stateful_random_ops.stateful_standard_normal_v2(
gen.state.handle, 123, shape)
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"Unsupported algorithm id"):
gen_stateful_random_ops.rng_read_and_skip(
gen.state.handle, alg=123, delta=10)
var = variables.Variable([0, 0], dtype=dtypes.int32)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"dtype of RNG state variable must be int64, not"):
gen_stateful_random_ops.stateful_standard_normal_v2(
var.handle, random.RNG_ALG_PHILOX, shape)
var = variables.Variable([[0]], dtype=dtypes.int64)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"RNG state must have one and only one dimension, not"):
gen_stateful_random_ops.stateful_standard_normal_v2(
var.handle, random.RNG_ALG_PHILOX, shape)
var = variables.Variable([0], dtype=dtypes.int64)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"For the Philox algorithm, the size of state must be at least"):
gen_stateful_random_ops.stateful_standard_normal_v2(
var.handle, random.RNG_ALG_PHILOX, shape)
with self.assertRaisesWithPredicateMatch(
ValueError,
"minval must be a scalar; got a tensor of shape "):
@def_function.function
def f():
gen.uniform(shape=shape, minval=array_ops.zeros(shape, "int32"),
maxval=100, dtype="int32")
f()
with self.assertRaisesWithPredicateMatch(
ValueError,
"maxval must be a scalar; got a tensor of shape "):
@def_function.function
def f2():
gen.uniform(
shape=shape, minval=0, maxval=array_ops.ones(shape, "int32") * 100,
dtype="int32")
f2()
@test_util.run_v2_only
def testGetGlobalGeneratorWithXla(self):
"""Demonstrates using the global generator with XLA."""
# This test was passing before because soft placement silently picked the
# CPU kernel.
# TODO(wangpeng): Remove this skip
self.skipTest("NonDeterministicInts lacks XLA kernel.")
if not config.list_physical_devices("XLA_CPU"):
self.skipTest("No XLA_CPU device available.")
random.set_global_generator(None)
@def_function.function(jit_compile=True)
def make_seed():
generator = random.get_global_generator()
state = array_ops.identity(generator.state, name="state")
return generator.uniform_full_int((2,), dtypes.int32, name="seed"), state
with ops.device("/device:XLA_CPU:0"):
seed, state = make_seed()
self.assertTrue(np.all(np.isfinite(seed.numpy())))
random.get_global_generator().reset(state)
self.assertAllEqual(make_seed()[0], seed)
@test_util.run_v2_only
def testSetGlobalGeneratorBadWithDefun(self):
"""Demonstrates set_global_generator does not affect compiled tf.function."""
shape = (3,)
@def_function.function
def f():
return random.get_global_generator().normal(shape)
random.set_global_generator(random.Generator.from_seed(50))
samples = f()
    # Resetting the global generator has no effect on the compiled tf.function.
random.set_global_generator(random.Generator.from_seed(50))
# New samples are returned.
self.assertNotAllEqual(samples, f())
@test_util.run_v2_only
def testFunctionArg(self):
"""Tests that RNG can be used as tf.function's argument.
"""
shape = [2, 3]
@def_function.function
def f(gen):
return gen.normal(shape)
g1 = random.Generator.from_seed(1)
g2 = random.Generator.from_seed(1)
res1 = f(g1)
res2 = g2.normal(shape)
self.assertAllEqual(res1, res2)
self.assertAllEqual(g1.state.read_value(), g2.state.read_value())
@test_util.run_v2_only
def testUniformFullInt(self):
"""Tests full-range int uniform.
"""
shape = [3, 4]
dtype = dtypes.int32
g = random.Generator.from_seed(1)
r1 = g.uniform(shape=shape, dtype=dtype, minval=None)
g = random.Generator.from_seed(1)
r2 = g.uniform_full_int(shape=shape, dtype=dtype)
self.assertAllEqual(r1, r2)
@test_util.run_v2_only
def testRestore(self):
"""Tests save and restore.
"""
fname = os.path.join(self.get_temp_dir(), "checkpoint")
g = random.Generator.from_seed(1)
cp = tracking_util.Checkpoint(g=g)
def write_restore_compare():
cp.write(fname)
r1 = g.uniform([], dtype=dtypes.uint32, minval=None)
cp.restore(fname)
r2 = g.uniform([], dtype=dtypes.uint32, minval=None)
self.assertAllEqual(r1, r2)
# Run multiple times so that cp.write is called in various RNG states
for _ in range(2):
write_restore_compare()
@test_util.run_v2_only
def testDeterministicOpsErrors(self):
try:
config.enable_op_determinism()
random.set_global_generator(None)
with self.assertRaisesWithPredicateMatch(
RuntimeError,
'"get_global_generator" cannot be called if determinism is enabled'):
random.get_global_generator()
random.set_global_generator(random.Generator.from_seed(50))
random.get_global_generator()
with self.assertRaisesWithPredicateMatch(
RuntimeError,
'"from_non_deterministic_state" cannot be called when determinism '
"is enabled."):
random.Generator.from_non_deterministic_state()
finally:
config.disable_op_determinism()
if __name__ == "__main__":
config.set_soft_device_placement(False)
test.main()
| StatefulRandomOpsTest |
python | kamyu104__LeetCode-Solutions | Python/minimum-total-distance-traveled.py | {
"start": 1104,
"end": 1799
} | class ____(object):
def minimumTotalDistance(self, robot, factory):
"""
:type robot: List[int]
:type factory: List[List[int]]
:rtype: int
"""
robot.sort(), factory.sort()
dp = [float("inf")]*(len(robot)+1) # dp[j] at i: min of factory[:i+1] and robot[:j]
dp[0] = 0
for i in xrange(len(factory)):
for j in reversed(xrange(1, len(robot)+1)):
curr = 0
for k in xrange(min(factory[i][1], j)+1):
dp[j] = min(dp[j], dp[j-k]+curr)
if (j-1)-k >= 0:
curr += abs(robot[(j-1)-k]-factory[i][0])
return dp[-1]
| Solution2 |
python | marshmallow-code__marshmallow | src/marshmallow/validate.py | {
"start": 23138,
"end": 23931
} | class ____(NoneOf):
"""Validator which fails if ``value`` is a sequence and any element
in the sequence is a member of the sequence passed as ``iterable``. Empty input
is considered valid.
:param iterable: Same as :class:`NoneOf`.
:param error: Same as :class:`NoneOf`.
.. versionadded:: 3.6.0
"""
default_message = "One or more of the choices you made was in: {values}."
def _format_error(self, value) -> str:
value_text = ", ".join(str(val) for val in value)
return super()._format_error(value_text)
def __call__(self, value: typing.Sequence[_T]) -> typing.Sequence[_T]:
for val in value:
if val in self.iterable:
raise ValidationError(self._format_error(value))
return value
| ContainsNoneOf |
python | PyCQA__pylint | tests/regrtest_data/max_inferable_limit_for_classes/nodes/roles.py | {
"start": 160,
"end": 221
} | class ____(ColumnArgumentRole):
...
| ColumnArgumentOrKeyRole |
python | realpython__materials | nearbyshops/shops/apps.py | {
"start": 36,
"end": 85
} | class ____(AppConfig):
name = "shops"
| ShopsConfig |
python | google__jax | jax/_src/config.py | {
"start": 57713,
"end": 82605
} | class ____(enum.StrEnum):
STANDARD = 'standard'
STRICT = 'strict'
numpy_dtype_promotion = enum_class_state(
name='jax_numpy_dtype_promotion',
enum_class=NumpyDtypePromotion,
default=NumpyDtypePromotion.STANDARD,
help=('Specify the rules used for implicit type promotion in operations '
'between arrays. Options are "standard" or "strict"; in strict-mode, '
'binary operations between arrays of differing strongly-specified '
'dtypes will result in an error.'),
include_in_jit_key=True,
include_in_trace_context=True)
disallow_mesh_context_manager = bool_state(
name='jax_disallow_mesh_context_manager',
default=False,
help=(
'If set to True, trying to use a mesh as a context manager will'
' result in a RuntimeError.'
),
)
# TODO(ayx): Move these 3 flags out of config once we have a user-level
# extension mechanism for adding contexts to which the jit cache is sensitive.
error_checking_behavior_nan = enum_state(
name='jax_error_checking_behavior_nan',
enum_values=['ignore', 'raise'],
default='ignore',
help=(
'Specify the behavior when a NaN is encountered. Options are "ignore"'
' or "raise".'
),
include_in_jit_key=True,
include_in_trace_context=True,
)
error_checking_behavior_divide = enum_state(
name='jax_error_checking_behavior_divide',
enum_values=['ignore', 'raise'],
default='ignore',
help=(
'Specify the behavior when a divide by zero is encountered. Options are'
' "ignore" or "raise".'
),
include_in_jit_key=True,
include_in_trace_context=True,
)
error_checking_behavior_oob = enum_state(
name='jax_error_checking_behavior_oob',
enum_values=['ignore', 'raise'],
default='ignore',
help=(
'Specify the behavior when an out of bounds access is encountered.'
' Options are "ignore" or "raise".'
),
include_in_jit_key=True,
include_in_trace_context=True,
)
enable_x64 = bool_state(
name='jax_enable_x64',
default=False,
help='Enable 64-bit types to be used',
include_in_jit_key=True,
include_in_trace_context=True)
jax_jit.set_enable_x64_state(enable_x64)
# TODO(phawkins): remove after fixing users of FLAGS.x64_enabled.
config._contextmanager_flags.remove('jax_enable_x64')
setattr(Config, "x64_enabled", property(lambda _: enable_x64.value))
def _validate_default_device(val):
if (val is not None and
not isinstance(val, xla_client.Device) and
val not in ['cpu', 'gpu', 'tpu']):
# TODO(skyewm): this is a workaround for non-PJRT Device types. Remove when
# all JAX backends use a single C++ device interface.
if 'Device' in str(type(val)):
logger.info(
'Allowing non-`xla_client.Device` default device: %s, type: %s',
repr(val), type(val))
return
raise ValueError('jax.default_device must be passed either a Device object (e.g. '
f"`jax.devices('cpu')[0]`) or a platform name string like 'cpu' or 'gpu'"
f", got: {val!r}")
default_device = string_or_object_state(
name='jax_default_device',
default=None,
help=(
'Configure the default device for JAX operations. Set to a Device '
'object (e.g. ``jax.devices("cpu")[0]``) to use that Device as the '
'default device for JAX operations and jit\'d function calls (there is '
'no effect on multi-device computations, e.g. pmapped function calls). '
'Set to None to use the system default device. See '
':ref:`faq-data-placement` for more information on device placement.'),
validator=_validate_default_device,
include_in_jit_key=True,
include_in_trace_context=True)
disable_jit = bool_state(
name='jax_disable_jit',
default=False,
help=('Disable JIT compilation and just call original Python.'),
include_in_trace_context=True)
jax_jit.set_disable_jit_state(disable_jit)
numpy_rank_promotion = enum_state(
name='jax_numpy_rank_promotion',
enum_values=['allow', 'warn', 'raise'],
default='allow',
help=('Control NumPy-style automatic rank promotion broadcasting '
'("allow", "warn", or "raise").'),
include_in_jit_key=True,
include_in_trace_context=True)
default_matmul_precision = optional_enum_state(
name='jax_default_matmul_precision',
enum_values=[
# Legacy precision API values
'default', 'high', 'highest', 'bfloat16', 'tensorfloat32', 'float32',
# Dot algorithm presets
'ANY_F8_ANY_F8_F32', 'ANY_F8_ANY_F8_F32_FAST_ACCUM', 'ANY_F8_ANY_F8_ANY',
'ANY_F8_ANY_F8_ANY_FAST_ACCUM', 'F16_F16_F16', 'F16_F16_F32',
'BF16_BF16_BF16', 'BF16_BF16_F32', 'BF16_BF16_F32_X3',
'BF16_BF16_F32_X6', 'BF16_BF16_F32_X9', 'TF32_TF32_F32',
'TF32_TF32_F32_X3', 'F32_F32_F32', 'F64_F64_F64',
],
default=None,
help=('Control the default matmul and conv precision for 32bit inputs.\n\n'
'Some platforms, like TPU, offer configurable precision levels for '
'matrix multiplication and convolution computations, trading off '
'accuracy for speed. The precision can be controlled for each '
'operation; for example, see the :func:`jax.lax.conv_general_dilated` '
'and :func:`jax.lax.dot` docstrings. But it can be useful to control '
'the default behavior obtained when an operation is not given a '
'specific precision.\n\n'
'This option can be used to control the default precision '
'level for computations involved in matrix multiplication and '
'convolution on 32bit inputs. The levels roughly describe the '
"precision at which scalar products are computed. The 'bfloat16' "
"option is the fastest and least precise; 'float32' is similar to "
"full float32 precision; 'tensorfloat32' is intermediate.\n\n"
'This parameter can also be used to specify an accumulation '
'"algorithm" for functions that perform matrix multiplications, like '
':func:`jax.lax.dot`. To specify an algorithm, set this option to '
'the name of a :class:`~jax.lax.DotAlgorithmPreset`.\n\n'),
include_in_jit_key=True,
include_in_trace_context=True)
traceback_filtering = enum_state(
name = 'jax_traceback_filtering',
enum_values=["off", "tracebackhide", "remove_frames", "quiet_remove_frames",
"auto"],
default="auto",
help="Controls how JAX filters internal frames out of tracebacks. Valid values are:\n"
"- ``off``: disables traceback filtering.\n"
"- ``auto``: use ``tracebackhide`` if running under a sufficiently "
"new IPython, or ``remove_frames`` otherwise.\n"
"- ``tracebackhide``: adds ``__tracebackhide__`` annotations to "
"hidden stack frames, which some traceback printers support.\n"
"- ``remove_frames``: removes hidden frames from tracebacks, and adds "
"the unfiltered traceback as a ``__cause__`` of the exception.\n"
"- ``quiet_remove_frames``: removes hidden frames from tracebacks, and adds "
"a brief message (to the ``__cause__`` of the exception) describing that this has "
"happened.\n\n")
# This flag is for internal use.
# TODO(tianjianlu): Removes once we always enable cusparse lowering.
# TODO(b/262050896): Set to true after bug is fixed
bcoo_cusparse_lowering = bool_state(
name='jax_bcoo_cusparse_lowering',
default=False,
help=('Enables lowering BCOO ops to cuSparse.'))
# TODO(mattjj): remove this flag when we ensure we only succeed at trace-staging
# if the intended backend can handle lowering the result
dynamic_shapes = bool_state(
name='jax_dynamic_shapes',
default=False,
help=('Enables experimental features for staging out computations with '
'dynamic shapes.'),
include_in_jit_key=True,
include_in_trace_context=True)
# This is for stackless backward compat with e.g. equinox
eager_constant_folding = bool_state(
name='eager_constant_folding',
default=False,
help=('Attempt constant folding during staging.'),
include_in_jit_key=True,
include_in_trace_context=True)
enable_remat_opt_pass = bool_state(
name='jax_compiler_enable_remat_pass',
default=True,
help=('Config to enable / disable the rematerialization HLO pass. '
'Useful to allow XLA to automatically trade off memory and '
'compute when encountering OOM errors. However, you are '
'likely to get better results manually with jax.checkpoint'))
no_tracing = bool_state(
name='jax_no_tracing',
default=False,
help='Disallow tracing for JIT compilation.')
no_execution = bool_state(
name='jax_no_execution',
default=False,
help='Disallow JAX executions.',
include_in_jit_key=True,
include_in_trace_context=True)
disable_vmap_shmap_error = bool_state(
name='jax_disable_vmap_shmap_error',
default=False,
upgrade=False,
help='Temporary workaround to disable an error check in vmap-of-shmap.')
# TODO(mattjj): remove once we land mutable array plumbing, or face great shame
custom_vjp_disable_shape_check = bool_state(
name='jax_custom_vjp_disable_shape_check',
default=False,
upgrade=True,
help='Disable the check from #19009 to enable some custom_vjp hacks.')
mutable_array_checks = bool_state(
name='jax_mutable_array_checks',
default=True,
upgrade=True,
help='Enable error checks for mutable arrays that rule out aliasing.',
include_in_trace_context=True)
vjp3 = bool_state(
name='jax_vjp3',
default=True,
upgrade=True,
help='Use new backward-pass code in jax.vjp')
refs_to_pins = bool_state(
name='jax_refs_to_pins',
default=False,
upgrade=True,
help='Lower refs to pinned buffers in HLO.')
xla_runtime_errors = bool_state(
name='jax_experimental_unsafe_xla_runtime_errors',
default=False,
help=('Enable XLA runtime errors for jax.experimental.checkify.checks '
'on CPU and GPU. These errors are async, might get lost and are not '
'very readable. But, they crash the computation and enable you '
'to write jittable checks without needing to checkify. Does not '
'work under pmap/pjit.')
)
jax_xla_profile_version = int_state(
name='jax_xla_profile_version',
default=0,
help=(
'Optional profile version for XLA compilation. This is meaningful '
'only when XLA is configured to support the remote compilation '
'profile feature.'),
include_in_jit_key=True,
include_in_trace_context=True,
)
@contextlib.contextmanager
def explicit_device_put_scope() -> Iterator[None]:
"""Indicates that the current context is an explicit device_put*() call."""
state = guard_lib.thread_local_state()
prev = state.explicit_device_put
state.explicit_device_put = True
try:
yield
finally:
state.explicit_device_put = prev
@contextlib.contextmanager
def explicit_device_get_scope() -> Iterator[None]:
"""Indicates that the current context is an explicit device_get() call."""
state = guard_lib.thread_local_state()
prev = state.explicit_device_get
state.explicit_device_get = True
try:
yield
finally:
state.explicit_device_get = prev
def _update_transfer_guard(state, key, val):
"""Applies the transfer guard level within guard_lib."""
if val is None:
setattr(state, key, None)
elif val == 'allow':
setattr(state, key, guard_lib.TransferGuardLevel.ALLOW)
elif val == 'log':
setattr(state, key, guard_lib.TransferGuardLevel.LOG)
elif val == 'disallow':
setattr(state, key, guard_lib.TransferGuardLevel.DISALLOW)
elif val == 'log_explicit':
setattr(state, key, guard_lib.TransferGuardLevel.LOG_EXPLICIT)
elif val == 'disallow_explicit':
setattr(state, key, guard_lib.TransferGuardLevel.DISALLOW_EXPLICIT)
else:
assert False, f'Invalid transfer guard level {val}'
transfer_guard_host_to_device = optional_enum_state(
name='jax_transfer_guard_host_to_device',
enum_values=[
'allow', 'log', 'disallow', 'log_explicit', 'disallow_explicit'
],
# The default is applied by guard_lib. Use None here to avoid accidentally
# overriding --jax_transfer_guard.
default=None,
help=('Select the transfer guard level for host-to-device transfers. '
'Default is "allow".'),
update_global_hook=lambda val: _update_transfer_guard(
guard_lib.global_state(), 'host_to_device', val),
update_thread_local_hook=lambda val: _update_transfer_guard(
guard_lib.thread_local_state(), 'host_to_device', val))
transfer_guard_device_to_device = optional_enum_state(
name='jax_transfer_guard_device_to_device',
enum_values=[
'allow', 'log', 'disallow', 'log_explicit', 'disallow_explicit'
],
# The default is applied by guard_lib. Use None here to avoid accidentally
# overriding --jax_transfer_guard.
default=None,
help=('Select the transfer guard level for device-to-device transfers. '
'Default is "allow".'),
update_global_hook=lambda val: _update_transfer_guard(
guard_lib.global_state(), 'device_to_device', val),
update_thread_local_hook=lambda val: _update_transfer_guard(
guard_lib.thread_local_state(), 'device_to_device', val))
transfer_guard_device_to_host = optional_enum_state(
name='jax_transfer_guard_device_to_host',
enum_values=[
'allow', 'log', 'disallow', 'log_explicit', 'disallow_explicit'
],
# The default is applied by guard_lib. Use None here to avoid
# accidentally overriding --jax_transfer_guard.
default=None,
help=('Select the transfer guard level for device-to-host transfers. '
'Default is "allow".'),
update_global_hook=lambda val: _update_transfer_guard(
guard_lib.global_state(), 'device_to_host', val
),
update_thread_local_hook=lambda val: _update_transfer_guard(
guard_lib.thread_local_state(), 'device_to_host', val))
def _update_all_transfer_guard_global(val):
for name in ('jax_transfer_guard_host_to_device',
'jax_transfer_guard_device_to_device',
'jax_transfer_guard_device_to_host'):
config.update(name, val)
_transfer_guard = optional_enum_state(
name='jax_transfer_guard',
enum_values=[
'allow', 'log', 'disallow', 'log_explicit', 'disallow_explicit'
],
# The default is applied by guard_lib. Use None here to avoid accidentally
# overriding --jax_transfer_guard_*.
default=None,
help=('Select the transfer guard level for all transfers. This option is '
'set-only; the transfer guard level for a specific direction should '
'be read using the per-transfer direction option. '
'Default is "allow".'),
update_global_hook=_update_all_transfer_guard_global)
@contextlib.contextmanager
def transfer_guard(new_val: str) -> Iterator[None]:
"""A contextmanager to control the transfer guard level for all transfers.
For more information, see
https://docs.jax.dev/en/latest/transfer_guard.html
Args:
new_val: The new thread-local transfer guard level for all transfers.
Yields:
None.
"""
with contextlib.ExitStack() as stack:
stack.enter_context(transfer_guard_host_to_device(new_val))
stack.enter_context(transfer_guard_device_to_device(new_val))
stack.enter_context(transfer_guard_device_to_host(new_val))
stack.enter_context(_transfer_guard(new_val))
yield
def _update_garbage_collection_guard(state, key, val):
"""Applies the transfer guard level within guard_lib."""
if val is None:
setattr(state, key, None)
elif val == 'allow':
setattr(state, key, guard_lib.GarbageCollectionGuardLevel.ALLOW)
elif val == 'log':
setattr(state, key, guard_lib.GarbageCollectionGuardLevel.LOG)
elif val == 'fatal':
setattr(state, key, guard_lib.GarbageCollectionGuardLevel.FATAL)
else:
assert False, f'Invalid garbage collection guard level {val}'
array_garbage_collection_guard = optional_enum_state(
name='jax_array_garbage_collection_guard',
enum_values=['allow', 'log', 'fatal'],
# The default is applied by guard_lib.
default=None,
help=(
'Select garbage collection guard level for ``jax.Array`` objects.\n\n'
'This option can be used to control what happens when a ``jax.Array``'
' object is garbage collected. It is desirable for ``jax.Array``'
' objects to be freed by Python reference counting rather than garbage'
' collection in order to avoid device memory being held by the arrays'
' until garbage collection occurs.\n\n'
'Valid values are:\n\n'
'* ``allow``: do not log garbage collection of ``jax.Array`` objects.\n'
'* ``log``: log an error when a ``jax.Array`` is garbage collected.\n'
'* ``fatal``: fatal error if a ``jax.Array`` is garbage collected.\n\n'
'Default is ``allow``. Note that not all cycles may be detected.'
),
update_global_hook=lambda val: _update_garbage_collection_guard(
guard_lib.global_state(), 'garbage_collect_array', val
),
update_thread_local_hook=lambda val: _update_garbage_collection_guard(
guard_lib.thread_local_state(), 'garbage_collect_array', val
),
)
# TODO(nbasile): Remove hasattr checks after jaxlib 0.8.1 release
if hasattr(_jax, 'RuntimeTracebackMode'):
class RuntimeTracebackMode(enum.StrEnum):
OFF = 'off'
ON = 'on'
FULL = 'full'
@classmethod
def _missing_(cls, value):
if isinstance(value, str):
try:
return cls[value.upper()]
except KeyError:
pass
return None
def as_cpp_enum(self):
return getattr(_jax.RuntimeTracebackMode, self.name)
send_traceback_to_runtime = enum_class_state(
name='jax_send_traceback_to_runtime',
enum_class=RuntimeTracebackMode,
default=RuntimeTracebackMode.OFF,
help=(
'Controls the level of Python traceback information sent to the'
' runtime at dispatch time:\n- "OFF": (default) No Python traceback'
' information is sent.\n- "ON": Only the most recent user frame call'
' location is sent.\n- "FULL": The full Python traceback of the call'
' location is sent. This has a high fixed cost on the dispatch path'
' and should be used only for debugging.'
),
update_global_hook=lambda val: _jax.set_send_traceback_to_runtime_global(
val.as_cpp_enum() if val is not None else _jax.RuntimeTracebackMode.OFF),
update_thread_local_hook=lambda val: _jax.set_send_traceback_to_runtime_thread_local(
val.as_cpp_enum() if val is not None else None),
)
# Don't define a context manager since this isn't threadsafe.
string_state(
name='jax_debug_log_modules',
default='',
help=('Comma-separated list of module names (e.g. "jax" or '
'"jax._src.xla_bridge,jax._src.dispatch") to enable debug logging '
'for.'),
update_global_hook=logging_config.update_debug_log_modules)
# Don't define a context manager since this isn't threadsafe.
optional_enum_state(
name='jax_logging_level',
enum_values=['NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default=logging.getLevelName(logging.getLogger("jax").level),
help=('Set the corresponding logging level on all jax loggers. Only string'
' values from ["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR",'
' "CRITICAL"] are accepted. If None, the logging level will not be'
' set. Includes C++ logging.'),
update_global_hook=lambda logging_level: \
logging_config.update_logging_level_global(logging_level=logging_level)
)
def _default_pmap_no_rank_reduction(new_val):
if not new_val:
deprecations.warn(
'jax-pmap-no-rank-reduction',
(
'Setting `jax_pmap_no_rank_reduction` to `False` is deprecated in '
'JAX v0.7.2 and will be removed in JAX v0.9.0.'
),
stacklevel=3,
)
pmap_no_rank_reduction = bool_state(
name='jax_pmap_no_rank_reduction',
default=True,
help=(
'[deprecated] If True, pmap shards have the same rank as their '
'enclosing array. Setting to `False` is deprecated and in the future '
'all `pmap` calls will proceed without rank reduction.'
),
validator=_default_pmap_no_rank_reduction,
)
use_shardy_partitioner = bool_state(
name='jax_use_shardy_partitioner',
default=True,
upgrade=True,
help=(
'Whether to lower to Shardy. See the migration guide for more '
'information: https://docs.jax.dev/en/latest/shardy_jax_migration.html.'
),
include_in_jit_key=True,
include_in_trace_context=True,
)
gpu_use_magma = enum_state(
name='jax_use_magma',
enum_values=['off', 'on', 'auto'],
default='auto',
help=(
'Enable experimental support for MAGMA-backed lax.linalg.eig on GPU. '
'See the documentation for lax.linalg.eig for more details about how '
'to use this feature.'
),
)
exec_time_optimization_effort = float_state(
name='jax_exec_time_optimization_effort',
default=0.0,
help='Effort for minimizing execution time (higher means more effort), valid range [-1.0, 1.0].'
)
memory_fitting_effort = float_state(
name='jax_memory_fitting_effort',
default=0.0,
help='Effort for minimizing memory usage (higher means more effort), valid range [-1.0, 1.0].'
)
optimization_level = enum_state(
name='jax_optimization_level',
enum_values=[
'UNKNOWN',
'O0',
'O1',
'O2',
'O3',
],
default='UNKNOWN',
help='The degree to which the compiler should optimize for execution time',
include_in_jit_key=True
)
memory_fitting_level = enum_state(
name='jax_memory_fitting_level',
enum_values=[
'UNKNOWN',
'O0',
'O1',
'O2',
'O3',
],
default='O2',
help=(
'The degree to which the compiler should attempt to make the program'
' fit in memory'
),
include_in_jit_key=True,
)
DEFAULT_CPU_COLLECTIVES_IMPL = "gloo"
cpu_collectives_implementation = optional_enum_state(
name='jax_cpu_collectives_implementation',
enum_values=["gloo", "mpi", "megascale"],
default=DEFAULT_CPU_COLLECTIVES_IMPL,
help=(
"Cross-process collective implementation used on CPU. Must be one of "
'("gloo", "mpi")'),
)
use_high_dynamic_range_gumbel = bool_state(
name='jax_high_dynamic_range_gumbel',
default=False,
    help='If True, Gumbel noise draws two samples to cover low probability '
'events with more precision.',
include_in_trace_context=True,
)
jax_dump_ir_to = string_flag(
name='jax_dump_ir_to',
default=os.getenv('JAX_DUMP_IR_TO', ''),
help="Path to which IR(s) emitted by JAX should be dumped as text files."
"If omitted, JAX will not dump any IR. "
"Supports the special value 'sponge' to pick the path from the "
"environment variable TEST_UNDECLARED_OUTPUTS_DIR. See "
"jax_dump_ir_modes for options governing what is dumped.")
jax_include_debug_info_in_dumps = bool_flag(
name='jax_include_debug_info_in_dumps',
default=bool_env('JAX_INCLUDE_DEBUG_INFO_IN_DUMPS', True),
help='Determine whether or not to keep debug symbols and location '
'information when dumping IR code. By default, debug information will '
'be preserved in the IR dump. To avoid exposing source code and '
'potentially sensitive information, set to false ')
# TODO(dsuo): Turn this into a list-valued flag.
jax_dump_ir_modes = string_flag(
name="jax_dump_ir_modes",
default=os.getenv("JAX_DUMP_IR_MODES", "stablehlo"),
help="Comma-delimited modes in which to dump IR. Can be 'stablehlo' (the "
"default), 'jaxpr', or 'eqn_count_pprof' for "
"jaxpr equation count pprof profile.")
jax_ragged_dot_use_ragged_dot_instruction = bool_state(
name='jax_ragged_dot_use_ragged_dot_instruction',
default=True,
help=(
'(TPU only) If True, use chlo.ragged_dot instruction for ragged_dot()'
' lowering. Otherwise, rely on the rollout logic in lowering rule for'
' ragged_dot_general_p.'
),
)
jax_collectives_common_channel_id = bool_flag(
name='jax_collectives_common_channel_id',
default=True,
help="Should collectives use a common channel ID? Temporary feature flag.",
)
jax_pallas_verbose_errors = bool_flag(
"jax_pallas_verbose_errors",
default=bool_env("JAX_PALLAS_VERBOSE_ERRORS", False),
help="If True, print verbose error messages for Pallas kernels.",
)
| NumpyDtypePromotion |
python | doocs__leetcode | solution/3300-3399/3335.Total Characters in String After Transformations I/Solution.py | {
"start": 0,
"end": 437
} | class ____:
def lengthAfterTransformations(self, s: str, t: int) -> int:
f = [[0] * 26 for _ in range(t + 1)]
for c in s:
f[0][ord(c) - ord("a")] += 1
for i in range(1, t + 1):
f[i][0] = f[i - 1][25]
f[i][1] = f[i - 1][0] + f[i - 1][25]
for j in range(2, 26):
f[i][j] = f[i - 1][j - 1]
mod = 10**9 + 7
return sum(f[t]) % mod
| Solution |
python | aio-libs__aiohttp | aiohttp/web_runner.py | {
"start": 10502,
"end": 12604
} | class ____(BaseRunner[Request]):
"""Web Application runner"""
__slots__ = ("_app",)
def __init__(
self,
app: Application,
*,
handle_signals: bool = False,
access_log_class: type[AbstractAccessLogger] = AccessLogger,
**kwargs: Any,
) -> None:
if not isinstance(app, Application):
raise TypeError(
f"The first argument should be web.Application instance, got {app!r}"
)
kwargs["access_log_class"] = access_log_class
if app._handler_args:
for k, v in app._handler_args.items():
kwargs[k] = v
if not issubclass(kwargs["access_log_class"], AbstractAccessLogger):
raise TypeError(
"access_log_class must be subclass of "
"aiohttp.abc.AbstractAccessLogger, got {}".format(
kwargs["access_log_class"]
)
)
super().__init__(handle_signals=handle_signals, **kwargs)
self._app = app
@property
def app(self) -> Application:
return self._app
async def shutdown(self) -> None:
await self._app.shutdown()
async def _make_server(self) -> Server[Request]:
self._app.on_startup.freeze()
await self._app.startup()
self._app.freeze()
return Server(
self._app._handle,
request_factory=self._make_request,
**self._kwargs,
)
def _make_request(
self,
message: RawRequestMessage,
payload: StreamReader,
protocol: RequestHandler[Request],
writer: AbstractStreamWriter,
task: "asyncio.Task[None]",
_cls: type[Request] = Request,
) -> Request:
loop = asyncio.get_running_loop()
return _cls(
message,
payload,
protocol,
writer,
task,
loop,
client_max_size=self.app._client_max_size,
)
async def _cleanup_server(self) -> None:
await self._app.cleanup()
| AppRunner |
python | sympy__sympy | sympy/sets/sets.py | {
"start": 65807,
"end": 80353
} | class ____(Set):
""" Represents the disjoint union (also known as the external disjoint union)
of a finite number of sets.
Examples
========
>>> from sympy import DisjointUnion, FiniteSet, Interval, Union, Symbol
>>> A = FiniteSet(1, 2, 3)
>>> B = Interval(0, 5)
>>> DisjointUnion(A, B)
DisjointUnion({1, 2, 3}, Interval(0, 5))
>>> DisjointUnion(A, B).rewrite(Union)
Union(ProductSet({1, 2, 3}, {0}), ProductSet(Interval(0, 5), {1}))
>>> C = FiniteSet(Symbol('x'), Symbol('y'), Symbol('z'))
>>> DisjointUnion(C, C)
DisjointUnion({x, y, z}, {x, y, z})
>>> DisjointUnion(C, C).rewrite(Union)
ProductSet({x, y, z}, {0, 1})
References
==========
https://en.wikipedia.org/wiki/Disjoint_union
"""
def __new__(cls, *sets):
dj_collection = []
for set_i in sets:
if isinstance(set_i, Set):
dj_collection.append(set_i)
else:
raise TypeError("Invalid input: '%s', input args \
to DisjointUnion must be Sets" % set_i)
obj = Basic.__new__(cls, *dj_collection)
return obj
@property
def sets(self):
return self.args
@property
def is_empty(self):
return fuzzy_and(s.is_empty for s in self.sets)
@property
def is_finite_set(self):
all_finite = fuzzy_and(s.is_finite_set for s in self.sets)
return fuzzy_or([self.is_empty, all_finite])
@property
def is_iterable(self):
if self.is_empty:
return False
iter_flag = True
for set_i in self.sets:
if not set_i.is_empty:
iter_flag = iter_flag and set_i.is_iterable
return iter_flag
def _eval_rewrite_as_Union(self, *sets, **kwargs):
"""
Rewrites the disjoint union as the union of (``set`` x {``i``})
where ``set`` is the element in ``sets`` at index = ``i``
"""
dj_union = S.EmptySet
index = 0
for set_i in sets:
if isinstance(set_i, Set):
cross = ProductSet(set_i, FiniteSet(index))
dj_union = Union(dj_union, cross)
index = index + 1
return dj_union
def _contains(self, element):
"""
``in`` operator for DisjointUnion
Examples
========
>>> from sympy import Interval, DisjointUnion
>>> D = DisjointUnion(Interval(0, 1), Interval(0, 2))
>>> (0.5, 0) in D
True
>>> (0.5, 1) in D
True
>>> (1.5, 0) in D
False
>>> (1.5, 1) in D
True
Passes operation on to constituent sets
"""
if not isinstance(element, Tuple) or len(element) != 2:
return S.false
if not element[1].is_Integer:
return S.false
if element[1] >= len(self.sets) or element[1] < 0:
return S.false
return self.sets[element[1]]._contains(element[0])
def _kind(self):
if not self.args:
return SetKind()
elif all(i.kind == self.args[0].kind for i in self.args):
return self.args[0].kind
else:
return SetKind(UndefinedKind)
def __iter__(self):
if self.is_iterable:
iters = []
for i, s in enumerate(self.sets):
iters.append(iproduct(s, {Integer(i)}))
return iter(roundrobin(*iters))
else:
raise ValueError("'%s' is not iterable." % self)
def __len__(self):
"""
Returns the length of the disjoint union, i.e., the number of elements in the set.
Examples
========
>>> from sympy import FiniteSet, DisjointUnion, EmptySet
>>> D1 = DisjointUnion(FiniteSet(1, 2, 3, 4), EmptySet, FiniteSet(3, 4, 5))
>>> len(D1)
7
>>> D2 = DisjointUnion(FiniteSet(3, 5, 7), EmptySet, FiniteSet(3, 5, 7))
>>> len(D2)
6
>>> D3 = DisjointUnion(EmptySet, EmptySet)
>>> len(D3)
0
Adds up the lengths of the constituent sets.
"""
if self.is_finite_set:
size = 0
for set in self.sets:
size += len(set)
return size
else:
raise ValueError("'%s' is not a finite set." % self)
def imageset(*args):
r"""
Return an image of the set under transformation ``f``.
Explanation
===========
If this function cannot compute the image, it returns an
unevaluated ImageSet object.
.. math::
\{ f(x) \mid x \in \mathrm{self} \}
Examples
========
>>> from sympy import S, Interval, imageset, sin, Lambda
>>> from sympy.abc import x
>>> imageset(x, 2*x, Interval(0, 2))
Interval(0, 4)
>>> imageset(lambda x: 2*x, Interval(0, 2))
Interval(0, 4)
>>> imageset(Lambda(x, sin(x)), Interval(-2, 1))
ImageSet(Lambda(x, sin(x)), Interval(-2, 1))
>>> imageset(sin, Interval(-2, 1))
ImageSet(Lambda(x, sin(x)), Interval(-2, 1))
>>> imageset(lambda y: x + y, Interval(-2, 1))
ImageSet(Lambda(y, x + y), Interval(-2, 1))
Expressions applied to the set of Integers are simplified
to show as few negatives as possible and linear expressions
are converted to a canonical form. If this is not desirable
then the unevaluated ImageSet should be used.
>>> imageset(x, -2*x + 5, S.Integers)
ImageSet(Lambda(x, 2*x + 1), Integers)
See Also
========
sympy.sets.fancysets.ImageSet
"""
from .fancysets import ImageSet
from .setexpr import set_function
if len(args) < 2:
raise ValueError('imageset expects at least 2 args, got: %s' % len(args))
if isinstance(args[0], (Symbol, tuple)) and len(args) > 2:
f = Lambda(args[0], args[1])
set_list = args[2:]
else:
f = args[0]
set_list = args[1:]
if isinstance(f, Lambda):
pass
elif callable(f):
nargs = getattr(f, 'nargs', {})
if nargs:
if len(nargs) != 1:
raise NotImplementedError(filldedent('''
This function can take more than 1 arg
but the potentially complicated set input
has not been analyzed at this point to
know its dimensions. TODO
'''))
N = nargs.args[0]
if N == 1:
s = 'x'
else:
s = [Symbol('x%i' % i) for i in range(1, N + 1)]
else:
s = inspect.signature(f).parameters
dexpr = _sympify(f(*[Dummy() for i in s]))
var = tuple(uniquely_named_symbol(
Symbol(i), dexpr) for i in s)
f = Lambda(var, f(*var))
else:
raise TypeError(filldedent('''
expecting lambda, Lambda, or FunctionClass,
not \'%s\'.''' % func_name(f)))
if any(not isinstance(s, Set) for s in set_list):
name = [func_name(s) for s in set_list]
raise ValueError(
'arguments after mapping should be sets, not %s' % name)
if len(set_list) == 1:
set = set_list[0]
try:
# TypeError if arg count != set dimensions
r = set_function(f, set)
if r is None:
raise TypeError
if not r:
return r
except TypeError:
r = ImageSet(f, set)
if isinstance(r, ImageSet):
f, set = r.args
if f.variables[0] == f.expr:
return set
if isinstance(set, ImageSet):
# XXX: Maybe this should just be:
# f2 = set.lambda
# fun = Lambda(f2.signature, f(*f2.expr))
# return imageset(fun, *set.base_sets)
if len(set.lamda.variables) == 1 and len(f.variables) == 1:
x = set.lamda.variables[0]
y = f.variables[0]
return imageset(
Lambda(x, f.expr.subs(y, set.lamda.expr)), *set.base_sets)
if r is not None:
return r
return ImageSet(f, *set_list)
def is_function_invertible_in_set(func, setv):
"""
Checks whether function ``func`` is invertible when the domain is
restricted to set ``setv``.
"""
# Functions known to always be invertible:
if func in (exp, log):
return True
u = Dummy("u")
fdiff = func(u).diff(u)
# monotonous functions:
# TODO: check subsets (`func` in `setv`)
if (fdiff > 0) == True or (fdiff < 0) == True:
return True
# TODO: support more
return None
def simplify_union(args):
"""
Simplify a :class:`Union` using known rules.
Explanation
===========
We first start with global rules like 'Merge all FiniteSets'
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent. This process depends
on ``union_sets(a, b)`` functions.
"""
from sympy.sets.handlers.union import union_sets
# ===== Global Rules =====
if not args:
return S.EmptySet
for arg in args:
if not isinstance(arg, Set):
raise TypeError("Input args to Union must be Sets")
# Merge all finite sets
finite_sets = [x for x in args if x.is_FiniteSet]
if len(finite_sets) > 1:
a = (x for set in finite_sets for x in set)
finite_set = FiniteSet(*a)
args = [finite_set] + [x for x in args if not x.is_FiniteSet]
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
args = set(args)
new_args = True
while new_args:
for s in args:
new_args = False
for t in args - {s}:
new_set = union_sets(s, t)
                # This returns None if s does not know how to form a union
                # with t. Returns the newly unioned set otherwise
if new_set is not None:
if not isinstance(new_set, set):
new_set = {new_set}
new_args = (args - {s, t}).union(new_set)
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return Union(*args, evaluate=False)
def simplify_intersection(args):
"""
Simplify an intersection using known rules.
Explanation
===========
We first start with global rules like
'if any empty sets return empty set' and 'distribute any unions'
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent
"""
# ===== Global Rules =====
if not args:
return S.UniversalSet
for arg in args:
if not isinstance(arg, Set):
raise TypeError("Input args to Union must be Sets")
# If any EmptySets return EmptySet
if S.EmptySet in args:
return S.EmptySet
# Handle Finite sets
rv = Intersection._handle_finite_sets(args)
if rv is not None:
return rv
# If any of the sets are unions, return a Union of Intersections
for s in args:
if s.is_Union:
other_sets = set(args) - {s}
if len(other_sets) > 0:
other = Intersection(*other_sets)
return Union(*(Intersection(arg, other) for arg in s.args))
else:
return Union(*s.args)
for s in args:
if s.is_Complement:
args.remove(s)
other_sets = args + [s.args[0]]
return Complement(Intersection(*other_sets), s.args[1])
from sympy.sets.handlers.intersection import intersection_sets
# At this stage we are guaranteed not to have any
# EmptySets, FiniteSets, or Unions in the intersection
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
args = set(args)
new_args = True
while new_args:
for s in args:
new_args = False
for t in args - {s}:
new_set = intersection_sets(s, t)
# This returns None if s does not know how to intersect
# with t. Returns the newly intersected set otherwise
if new_set is not None:
new_args = (args - {s, t}).union({new_set})
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return Intersection(*args, evaluate=False)
def _handle_finite_sets(op, x, y, commutative):
# Handle finite sets:
fs_args, other = sift([x, y], lambda x: isinstance(x, FiniteSet), binary=True)
if len(fs_args) == 2:
return FiniteSet(*[op(i, j) for i in fs_args[0] for j in fs_args[1]])
elif len(fs_args) == 1:
sets = [_apply_operation(op, other[0], i, commutative) for i in fs_args[0]]
return Union(*sets)
else:
return None
def _apply_operation(op, x, y, commutative):
from .fancysets import ImageSet
d = Dummy('d')
out = _handle_finite_sets(op, x, y, commutative)
if out is None:
out = op(x, y)
if out is None and commutative:
out = op(y, x)
if out is None:
_x, _y = symbols("x y")
if isinstance(x, Set) and not isinstance(y, Set):
out = ImageSet(Lambda(d, op(d, y)), x).doit()
elif not isinstance(x, Set) and isinstance(y, Set):
out = ImageSet(Lambda(d, op(x, d)), y).doit()
else:
out = ImageSet(Lambda((_x, _y), op(_x, _y)), x, y)
return out
def set_add(x, y):
from sympy.sets.handlers.add import _set_add
return _apply_operation(_set_add, x, y, commutative=True)
def set_sub(x, y):
from sympy.sets.handlers.add import _set_sub
return _apply_operation(_set_sub, x, y, commutative=False)
def set_mul(x, y):
from sympy.sets.handlers.mul import _set_mul
return _apply_operation(_set_mul, x, y, commutative=True)
def set_div(x, y):
from sympy.sets.handlers.mul import _set_div
return _apply_operation(_set_div, x, y, commutative=False)
def set_pow(x, y):
from sympy.sets.handlers.power import _set_pow
return _apply_operation(_set_pow, x, y, commutative=False)
def set_function(f, x):
from sympy.sets.handlers.functions import _set_function
return _set_function(f, x)
| DisjointUnion |
python | walkccc__LeetCode | solutions/1577. Number of Ways Where Square of Number Is Equal to Product of Two Numbers/1577.py | {
"start": 0,
"end": 644
} | class ____:
def numTriplets(self, nums1: list[int], nums2: list[int]) -> int:
def countTriplets(A: list[int], B: list[int]):
"""Returns the number of triplet (i, j, k) if A[i]^2 == B[j] * B[k]."""
res = 0
count = collections.Counter(B)
for a in A:
target = a * a
for b, freq in count.items():
if target % b > 0 or target // b not in count:
continue
if target // b == b:
res += freq * (freq - 1)
else:
res += freq * count[target // b]
return res // 2
return countTriplets(nums1, nums2) + countTriplets(nums2, nums1)
| Solution |
python | ray-project__ray | python/ray/tests/test_output.py | {
"start": 13437,
"end": 13552
} | class ____:
def f(self):
print("hi stdout")
print("hi stderr", file=sys.stderr)
@ray.remote
| Actor1 |
python | keras-team__keras | keras/src/trainers/data_adapters/array_slicing.py | {
"start": 5933,
"end": 6434
} | class ____(Sliceable):
def __getitem__(self, indices):
return self.array.iloc[indices]
@classmethod
def convert_to_numpy(cls, x):
return x.to_numpy()
@classmethod
def convert_to_tf_dataset_compatible(cls, x):
return cls.convert_to_numpy(x)
@classmethod
def convert_to_jax_compatible(cls, x):
return cls.convert_to_numpy(x)
@classmethod
def convert_to_torch_compatible(cls, x):
return cls.convert_to_numpy(x)
| PandasSliceable |
python | huggingface__transformers | src/transformers/integrations/tensor_parallel.py | {
"start": 43344,
"end": 45147
} | class ____(TensorParallelLayer):
"""
Applies Expert Parallelism to MoE experts by loading the correct experts on each device.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.use_dtensor = False
def shard_tensor(
self,
param,
param_type=None,
param_casting_dtype=None,
to_contiguous=None,
rank=None,
device_mesh=None,
tensor_idx=None,
):
empty_param = self.empty_param
ep_rank = self.rank
device_mesh = self.device_mesh
global_num_experts = empty_param.shape[0]
if global_num_experts % device_mesh.size() != 0:
raise ValueError(
f"Global number of experts must be divisible by number of devices: {global_num_experts} % {device_mesh.size()} != 0"
)
local_num_experts = global_num_experts // device_mesh.size()
parameter = param[ep_rank * local_num_experts : (ep_rank + 1) * local_num_experts].to(param_casting_dtype)
self.shard = None
return parameter, None
def partition_tensor(self, param, empty_param, param_type, param_casting_dtype, to_contiguous, rank, device_mesh):
ep_rank = rank
global_num_experts = empty_param.shape[0]
if global_num_experts % device_mesh.size() != 0:
raise ValueError(
f"Global number of experts must be divisible by number of devices: {global_num_experts} % {device_mesh.size()} != 0"
)
local_num_experts = global_num_experts // device_mesh.size()
param = param[ep_rank * local_num_experts : (ep_rank + 1) * local_num_experts].to(param_casting_dtype)
if to_contiguous:
param = param.contiguous()
return param
| GroupedGemmParallel |
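The expert-slicing arithmetic in `shard_tensor`/`partition_tensor` is easy to verify with a small stand-alone sketch; the numbers below are hypothetical and not tied to any particular model:
global_num_experts = 64
world_size = 8                                          # device_mesh.size()
assert global_num_experts % world_size == 0
local_num_experts = global_num_experts // world_size    # 8 experts per rank
ep_rank = 3
start = ep_rank * local_num_experts
stop = (ep_rank + 1) * local_num_experts
print(start, stop)                                      # 24 32 -> rank 3 owns experts 24..31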
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 552638,
"end": 552997
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of DeleteBranchProtectionRule"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id",)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| DeleteBranchProtectionRulePayload |
python | PyCQA__pylint | doc/data/messages/u/unsupported-membership-test/good.py | {
"start": 0,
"end": 145
} | class ____:
FRUITS = ["apple", "orange"]
def __contains__(self, name):
return name in self.FRUITS
apple = "apple" in Fruit()
| Fruit |
python | huggingface__transformers | tests/utils/import_structures/import_structure_register_with_comments.py | {
"start": 1036,
"end": 1230
} | class ____:
def __init__(self):
pass
@requires(backends=("torch",))
# That's a statement
def b2():
pass
@requires(
backends=(
"torch",
)
)
# That's a statement
| B2 |
python | ray-project__ray | python/ray/_common/tests/test_formatters.py | {
"start": 142,
"end": 4042
} | class ____:
def test_empty_record(self, shutdown_only):
formatter = JSONFormatter()
record = logging.makeLogRecord({})
formatted = formatter.format(record)
record_dict = json.loads(formatted)
should_exist = [
"process",
"asctime",
"levelname",
"message",
"filename",
"lineno",
"timestamp_ns",
]
for key in should_exist:
assert key in record_dict
assert len(record_dict) == len(should_exist)
assert "exc_text" not in record_dict
def test_record_with_exception(self, shutdown_only):
formatter = JSONFormatter()
record = logging.makeLogRecord({})
try:
raise ValueError("test")
except ValueError:
record.exc_info = sys.exc_info()
formatted = formatter.format(record)
record_dict = json.loads(formatted)
should_exist = [
"process",
"asctime",
"levelname",
"message",
"filename",
"lineno",
"exc_text",
"timestamp_ns",
]
for key in should_exist:
assert key in record_dict
assert "Traceback (most recent call last):" in record_dict["exc_text"]
assert len(record_dict) == len(should_exist)
def test_record_with_user_provided_context(self, shutdown_only):
formatter = JSONFormatter()
record = logging.makeLogRecord({"user": "ray"})
formatted = formatter.format(record)
record_dict = json.loads(formatted)
should_exist = [
"process",
"asctime",
"levelname",
"message",
"filename",
"lineno",
"user",
"timestamp_ns",
]
for key in should_exist:
assert key in record_dict
assert record_dict["user"] == "ray"
assert len(record_dict) == len(should_exist)
assert "exc_text" not in record_dict
def test_record_with_flatten_keys_invalid_value(self, shutdown_only):
formatter = JSONFormatter()
record = logging.makeLogRecord({"ray_serve_extra_fields": "not_a_dict"})
with pytest.raises(ValueError):
formatter.format(record)
def test_record_with_flatten_keys_valid_dict(self, shutdown_only):
formatter = JSONFormatter()
record = logging.makeLogRecord(
{"ray_serve_extra_fields": {"key1": "value1", "key2": 2}}
)
formatted = formatter.format(record)
record_dict = json.loads(formatted)
should_exist = [
"process",
"asctime",
"levelname",
"message",
"filename",
"lineno",
"key1",
"key2",
"timestamp_ns",
]
for key in should_exist:
assert key in record_dict
assert record_dict["key1"] == "value1", record_dict
assert record_dict["key2"] == 2
assert "ray_serve_extra_fields" not in record_dict
assert len(record_dict) == len(should_exist)
assert "exc_text" not in record_dict
def test_record_with_valid_additional_log_standard_attrs(self, shutdown_only):
formatter = JSONFormatter()
formatter.set_additional_log_standard_attrs(["name"])
record = logging.makeLogRecord({})
formatted = formatter.format(record)
record_dict = json.loads(formatted)
should_exist = [
"process",
"asctime",
"levelname",
"message",
"filename",
"lineno",
"timestamp_ns",
"name",
]
for key in should_exist:
assert key in record_dict
assert len(record_dict) == len(should_exist)
| TestJSONFormatter |
python | ray-project__ray | python/ray/util/spark/start_hook_base.py | {
"start": 0,
"end": 417
} | class ____:
def __init__(self, is_global):
self.is_global = is_global
def get_default_temp_root_dir(self):
return "/tmp"
def on_ray_dashboard_created(self, port):
pass
def on_cluster_created(self, ray_cluster_handler):
pass
def on_spark_job_created(self, job_group_id):
pass
def custom_environment_variables(self):
return {}
| RayOnSparkStartHook |
python | django-import-export__django-import-export | tests/core/tests/test_widgets.py | {
"start": 9322,
"end": 9752
} | class ____(TestCase):
"""https://github.com/django-import-export/django-import-export/pull/94"""
def setUp(self):
self.date = date(1868, 8, 13)
self.widget = widgets.DateWidget("%d.%m.%Y")
def test_render(self):
self.assertEqual(self.widget.render(self.date), "13.08.1868")
def test_clean(self):
self.assertEqual(self.widget.clean("13.08.1868"), self.date)
| DateWidgetBefore1900Test |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataproc.py | {
"start": 156041,
"end": 158807
} | class ____:
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook):
page_token = "page_token"
page_size = 42
filter = 'batch_id=~"a-batch-id*" AND create_time>="2023-07-05T14:25:04.643818Z"'
order_by = "create_time desc"
op = DataprocListBatchesOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_REGION,
project_id=GCP_PROJECT,
page_size=page_size,
page_token=page_token,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
filter=filter,
order_by=order_by,
)
op.execute(context=MagicMock())
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.list_batches.assert_called_once_with(
region=GCP_REGION,
project_id=GCP_PROJECT,
page_size=page_size,
page_token=page_token,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
filter=filter,
order_by=order_by,
)
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
@mock.patch(DATAPROC_TRIGGERS_PATH.format("DataprocAsyncHook"))
def test_execute_deferrable(self, mock_trigger_hook, mock_hook):
mock_hook.return_value.submit_job.return_value.reference.job_id = TEST_JOB_ID
op = DataprocCreateBatchOperator(
task_id=TASK_ID,
region=GCP_REGION,
project_id=GCP_PROJECT,
batch=BATCH,
batch_id="batch_id",
gcp_conn_id=GCP_CONN_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
request_id=REQUEST_ID,
impersonation_chain=IMPERSONATION_CHAIN,
deferrable=True,
)
with pytest.raises(TaskDeferred) as exc:
op.execute(mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.create_batch.assert_called_once_with(
region=GCP_REGION,
project_id=GCP_PROJECT,
batch_id="batch_id",
batch=BATCH,
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
mock_hook.return_value.wait_for_job.assert_not_called()
assert isinstance(exc.value.trigger, DataprocBatchTrigger)
assert exc.value.method_name == GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME
| TestDataprocListBatchesOperator |
python | ray-project__ray | rllib/examples/envs/custom_env_render_method.py | {
"start": 3622,
"end": 7876
} | class ____(gym.Env):
"""Example of a custom env, for which we specify rendering behavior."""
def __init__(self, config):
self.end_pos = config.get("corridor_length", 10)
self.max_steps = config.get("max_steps", 100)
self.cur_pos = 0
self.steps = 0
self.action_space = Discrete(2)
self.observation_space = Box(0.0, 999.0, shape=(1,), dtype=np.float32)
def reset(self, *, seed=None, options=None):
self.cur_pos = 0.0
self.steps = 0
return np.array([self.cur_pos], np.float32), {}
def step(self, action):
self.steps += 1
assert action in [0, 1], action
if action == 0 and self.cur_pos > 0:
self.cur_pos -= 1.0
elif action == 1:
self.cur_pos += 1.0
truncated = self.steps >= self.max_steps
terminated = self.cur_pos >= self.end_pos
return (
np.array([self.cur_pos], np.float32),
10.0 if terminated else -0.1,
terminated,
truncated,
{},
)
def render(self) -> np._typing.NDArray[np.uint8]:
"""Implements rendering logic for this env (given the current observation).
You should return a numpy RGB image like so:
np.array([height, width, 3], dtype=np.uint8).
Returns:
np.ndarray: A numpy uint8 3D array (image) to render.
"""
# Image dimensions.
# Each position in the corridor is 50 pixels wide.
width = (self.end_pos + 2) * 50
# Fixed height of the image.
height = 100
# Create a new image with white background
image = Image.new("RGB", (width, height), "white")
draw = ImageDraw.Draw(image)
# Draw the corridor walls
# Grey rectangle for the corridor.
draw.rectangle([50, 30, width - 50, 70], fill="grey")
# Draw the agent.
# Calculate the x coordinate of the agent.
agent_x = (self.cur_pos + 1) * 50
# Blue rectangle for the agent.
draw.rectangle([agent_x + 10, 40, agent_x + 40, 60], fill="blue")
# Draw the goal state.
# Calculate the x coordinate of the goal.
goal_x = self.end_pos * 50
# Green rectangle for the goal state.
draw.rectangle([goal_x + 10, 40, goal_x + 40, 60], fill="green")
# Convert the image to a uint8 numpy array.
return np.array(image, dtype=np.uint8)
# Create a simple multi-agent version of the above Env by duplicating the single-agent
# env n (n=num agents) times and having the agents act independently, each one in a
# different corridor.
MultiAgentCustomRenderedCorridorEnv = make_multi_agent(
lambda config: CustomRenderedCorridorEnv(config)
)
if __name__ == "__main__":
args = parser.parse_args()
# The `config` arg passed into our Env's constructor (see the class' __init__ method
# above). Feel free to change these.
env_options = {
"corridor_length": 10,
"max_steps": 100,
"num_agents": args.num_agents, # <- only used by the multu-agent version.
}
env_cls_to_use = (
CustomRenderedCorridorEnv
if args.num_agents == 0
else MultiAgentCustomRenderedCorridorEnv
)
tune.register_env("env", lambda _: env_cls_to_use(env_options))
# Example config switching on rendering.
base_config = (
PPOConfig()
# Configure our env to be the above-registered one.
.environment("env")
# Plugin our env-rendering (and logging) callback. This callback class allows
# you to fully customize your rendering behavior (which workers should render,
# which episodes, which (vector) env indices, etc..). We refer to this example
# script here for further details:
# https://github.com/ray-project/ray/blob/master/rllib/examples/envs/env_rendering_and_recording.py # noqa
.callbacks(EnvRenderCallback)
)
if args.num_agents > 0:
base_config.multi_agent(
policies={f"p{i}" for i in range(args.num_agents)},
policy_mapping_fn=lambda aid, eps, **kw: f"p{aid}",
)
run_rllib_example_script_experiment(base_config, args)
| CustomRenderedCorridorEnv |
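Outside of RLlib, the env defined above can be exercised directly with Gymnasium-style calls; a rough sketch with made-up config values:
env = CustomRenderedCorridorEnv({"corridor_length": 5, "max_steps": 20})
obs, info = env.reset()
obs, reward, terminated, truncated, info = env.step(1)   # move right
frame = env.render()
print(frame.shape, frame.dtype)   # (100, 350, 3) uint8, since width = (5 + 2) * 50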
python | sqlalchemy__sqlalchemy | test/dialect/mssql/test_types.py | {
"start": 4532,
"end": 5753
} | class ____(fixtures.TestBase):
__only_on__ = "mssql"
__backend__ = True
def test_result_processor(self):
expected = datetime.date(2000, 1, 2)
self._assert_result_processor(expected, "2000-01-02")
def _assert_result_processor(self, expected, value):
mssql_date_type = _MSDate()
result_processor = mssql_date_type.result_processor(None, None)
eq_(expected, result_processor(value))
def test_result_processor_invalid(self):
mssql_date_type = _MSDate()
result_processor = mssql_date_type.result_processor(None, None)
assert_raises_message(
ValueError,
"could not parse 'abc' as a date value",
result_processor,
"abc",
)
def test_extract(self, connection):
from sqlalchemy import extract
fivedaysago = datetime.datetime.now() - datetime.timedelta(days=5)
for field, exp in (
("year", fivedaysago.year),
("month", fivedaysago.month),
("day", fivedaysago.day),
):
r = connection.execute(
select(extract(field, fivedaysago))
).scalar()
eq_(r, exp)
| MSDateTypeTest |
python | PrefectHQ__prefect | src/integrations/prefect-docker/prefect_docker/host.py | {
"start": 212,
"end": 643
} | class ____(docker.DockerClient):
"""
Allow the Docker client to be used as a context manager, while still allowing it to be instantiated without one.
"""
def __enter__(self):
"""
Enters the context manager.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Exits the context manager and closes the DockerClient.
"""
self.close()
| _ContextManageableDockerClient |
python | realpython__materials | python-maze-solver/source_code_final/src/maze_solver/view/primitives.py | {
"start": 451,
"end": 739
} | class ____(NamedTuple):
start: Point
end: Point
def draw(self, **attributes) -> str:
return tag(
"line",
x1=self.start.x,
y1=self.start.y,
x2=self.end.x,
y2=self.end.y,
**attributes,
)
| Line |
python | conda__conda | conda/api.py | {
"start": 6777,
"end": 10278
} | class ____:
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
High-level management and usage of repodata.json for subdirs.
"""
def __init__(self, channel):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Args:
channel (str or Channel):
The target subdir for the instance. Must either be a url that includes a subdir
or a :obj:`Channel` that includes a subdir. e.g.:
* 'https://repo.anaconda.com/pkgs/main/linux-64'
* Channel('https://repo.anaconda.com/pkgs/main/linux-64')
* Channel('conda-forge/osx-64')
"""
channel = Channel(channel)
if not channel.subdir:
raise ValueError("SubdirData requires platform-aware Channel objects.")
self._internal = _SubdirData(channel)
def query(self, package_ref_or_match_spec):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Run a query against this specific instance of repodata.
Args:
package_ref_or_match_spec (PackageRef or MatchSpec or str):
Either an exact :obj:`PackageRef` to match against, or a :obj:`MatchSpec`
query object. A :obj:`str` will be turned into a :obj:`MatchSpec` automatically.
Returns:
tuple[PackageRecord]
"""
return tuple(self._internal.query(package_ref_or_match_spec))
@staticmethod
def query_all(package_ref_or_match_spec, channels=None, subdirs=None):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Run a query against all repodata instances in channel/subdir matrix.
Args:
package_ref_or_match_spec (PackageRef or MatchSpec or str):
Either an exact :obj:`PackageRef` to match against, or a :obj:`MatchSpec`
query object. A :obj:`str` will be turned into a :obj:`MatchSpec` automatically.
channels (Iterable[Channel or str] or None):
An iterable of urls for channels or :obj:`Channel` objects. If None, will fall
back to context.channels.
subdirs (Iterable[str] or None):
If None, will fall back to context.subdirs.
Returns:
tuple[PackageRecord]
"""
return tuple(
_SubdirData.query_all(package_ref_or_match_spec, channels, subdirs)
)
def iter_records(self):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Returns:
Iterable[PackageRecord]: A generator over all records contained in the repodata.json
instance. Warning: this is a generator that is exhausted on first use.
"""
return self._internal.iter_records()
def reload(self):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Update the instance with new information. Backing information (i.e. repodata.json)
is lazily downloaded/loaded on first use by the other methods of this class. You
should only use this method if you are *sure* you have outdated data.
Returns:
SubdirData
"""
self._internal = self._internal.reload()
return self
| SubdirData |
python | huggingface__transformers | src/transformers/models/sew/modeling_sew.py | {
"start": 14393,
"end": 15746
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.attention = SEWAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
config=config,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = SEWFeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
attn_residual = hidden_states
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
| SEWEncoderLayer |
python | h5py__h5py | h5py/tests/test_dataset.py | {
"start": 39320,
"end": 41183
} | class ____(BaseDataset):
"""
Feature: Datasets auto-created from data produce the correct types
"""
def assert_string_type(self, ds, cset, variable=True):
tid = ds.id.get_type()
self.assertEqual(type(tid), h5py.h5t.TypeStringID)
self.assertEqual(tid.get_cset(), cset)
if variable:
assert tid.is_variable_str()
def test_vlen_bytes(self):
"""Assigning byte strings produces a vlen string ASCII dataset """
x = make_name("x")
y = make_name("y")
z = make_name("z")
self.f[x] = b"Hello there"
self.assert_string_type(self.f[x], h5py.h5t.CSET_ASCII)
self.f[y] = [b"a", b"bc"]
self.assert_string_type(self.f[y], h5py.h5t.CSET_ASCII)
self.f[z] = np.array([b"a", b"bc"], dtype=np.object_)
self.assert_string_type(self.f[z], h5py.h5t.CSET_ASCII)
def test_vlen_unicode(self):
"""Assigning unicode strings produces a vlen string UTF-8 dataset """
x = make_name("x")
y = make_name("y")
z = make_name("z")
self.f[x] = "Hello there" + chr(0x2034)
self.assert_string_type(self.f[x], h5py.h5t.CSET_UTF8)
self.f[y] = ["a", "bc"]
self.assert_string_type(self.f[y], h5py.h5t.CSET_UTF8)
# 2D array; this only works with an array, not nested lists
self.f[z] = np.array([["a", "bc"]], dtype=np.object_)
self.assert_string_type(self.f[z], h5py.h5t.CSET_UTF8)
def test_string_fixed(self):
""" Assignment of fixed-length byte string produces a fixed-length
ascii dataset """
name = make_name()
self.f[name] = np.bytes_("Hello there")
ds = self.f[name]
self.assert_string_type(ds, h5py.h5t.CSET_ASCII, variable=False)
self.assertEqual(ds.id.get_type().get_size(), 11)
| TestAutoCreate |
python | django__django | tests/queries/tests.py | {
"start": 177974,
"end": 178170
} | class ____(TestCase):
def test_ticket_24278(self):
School.objects.create()
qs = School.objects.filter(Q(pk__in=()) | Q())
self.assertSequenceEqual(qs, [])
| TestTicket24279 |
python | kamyu104__LeetCode-Solutions | Python/remove-linked-list-elements.py | {
"start": 128,
"end": 576
} | class ____(object):
# @param {ListNode} head
# @param {integer} val
# @return {ListNode}
def removeElements(self, head, val):
dummy = ListNode(float("-inf"))
dummy.next = head
prev, curr = dummy, dummy.next
while curr:
if curr.val == val:
prev.next = curr.next
else:
prev = curr
curr = curr.next
return dummy.next
| Solution |
python | kamyu104__LeetCode-Solutions | Python/erect-the-fence.py | {
"start": 170,
"end": 1791
} | class ____(object):
def outerTrees(self, points):
"""
:type points: List[List[int]]
:rtype: List[List[int]]
"""
# Sort the points lexicographically (tuples are compared lexicographically).
# Remove duplicates to detect the case we have just one unique point.
points = sorted(set(tuple(x) for x in points))
# Boring case: no points or a single point, possibly repeated multiple times.
if len(points) <= 1:
return points
# 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.
# Returns a positive value, if OAB makes a counter-clockwise turn,
# negative for clockwise turn, and zero if the points are collinear.
def cross(o, a, b):
return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])
# Build lower hull
lower = []
for p in points:
while len(lower) >= 2 and cross(lower[-2], lower[-1], p) < 0: # modified
lower.pop()
lower.append(p)
# Build upper hull
upper = []
for p in reversed(points):
while len(upper) >= 2 and cross(upper[-2], upper[-1], p) < 0: # modified
upper.pop()
upper.append(p)
# Concatenation of the lower and upper hulls gives the convex hull.
# Last point of each list is omitted because it is repeated at the beginning of the other list.
result = lower[:-1] + upper[:-1]
return result if result[1] != result[-1] else result[:len(result)//2+1] # modified
| Solution |
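A quick check against the classic example; points come back as tuples and their order depends on the hull traversal, so the sketch sorts them:
points = [[1, 1], [2, 2], [2, 0], [2, 4], [3, 3], [4, 2]]
fence = Solution().outerTrees(points)
# every point except the interior [2, 2] lies on the fence
print(sorted(fence))   # [(1, 1), (2, 0), (2, 4), (3, 3), (4, 2)]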
python | pypa__warehouse | tests/unit/accounts/test_forms.py | {
"start": 15355,
"end": 32300
} | class ____:
def test_validate(self, metrics):
captcha_service = pretend.stub(
enabled=False,
verify_response=pretend.call_recorder(lambda _: None),
)
user_service = pretend.stub(
check_password=lambda userid, password, tags=None: True,
find_userid=lambda userid: None,
find_userid_by_email=pretend.call_recorder(lambda email: None),
is_disabled=lambda id: (False, None),
username_is_prohibited=lambda a: False,
)
breach_service = pretend.stub(
check_password=pretend.call_recorder(lambda pw, tags: False)
)
form = forms.RegistrationForm(
request=pretend.stub(
db=pretend.stub(query=lambda *a: pretend.stub(scalar=lambda: False)),
metrics=metrics,
),
formdata=MultiDict(
{
"username": "myusername",
"new_password": "mysupersecurepassword1!",
"password_confirm": "mysupersecurepassword1!",
"email": "foo@bar.com",
"g_recaptcha_reponse": "",
}
),
user_service=user_service,
captcha_service=captcha_service,
breach_service=breach_service,
)
assert form.user_service is user_service
assert form.captcha_service is captcha_service
assert form.validate(), str(form.errors)
def test_password_confirm_required_error(self):
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict({"password_confirm": ""}),
user_service=pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub())
),
captcha_service=pretend.stub(enabled=True),
breach_service=pretend.stub(check_password=lambda pw: False),
)
assert not form.validate()
assert form.password_confirm.errors.pop() == "This field is required."
def test_passwords_mismatch_error(self, pyramid_config):
user_service = pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub())
)
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict(
{"new_password": "password", "password_confirm": "mismatch"}
),
user_service=user_service,
captcha_service=pretend.stub(enabled=True),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert (
str(form.password_confirm.errors.pop())
== "Your passwords don't match. Try again."
)
def test_passwords_match_success(self):
user_service = pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub())
)
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict(
{
"new_password": "MyStr0ng!shPassword",
"password_confirm": "MyStr0ng!shPassword",
}
),
user_service=user_service,
captcha_service=pretend.stub(enabled=True),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
form.validate()
assert len(form.new_password.errors) == 0
assert len(form.password_confirm.errors) == 0
def test_email_required_error(self):
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict({"email": ""}),
user_service=pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub())
),
captcha_service=pretend.stub(enabled=True),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert form.email.errors.pop() == "This field is required."
@pytest.mark.parametrize("email", ["bad", "foo]bar@example.com", "</body></html>"])
def test_invalid_email_error(self, pyramid_request, email):
form = forms.RegistrationForm(
request=pyramid_request,
formdata=MultiDict({"email": email}),
user_service=pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: None)
),
captcha_service=pretend.stub(enabled=True),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert (
str(form.email.errors.pop()) == "The email address isn't valid. Try again."
)
def test_exotic_email_success(self, metrics):
form = forms.RegistrationForm(
request=pretend.stub(
db=pretend.stub(query=lambda *a: pretend.stub(scalar=lambda: False)),
metrics=metrics,
),
formdata=MultiDict({"email": "foo@n--tree.net"}),
user_service=pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: None)
),
captcha_service=pretend.stub(enabled=True),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
form.validate()
assert len(form.email.errors) == 0
def test_email_exists_error(self, pyramid_request):
pyramid_request.db = pretend.stub(
query=lambda *a: pretend.stub(scalar=lambda: False)
)
form = forms.RegistrationForm(
request=pyramid_request,
formdata=MultiDict({"email": "foo@bar.com"}),
user_service=pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub())
),
captcha_service=pretend.stub(enabled=True),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert (
str(form.email.errors.pop())
== "This email address is already being used by another account. "
"Use a different email."
)
def test_disposable_email_error(self, pyramid_request):
form = forms.RegistrationForm(
request=pyramid_request,
formdata=MultiDict({"email": "foo@bearsarefuzzy.com"}),
user_service=pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: None)
),
captcha_service=pretend.stub(enabled=True),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert (
str(form.email.errors.pop())
== "You can't use an email address from this domain. Use a "
"different email."
)
@pytest.mark.usefixtures("_no_deliverability_check")
@pytest.mark.parametrize(
("email", "prohibited_domain"),
[
("foo@wutang.net", "wutang.net"),
("foo@clan.wutang.net", "wutang.net"),
("foo@one.two.wutang.net", "wutang.net"),
("foo@wUtAnG.net", "wutang.net"),
("foo@one.wutang.co.uk", "wutang.co.uk"),
],
)
def test_prohibited_email_error(self, db_request, email, prohibited_domain):
domain = ProhibitedEmailDomain(domain=prohibited_domain)
db_request.db.add(domain)
form = forms.RegistrationForm(
request=db_request,
formdata=MultiDict({"email": email}),
user_service=pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: None)
),
captcha_service=pretend.stub(enabled=True),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert form.email.errors
assert (
str(form.email.errors.pop())
== "You can't use an email address from this domain. Use a "
"different email."
)
def test_recaptcha_disabled(self):
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict({"g_recpatcha_response": ""}),
user_service=pretend.stub(),
captcha_service=pretend.stub(
enabled=False,
verify_response=pretend.call_recorder(lambda _: None),
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
# there shouldn't be any errors for the recaptcha field if it's
# disabled
assert not form.g_recaptcha_response.errors
def test_recaptcha_required_error(self):
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict({"g_recaptcha_response": ""}),
user_service=pretend.stub(),
captcha_service=pretend.stub(
enabled=True,
verify_response=pretend.call_recorder(lambda _: None),
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert form.g_recaptcha_response.errors.pop() == "Captcha error."
def test_recaptcha_error(self):
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict({"g_recaptcha_response": "asd"}),
user_service=pretend.stub(),
captcha_service=pretend.stub(
verify_response=pretend.raiser(recaptcha.RecaptchaError),
enabled=True,
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert form.g_recaptcha_response.errors.pop() == "Captcha error."
def test_username_exists(self, pyramid_config):
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict({"username": "foo"}),
user_service=pretend.stub(
find_userid=pretend.call_recorder(lambda name: 1),
username_is_prohibited=lambda a: False,
),
captcha_service=pretend.stub(
enabled=False,
verify_response=pretend.call_recorder(lambda _: None),
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert (
str(form.username.errors.pop())
== "This username is already being used by another account. "
"Choose a different username."
)
def test_username_prohibited(self, pyramid_config):
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict({"username": "foo"}),
user_service=pretend.stub(
username_is_prohibited=lambda a: True,
),
captcha_service=pretend.stub(
enabled=False,
verify_response=pretend.call_recorder(lambda _: None),
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert (
str(form.username.errors.pop())
== "This username is already being used by another account. "
"Choose a different username."
)
@pytest.mark.parametrize("username", ["_foo", "bar_", "foo^bar", "boo\0far"])
def test_username_is_valid(self, username, pyramid_config):
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict({"username": username}),
user_service=pretend.stub(
find_userid=pretend.call_recorder(lambda _: None),
username_is_prohibited=lambda a: False,
),
captcha_service=pretend.stub(
enabled=False,
verify_response=pretend.call_recorder(lambda _: None),
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert (
str(form.username.errors.pop()) == "The username is invalid. Usernames "
"must be composed of letters, numbers, "
"dots, hyphens and underscores. And must "
"also start and finish with a letter or number. "
"Choose a different username."
)
def test_password_strength(self):
cases = (
("foobar", False),
("somethingalittlebetter9", True),
("1aDeCent!1", True),
)
for pwd, valid in cases:
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict({"new_password": pwd, "password_confirm": pwd}),
user_service=pretend.stub(),
captcha_service=pretend.stub(
enabled=False,
verify_response=pretend.call_recorder(lambda _: None),
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
form.validate()
assert (len(form.new_password.errors) == 0) == valid
def test_password_breached(self):
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict({"new_password": "password"}),
user_service=pretend.stub(
find_userid=pretend.call_recorder(lambda _: None)
),
captcha_service=pretend.stub(
enabled=False,
verify_response=pretend.call_recorder(lambda _: None),
),
breach_service=pretend.stub(
check_password=lambda pw, tags=None: True,
failure_message=(
"This password has appeared in a breach or has otherwise been "
"compromised and cannot be used."
),
),
)
assert not form.validate()
assert form.new_password.errors.pop() == (
"This password has appeared in a breach or has otherwise been "
"compromised and cannot be used."
)
def test_name_too_long(self, pyramid_config):
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict({"full_name": "hello " * 50}),
user_service=pretend.stub(
find_userid=pretend.call_recorder(lambda _: None)
),
captcha_service=pretend.stub(
enabled=False,
verify_response=pretend.call_recorder(lambda _: None),
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: True),
)
assert not form.validate()
assert (
str(form.full_name.errors.pop())
== "The name is too long. Choose a name with 100 characters or less."
)
def test_name_contains_null_bytes(self, pyramid_config):
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict({"full_name": "hello\0world"}),
user_service=pretend.stub(
find_userid=pretend.call_recorder(lambda _: None)
),
captcha_service=pretend.stub(
enabled=False,
verify_response=pretend.call_recorder(lambda _: None),
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: True),
)
assert not form.validate()
assert form.full_name.errors.pop() == "Null bytes are not allowed."
@pytest.mark.parametrize(
"input_name",
[
"https://example.com",
"hello http://example.com",
"http://example.com goodbye",
],
)
def test_name_contains_url(self, pyramid_config, input_name):
form = forms.RegistrationForm(
request=pretend.stub(),
formdata=MultiDict({"full_name": input_name}),
user_service=pretend.stub(
find_userid=pretend.call_recorder(lambda _: None)
),
captcha_service=pretend.stub(
enabled=False,
verify_response=pretend.call_recorder(lambda _: None),
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: True),
)
assert not form.validate()
assert (
str(form.full_name.errors.pop())
== "URLs are not allowed in the name field."
)
| TestRegistrationForm |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 30218,
"end": 30519
} | class ____(BaseModel):
dag_id: str
map_index: int | None = None
task_ids: list[str] | None = None
task_group_id: str | None = None
logical_dates: list[AwareDatetime] | None = None
run_ids: list[str] | None = None
type: Literal["GetTaskStates"] = "GetTaskStates"
| GetTaskStates |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/preferences.py | {
"start": 955,
"end": 7579
} | class ____(QDialog):
"""
Dialog window for setting viewing preferences of dataframe or array editor.
Set the attributes `float_format`, `varying_background` and `global_algo`
to set the options, if necessary. Call `exec_()` to show the dialog to the
user and allow them to interact. Finally, read the attributes to retrieve
the options selected by the user.
Parameters
----------
type_string: str
Type of variable being edited; should be 'dataframe' or 'array'.
The main difference is that arrays do not support the "by column"
coloring algorithm. Some text also uses the type.
parent: QWidget, optional
Parent widget.
"""
def __init__(self, type_string: str, parent: Optional[QWidget] = None):
super().__init__(parent)
self.type_string = type_string
self.setWindowTitle(
_('{} editor preferences').format(type_string.capitalize())
)
main_layout = QVBoxLayout(self)
formatting_group = QGroupBox(_('Formatting'))
formatting_layout = QVBoxLayout(formatting_group)
format_label = QLabel(
_('<a href="{url}">Format specification</a> for floats:')
.format(url=FORMAT_SPEC_URL)
)
format_label_layout = QHBoxLayout()
format_label_layout.setSpacing(0)
format_label_layout.addWidget(format_label)
format_tip_text = _(
'Use same syntax as for built-in <tt>format()</tt> function. '
'Default is <tt>.6g</tt>.'
)
self.add_help_info_label(format_label_layout, format_tip_text)
# Stylesheet aligns `format_label` and `format_input` to the left
self.format_input = QLineEdit(self)
self.format_input.setStyleSheet('margin-left: 5px')
formatting_layout.addLayout(format_label_layout)
formatting_layout.addWidget(self.format_input)
main_layout.addWidget(formatting_group)
background_group = QGroupBox(_('Background color'))
background_layout = QVBoxLayout(background_group)
self.default_background_button = self.create_radio_button(
_('Use default background color'),
_('Use same background color for all cells'),
background_group,
background_layout
)
self.varying_background_button = self.create_radio_button(
_('Vary background color according to value'),
_(
'Use red for largest number, blue for smallest number, '
'and intermediate colors for the other numbers.'
),
background_group,
background_layout
)
main_layout.addWidget(background_group)
if type_string == 'dataframe':
comparator_group = QGroupBox(_('Coloring algorithm'))
comparator_layout = QVBoxLayout(comparator_group)
self.global_button = self.create_radio_button(
_('Global'),
_(
'Compare each cell against the largest and smallest '
'numbers in the entire dataframe'
),
comparator_group,
comparator_layout
)
self.by_column_button = self.create_radio_button(
_('Column by column'),
_(
'Compare each cell against the largest and smallest '
'numbers in the same column'
),
comparator_group,
comparator_layout
)
main_layout.addWidget(comparator_group)
self.buttons = SpyderDialogButtonBox(
QDialogButtonBox.Ok | QDialogButtonBox.Cancel
)
main_layout.addWidget(self.buttons)
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
self.default_background_button.setChecked(True)
if type_string == 'dataframe':
self.global_button.setChecked(True)
comparator_group.setEnabled(False)
self.varying_background_button.toggled.connect(
lambda value: comparator_group.setEnabled(value)
)
@property
def float_format(self) -> str:
"""
Format specification for floats.
"""
return self.format_input.text()
@float_format.setter
def float_format(self, new_format: str) -> None:
self.format_input.setText(new_format)
@property
def varying_background(self) -> bool:
"""
Whether to use a colored background.
If True, then vary the background of the cells in the editor according
to the value. If False, then use the default background in all cells.
"""
return self.varying_background_button.isChecked()
@varying_background.setter
def varying_background(self, value: bool):
if value:
self.varying_background_button.setChecked(True)
else:
self.default_background_button.setChecked(True)
@property
def global_algo(self) -> bool:
"""
Whether to use the global minimum and maximum to pick colors.
If True, then select the background color by comparing the cell value
against the minimum and maximum over the whole dataframe. If False,
then use the minimum and maximum over the column.
This attribute has no effect if `varying_background` is False.
"""
if self.type_string == 'dataframe':
return self.global_button.isChecked()
else:
return True
@global_algo.setter
def global_algo(self, value: bool):
if self.type_string == 'dataframe':
if value:
self.global_button.setChecked(True)
else:
self.by_column_button.setChecked(True)
def add_help_info_label(self, layout, tip_text):
help_label = TipWidget(
tip_text=tip_text,
icon=ima.icon('question_tip'),
hover_icon=ima.icon('question_tip_hover'),
wrap_text=True,
)
layout.addWidget(help_label)
layout.addStretch(100)
def create_radio_button(self, text, tip_text, button_group, layout):
hor_layout = QHBoxLayout()
hor_layout.setContentsMargins(0, 0, 0, 0)
radio_button = QRadioButton(text, button_group)
hor_layout.addWidget(radio_button)
self.add_help_info_label(hor_layout, tip_text)
layout.addLayout(hor_layout)
return radio_button
| PreferencesDialog |
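Following the class docstring, a typical call site is roughly as below; `editor` as the parent widget is an assumption, and persisting the chosen options is left to the caller:
dlg = PreferencesDialog("dataframe", parent=editor)   # editor: assumed parent QWidget
dlg.float_format = ".6g"
dlg.varying_background = True
dlg.global_algo = False          # color each column against its own min/max
if dlg.exec_():
    new_format = dlg.float_format
    use_colors = dlg.varying_background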
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 117312,
"end": 128869
} | class ____(
roles.SelectStatementRole,
roles.DMLSelectRole,
roles.CompoundElementRole,
roles.InElementRole,
HasCTE,
SupportsCloneAnnotations,
Selectable,
):
"""Base class for SELECT statements.
This includes :class:`_expression.Select`,
:class:`_expression.CompoundSelect` and
:class:`_expression.TextualSelect`.
"""
_is_select_base = True
is_select = True
_label_style: SelectLabelStyle = LABEL_STYLE_NONE
def _refresh_for_new_column(self, column: ColumnElement[Any]) -> None:
self._reset_memoizations()
@util.ro_non_memoized_property
def selected_columns(
self,
) -> ColumnCollection[str, ColumnElement[Any]]:
"""A :class:`_expression.ColumnCollection`
representing the columns that
this SELECT statement or similar construct returns in its result set.
This collection differs from the :attr:`_expression.FromClause.columns`
collection of a :class:`_expression.FromClause` in that the columns
within this collection cannot be directly nested inside another SELECT
statement; a subquery must be applied first which provides for the
necessary parenthesization required by SQL.
.. note::
The :attr:`_sql.SelectBase.selected_columns` collection does not
include expressions established in the columns clause using the
:func:`_sql.text` construct; these are silently omitted from the
collection. To use plain textual column expressions inside of a
:class:`_sql.Select` construct, use the :func:`_sql.literal_column`
construct.
.. seealso::
:attr:`_sql.Select.selected_columns`
.. versionadded:: 1.4
"""
raise NotImplementedError()
def _generate_fromclause_column_proxies(
self,
subquery: FromClause,
columns: ColumnCollection[str, KeyedColumnElement[Any]],
primary_key: ColumnSet,
foreign_keys: Set[KeyedColumnElement[Any]],
*,
proxy_compound_columns: Optional[
Iterable[Sequence[ColumnElement[Any]]]
] = None,
) -> None:
raise NotImplementedError()
@util.ro_non_memoized_property
def _all_selected_columns(self) -> _SelectIterable:
"""A sequence of expressions that correspond to what is rendered
in the columns clause, including :class:`_sql.TextClause`
constructs.
.. versionadded:: 1.4.12
.. seealso::
:attr:`_sql.SelectBase.exported_columns`
"""
raise NotImplementedError()
@property
def exported_columns(
self,
) -> ReadOnlyColumnCollection[str, ColumnElement[Any]]:
"""A :class:`_expression.ColumnCollection`
that represents the "exported"
columns of this :class:`_expression.Selectable`, not including
:class:`_sql.TextClause` constructs.
The "exported" columns for a :class:`_expression.SelectBase`
object are synonymous
with the :attr:`_expression.SelectBase.selected_columns` collection.
.. versionadded:: 1.4
.. seealso::
:attr:`_expression.Select.exported_columns`
:attr:`_expression.Selectable.exported_columns`
:attr:`_expression.FromClause.exported_columns`
"""
return self.selected_columns.as_readonly()
def get_label_style(self) -> SelectLabelStyle:
"""
Retrieve the current label style.
Implemented by subclasses.
"""
raise NotImplementedError()
def set_label_style(self, style: SelectLabelStyle) -> Self:
"""Return a new selectable with the specified label style.
Implemented by subclasses.
"""
raise NotImplementedError()
def _scalar_type(self) -> TypeEngine[Any]:
raise NotImplementedError()
@util.deprecated(
"1.4",
"The :meth:`_expression.SelectBase.as_scalar` "
"method is deprecated and will be "
"removed in a future release. Please refer to "
":meth:`_expression.SelectBase.scalar_subquery`.",
)
def as_scalar(self) -> ScalarSelect[Any]:
return self.scalar_subquery()
def exists(self) -> Exists:
"""Return an :class:`_sql.Exists` representation of this selectable,
which can be used as a column expression.
The returned object is an instance of :class:`_sql.Exists`.
.. seealso::
:func:`_sql.exists`
:ref:`tutorial_exists` - in the :term:`2.0 style` tutorial.
.. versionadded:: 1.4
"""
return Exists(self)
def scalar_subquery(self) -> ScalarSelect[Any]:
"""Return a 'scalar' representation of this selectable, which can be
used as a column expression.
The returned object is an instance of :class:`_sql.ScalarSelect`.
Typically, a select statement which has only one column in its columns
clause is eligible to be used as a scalar expression. The scalar
subquery can then be used in the WHERE clause or columns clause of
an enclosing SELECT.
Note that the scalar subquery differentiates from the FROM-level
subquery that can be produced using the
:meth:`_expression.SelectBase.subquery`
method.
.. versionchanged:: 1.4 - the ``.as_scalar()`` method was renamed to
:meth:`_expression.SelectBase.scalar_subquery`.
.. seealso::
:ref:`tutorial_scalar_subquery` - in the 2.0 tutorial
"""
if self._label_style is not LABEL_STYLE_NONE:
self = self.set_label_style(LABEL_STYLE_NONE)
return ScalarSelect(self)
def label(self, name: Optional[str]) -> Label[Any]:
"""Return a 'scalar' representation of this selectable, embedded as a
subquery with a label.
.. seealso::
:meth:`_expression.SelectBase.scalar_subquery`.
"""
return self.scalar_subquery().label(name)
def lateral(self, name: Optional[str] = None) -> LateralFromClause:
"""Return a LATERAL alias of this :class:`_expression.Selectable`.
The return value is the :class:`_expression.Lateral` construct also
provided by the top-level :func:`_expression.lateral` function.
.. seealso::
:ref:`tutorial_lateral_correlation` - overview of usage.
"""
return Lateral._factory(self, name)
def subquery(self, name: Optional[str] = None) -> Subquery:
"""Return a subquery of this :class:`_expression.SelectBase`.
A subquery is from a SQL perspective a parenthesized, named
construct that can be placed in the FROM clause of another
SELECT statement.
Given a SELECT statement such as::
stmt = select(table.c.id, table.c.name)
The above statement might look like:
.. sourcecode:: sql
SELECT table.id, table.name FROM table
The subquery form by itself renders the same way, however when
embedded into the FROM clause of another SELECT statement, it becomes
a named sub-element::
subq = stmt.subquery()
new_stmt = select(subq)
The above renders as:
.. sourcecode:: sql
SELECT anon_1.id, anon_1.name
FROM (SELECT table.id, table.name FROM table) AS anon_1
Historically, :meth:`_expression.SelectBase.subquery`
is equivalent to calling
the :meth:`_expression.FromClause.alias`
method on a FROM object; however,
as a :class:`_expression.SelectBase`
object is not directly FROM object,
the :meth:`_expression.SelectBase.subquery`
method provides clearer semantics.
.. versionadded:: 1.4
"""
return Subquery._construct(
self._ensure_disambiguated_names(), name=name
)
@util.preload_module("sqlalchemy.sql.ddl")
def into(
self,
target: str,
*,
metadata: Optional["MetaData"] = None,
schema: Optional[str] = None,
temporary: bool = False,
if_not_exists: bool = False,
) -> CreateTableAs:
"""Create a :class:`_schema.CreateTableAs` construct from this SELECT.
This method provides a convenient way to create a ``CREATE TABLE ...
AS`` statement from a SELECT, as well as compound SELECTs like UNION.
The new table will be created with columns matching the SELECT list.
Supported on all included backends, the construct emits
``CREATE TABLE...AS`` for all backends except SQL Server, which instead
emits a ``SELECT..INTO`` statement.
e.g.::
from sqlalchemy import select
# Create a new table from a SELECT
stmt = (
select(users.c.id, users.c.name)
.where(users.c.status == "active")
.into("active_users")
)
with engine.begin() as conn:
conn.execute(stmt)
# With optional flags
stmt = (
select(users.c.id)
.where(users.c.status == "inactive")
.into("inactive_users", schema="analytics", if_not_exists=True)
)
.. versionadded:: 2.1
:param target: Name of the table to create as a string. Must be
unqualified; use the ``schema`` parameter for qualification.
:param metadata: :class:`_schema.MetaData`, optional
If provided, the :class:`_schema.Table` object available via the
:attr:`.CreateTableAs.table` attribute will be associated with this
:class:`.MetaData`. Otherwise, a new, empty :class:`.MetaData`
is created.
:param schema: Optional schema name for the new table.
:param temporary: If True, create a temporary table where supported
:param if_not_exists: If True, add IF NOT EXISTS clause where supported
:return: A :class:`_schema.CreateTableAs` construct.
.. seealso::
:ref:`metadata_create_table_as` - in :ref:`metadata_toplevel`
:class:`_schema.CreateTableAs`
"""
sql_ddl = util.preloaded.sql_ddl
return sql_ddl.CreateTableAs(
self,
target,
metadata=metadata,
schema=schema,
temporary=temporary,
if_not_exists=if_not_exists,
)
def _ensure_disambiguated_names(self) -> Self:
"""Ensure that the names generated by this selectbase will be
disambiguated in some way, if possible.
"""
raise NotImplementedError()
def alias(
self, name: Optional[str] = None, flat: bool = False
) -> Subquery:
"""Return a named subquery against this
:class:`_expression.SelectBase`.
For a :class:`_expression.SelectBase` (as opposed to a
:class:`_expression.FromClause`),
this returns a :class:`.Subquery` object which behaves mostly the
same as the :class:`_expression.Alias` object that is used with a
:class:`_expression.FromClause`.
.. versionchanged:: 1.4 The :meth:`_expression.SelectBase.alias`
method is now
a synonym for the :meth:`_expression.SelectBase.subquery` method.
"""
return self.subquery(name=name)
_SB = TypeVar("_SB", bound=SelectBase)
| SelectBase |
python | pytorch__pytorch | test/dynamo/test_base_hop.py | {
"start": 8991,
"end": 10020
} | class ____(torch.nn.Module):
def forward(self, L_y_: "f32[3, 4]"):
l_y_ = L_y_
subgraph_0 = self.subgraph_0
invoke_quant_test = torch.ops.higher_order.invoke_quant_test(subgraph_0, l_y_, scheme = 'nf4'); subgraph_0 = l_y_ = None
getitem: "f32[3, 4]" = invoke_quant_test[0]; invoke_quant_test = None
return (getitem,)
class subgraph_0(torch.nn.Module):
def forward(self, l_y_: "f32[3, 4]"):
cos: "f32[3, 4]" = l_y_.cos(); l_y_ = None
return (cos,)
""",
)
def test_int_input(self):
def inner(x, y):
return x + y
backend = EagerAndRecordGraphs()
@torch.compile(backend=backend, fullgraph=True)
def f(x, y):
return invoke_quant_test(inner, x, y, scheme="nf4")
x = 1
y = torch.randn(3, 4)
out = f(x, y)
self.assertEqual(out, inner(x, y))
self.assertExpectedInline(
normalize_graph(backend.graphs[0]),
"""\
| GraphModule |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 471486,
"end": 471963
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of AddVerifiableDomain"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "domain")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
domain = sgqlc.types.Field("VerifiableDomain", graphql_name="domain")
"""The verifiable domain that was added."""
| AddVerifiableDomainPayload |
python | getsentry__sentry | src/sentry/workflow_engine/endpoints/organization_detector_count.py | {
"start": 874,
"end": 1022
} | class ____(TypedDict):
active: int
deactive: int
total: int
@region_silo_endpoint
@extend_schema(tags=["Workflows"])
| DetectorCountResponse |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/projection_queries/snippets.py | {
"start": 1037,
"end": 1187
} | class ____(ndb.Model):
type = ndb.StringProperty() # E.g., 'home', 'work'
street = ndb.StringProperty()
city = ndb.StringProperty()
| Address |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-scrapegraph/examples/complete-scrapegraph-examples.py | {
"start": 498,
"end": 6812
} | class ____(BaseModel):
"""Schema for news article information."""
title: str = Field(description="Article title")
author: str = Field(description="Article author", default="N/A")
date: str = Field(description="Publication date", default="N/A")
summary: str = Field(description="Article summary", default="N/A")
def demonstrate_all_tools():
"""Demonstrate all ScrapeGraph tool functionalities."""
# Initialize the tool spec (will use SGAI_API_KEY from environment)
scrapegraph_tool = ScrapegraphToolSpec()
print("🚀 Complete ScrapeGraph Tools Demonstration")
print("=" * 47)
# 1. SmartScraper Example
print("\n🤖 1. SmartScraper - AI-Powered Data Extraction")
print("-" * 50)
try:
response = scrapegraph_tool.scrapegraph_smartscraper(
prompt="Extract the main headline, key points, and any important information from this page",
url="https://example.com/",
)
if "error" not in response:
print("✅ SmartScraper extraction successful:")
print(f"Result: {str(response)[:300]}...")
else:
print(f"❌ SmartScraper error: {response['error']}")
except Exception as e:
print(f"❌ SmartScraper exception: {str(e)}")
# 2. Markdownify Example
print("\n📄 2. Markdownify - Content to Markdown Conversion")
print("-" * 54)
try:
response = scrapegraph_tool.scrapegraph_markdownify(
url="https://example.com/",
)
if "failed" not in str(response).lower():
print("✅ Markdownify conversion successful:")
print(f"Markdown preview: {response[:200]}...")
print(f"Total length: {len(response)} characters")
else:
print(f"❌ Markdownify error: {response}")
except Exception as e:
print(f"❌ Markdownify exception: {str(e)}")
# 3. Search Example
print("\n🔍 3. Search - Web Search Functionality")
print("-" * 39)
try:
response = scrapegraph_tool.scrapegraph_search(
query="ScrapeGraph AI web scraping tools",
max_results=3
)
if "failed" not in str(response).lower():
print("✅ Search successful:")
print(f"Search results: {str(response)[:300]}...")
else:
print(f"❌ Search error: {response}")
except Exception as e:
print(f"❌ Search exception: {str(e)}")
# 4. Basic Scrape Example
print("\n🌐 4. Basic Scrape - HTML Content Extraction")
print("-" * 46)
try:
response = scrapegraph_tool.scrapegraph_scrape(
url="https://httpbin.org/html",
render_heavy_js=False,
headers={"User-Agent": "ScrapeGraph-Demo/1.0"}
)
if "error" not in response:
html_content = response.get("html", "")
print("✅ Basic scrape successful:")
print(f"HTML length: {len(html_content):,} characters")
print(f"Request ID: {response.get('request_id', 'N/A')}")
# Extract title if present
if "<title>" in html_content:
title_start = html_content.find("<title>") + 7
title_end = html_content.find("</title>", title_start)
if title_end != -1:
title = html_content[title_start:title_end]
print(f"Page title: {title}")
else:
print(f"❌ Basic scrape error: {response['error']}")
except Exception as e:
print(f"❌ Basic scrape exception: {str(e)}")
# 5. Agentic Scraper Example
print("\n🤖 5. Agentic Scraper - Intelligent Navigation")
print("-" * 47)
try:
response = scrapegraph_tool.scrapegraph_agentic_scraper(
prompt="Navigate through this website and find any contact information, company details, or important announcements. Look in multiple sections if needed.",
url="https://example.com/",
)
if "error" not in response:
print("✅ Agentic scraper successful:")
if isinstance(response, dict):
for key, value in response.items():
print(f" {key}: {str(value)[:100]}...")
else:
print(f"Navigation result: {str(response)[:300]}...")
else:
print(f"❌ Agentic scraper error: {response['error']}")
except Exception as e:
print(f"❌ Agentic scraper exception: {str(e)}")
# 6. Integration with LlamaIndex Agent Example
print("\n🔗 6. LlamaIndex Agent Integration")
print("-" * 35)
try:
# Create tools list
tools = scrapegraph_tool.to_tool_list()
print(f"✅ Created {len(tools)} tools for LlamaIndex integration:")
for tool in tools:
print(f" • {tool.metadata.name}: {tool.metadata.description[:60]}...")
print("\n💡 These tools can be used with LlamaIndex agents:")
print(" from llama_index.core.agent import ReActAgent")
print(" agent = ReActAgent.from_tools(tools, llm=your_llm)")
except Exception as e:
print(f"❌ Integration setup error: {str(e)}")
# Performance and Usage Summary
print("\n📊 Tool Comparison Summary")
print("-" * 28)
print("SmartScraper: 🎯 Best for structured data extraction with AI")
print("Markdownify: 📄 Best for content analysis and documentation")
print("Search: 🔍 Best for finding information across the web")
print("Basic Scrape: ⚡ Fastest for simple HTML content extraction")
print("Agentic Scraper: 🧠 Most powerful for complex navigation tasks")
print("\n🎯 Use Case Recommendations:")
print("• Data Mining: SmartScraper + Agentic Scraper")
print("• Content Analysis: Markdownify + SmartScraper")
print("• Research: Search + SmartScraper")
print("• Monitoring: Basic Scrape (fastest)")
print("• Complex Sites: Agentic Scraper")
print("\n📚 Next Steps:")
print("• Set SGAI_API_KEY environment variable")
print("• Choose the right tool for your use case")
print("• Combine tools for comprehensive workflows")
print("• Integrate with LlamaIndex agents for advanced automation")
def main():
"""Run the complete demonstration."""
demonstrate_all_tools()
if __name__ == "__main__":
main()
| NewsArticle |
python | chroma-core__chroma | chromadb/errors.py | {
"start": 1906,
"end": 2093
} | class ____(ChromaError):
@overrides
def code(self) -> int:
return 404
@classmethod
@overrides
def name(cls) -> str:
return "NotFoundError"
| NotFoundError |
python | neetcode-gh__leetcode | python/0678-valid-parenthesis-string.py | {
"start": 30,
"end": 725
} | class ____:
def checkValidString(self, s: str) -> bool:
dp = {(len(s), 0): True} # key=(i, leftCount) -> isValid
def dfs(i, left):
if i == len(s) or left < 0:
return left == 0
if (i, left) in dp:
return dp[(i, left)]
if s[i] == "(":
dp[(i, left)] = dfs(i + 1, left + 1)
elif s[i] == ")":
dp[(i, left)] = dfs(i + 1, left - 1)
else:
dp[(i, left)] = (
dfs(i + 1, left + 1) or dfs(i + 1, left - 1) or dfs(i + 1, left)
)
return dp[(i, left)]
return dfs(0, 0)
# Greedy: O(n)
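# The greedy O(n) variant referenced above is cut off in this excerpt; the
# following is a minimal sketch of that approach (the function name is ours,
# not from the original file): track the range [left_min, left_max] of
# possible unmatched "(" counts while scanning.
def check_valid_string_greedy(s: str) -> bool:
    left_min = left_max = 0
    for c in s:
        if c == "(":
            left_min, left_max = left_min + 1, left_max + 1
        elif c == ")":
            left_min, left_max = left_min - 1, left_max - 1
        else:  # "*" can act as "(", ")" or an empty string
            left_min, left_max = left_min - 1, left_max + 1
        if left_max < 0:  # too many ")" even in the most generous reading
            return False
        left_min = max(left_min, 0)  # open "(" count cannot go negative
    return left_min == 0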
| Solution |
python | pytorch__pytorch | torch/testing/_internal/common_pruning.py | {
"start": 6147,
"end": 7112
} | class ____(nn.Module):
r"""Model with only Conv2d layers, some with bias, some in a Sequential and some following.
Activation function modules sit between the layers inside the Sequential; functional activations
are called between the layers outside it.
Used to test pruned Conv2d-Bias-Activation-Conv2d fusion."""
def __init__(self) -> None:
super().__init__()
self.seq = nn.Sequential(
nn.Conv2d(1, 32, 3, 1, bias=True),
nn.ReLU(),
nn.Conv2d(32, 64, 3, 1, bias=True),
nn.Tanh(),
nn.Conv2d(64, 64, 3, 1, bias=False),
nn.ReLU(),
)
self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=False)
self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.seq(x)
x = self.conv2d1(x)
x = F.relu(x)
x = self.conv2d2(x)
x = F.hardtanh(x)
return x
| Conv2dActivation |
python | nedbat__coveragepy | tests/test_report_core.py | {
"start": 1052,
"end": 2315
} | class ____(CoverageTest):
"""Tests of render_report."""
def test_stdout(self) -> None:
fake = FakeReporter(output="Hello!\n")
msgs: list[str] = []
res = render_report("-", fake, [pytest, "coverage"], msgs.append)
assert res == 17.25
assert fake.morfs == [pytest, "coverage"]
assert self.stdout() == "Hello!\n"
assert not msgs
def test_file(self) -> None:
fake = FakeReporter(output="Gréètings!\n")
msgs: list[str] = []
res = render_report("output.txt", fake, [], msgs.append)
assert res == 17.25
assert self.stdout() == ""
with open("output.txt", "rb") as f:
assert f.read().rstrip() == b"Gr\xc3\xa9\xc3\xa8tings!"
assert msgs == ["Wrote fake report file to output.txt"]
@pytest.mark.parametrize("error", [CoverageException, ZeroDivisionError])
def test_exception(self, error: type[Exception]) -> None:
fake = FakeReporter(error=error)
msgs: list[str] = []
with pytest.raises(error, match="You asked for it!"):
render_report("output.txt", fake, [], msgs.append)
assert self.stdout() == ""
self.assert_doesnt_exist("output.txt")
assert not msgs
| RenderReportTest |
python | getsentry__sentry | src/sentry/api/serializers/rest_framework/list.py | {
"start": 51,
"end": 215
} | class ____(ListField):
def to_internal_value(self, data):
if data == "":
return ""
return super().to_internal_value(data)
| EmptyListField |
python | python-openxml__python-docx | src/docx/opc/phys_pkg.py | {
"start": 2318,
"end": 3375
} | class ____(PhysPkgReader):
"""Implements |PhysPkgReader| interface for a zip file OPC package."""
def __init__(self, pkg_file):
super(_ZipPkgReader, self).__init__()
self._zipf = ZipFile(pkg_file, "r")
def blob_for(self, pack_uri):
"""Return blob corresponding to `pack_uri`.
Raises |ValueError| if no matching member is present in zip archive.
"""
return self._zipf.read(pack_uri.membername)
def close(self):
"""Close the zip archive, releasing any resources it is using."""
self._zipf.close()
@property
def content_types_xml(self):
"""Return the `[Content_Types].xml` blob from the zip package."""
return self.blob_for(CONTENT_TYPES_URI)
def rels_xml_for(self, source_uri):
"""Return rels item XML for source with `source_uri` or None if no rels item is
present."""
try:
rels_xml = self.blob_for(source_uri.rels_uri)
except KeyError:
rels_xml = None
return rels_xml
| _ZipPkgReader |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-oci-data-science/llama_index/embeddings/oci_data_science/base.py | {
"start": 541,
"end": 11992
} | class ____(BaseEmbedding):
"""
Embedding class for OCI Data Science models.
This class provides methods to generate embeddings using models deployed on
Oracle Cloud Infrastructure (OCI) Data Science. It supports both synchronous
and asynchronous requests and handles authentication, batching, and other
configurations.
Setup:
Install the required packages:
```bash
pip install -U oracle-ads llama-index-embeddings-oci-data-science
```
Configure authentication using `ads.set_auth()`. For example, to use OCI
Resource Principal for authentication:
```python
import ads
ads.set_auth("resource_principal")
```
For more details on authentication, see:
https://accelerated-data-science.readthedocs.io/en/latest/user_guide/cli/authentication.html
Ensure you have the required policies to access the OCI Data Science Model
Deployment endpoint:
https://docs.oracle.com/en-us/iaas/data-science/using/model-dep-policies-auth.htm
To learn more about deploying LLM models in OCI Data Science, see:
https://docs.oracle.com/en-us/iaas/data-science/using/ai-quick-actions-model-deploy.htm
Examples:
Basic Usage:
```python
import ads
from llama_index.embeddings.oci_data_science import OCIDataScienceEmbedding
ads.set_auth(auth="security_token", profile="OC1")
embeddings = OCIDataScienceEmbedding(
endpoint="https://<MD_OCID>/predict",
)
e1 = embeddings.get_text_embedding("This is a test document")
print(e1)
e2 = embeddings.get_text_embedding_batch([
"This is a test document",
"This is another test document"
])
print(e2)
```
Asynchronous Usage:
```python
import ads
import asyncio
from llama_index.embeddings.oci_data_science import OCIDataScienceEmbedding
ads.set_auth(auth="security_token", profile="OC1")
embeddings = OCIDataScienceEmbedding(
endpoint="https://<MD_OCID>/predict",
)
async def async_embedding():
e1 = await embeddings.aget_query_embedding("This is a test document")
print(e1)
asyncio.run(async_embedding())
```
Attributes:
endpoint (str): The URI of the endpoint from the deployed model.
auth (Dict[str, Any]): The authentication dictionary used for OCI API requests.
model_name (str): The name of the OCI Data Science embedding model.
embed_batch_size (int): The batch size for embedding calls.
additional_kwargs (Dict[str, Any]): Additional keyword arguments for the OCI Data Science AI request.
default_headers (Dict[str, str]): The default headers for API requests.
"""
endpoint: str = Field(
default=None, description="The URI of the endpoint from the deployed model."
)
auth: Union[Dict[str, Any], None] = Field(
default_factory=dict,
exclude=True,
description=(
"The authentication dictionary used for OCI API requests. "
"If not provided, it will be autogenerated based on environment variables."
),
)
model_name: Optional[str] = Field(
default=DEFAULT_MODEL,
description="The name of the OCI Data Science embedding model to use.",
)
embed_batch_size: int = Field(
default=DEFAULT_EMBED_BATCH_SIZE,
description="The batch size for embedding calls.",
gt=0,
le=2048,
)
max_retries: int = Field(
default=DEFAULT_MAX_RETRIES,
description="The maximum number of API retries.",
ge=0,
)
timeout: float = Field(
default=DEFAULT_TIMEOUT, description="The timeout to use in seconds.", ge=0
)
additional_kwargs: Optional[Dict[str, Any]] = Field(
default_factory=dict,
description="Additional keyword arguments for the OCI Data Science AI request.",
)
default_headers: Optional[Dict[str, str]] = Field(
default_factory=dict, description="The default headers for API requests."
)
_client: Client = PrivateAttr()
_async_client: AsyncClient = PrivateAttr()
def __init__(
self,
endpoint: str,
model_name: Optional[str] = DEFAULT_MODEL,
auth: Dict[str, Any] = None,
timeout: Optional[float] = DEFAULT_TIMEOUT,
max_retries: Optional[int] = DEFAULT_MAX_RETRIES,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
additional_kwargs: Optional[Dict[str, Any]] = None,
default_headers: Optional[Dict[str, str]] = None,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
"""
Initialize the OCIDataScienceEmbedding instance.
Args:
endpoint (str): The URI of the endpoint from the deployed model.
model_name (Optional[str]): The name of the OCI Data Science embedding model to use. Defaults to "odsc-embeddings".
auth (Optional[Dict[str, Any]]): The authentication dictionary for OCI API requests. Defaults to None.
timeout (Optional[float]): The timeout setting for the HTTP request in seconds. Defaults to 120.
max_retries (Optional[int]): The maximum number of retry attempts for the request. Defaults to 5.
embed_batch_size (int): The batch size for embedding calls. Defaults to DEFAULT_EMBED_BATCH_SIZE.
additional_kwargs (Optional[Dict[str, Any]]): Additional arguments for the OCI Data Science AI request. Defaults to None.
default_headers (Optional[Dict[str, str]]): The default headers for API requests. Defaults to None.
callback_manager (Optional[CallbackManager]): A callback manager for handling events during embedding operations. Defaults to None.
**kwargs: Additional keyword arguments.
"""
super().__init__(
model_name=model_name,
endpoint=endpoint,
auth=auth,
embed_batch_size=embed_batch_size,
timeout=timeout,
max_retries=max_retries,
additional_kwargs=additional_kwargs or {},
default_headers=default_headers or {},
callback_manager=callback_manager,
**kwargs,
)
@model_validator(mode="before")
# @_validate_dependency
def validate_env(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""
Validate the environment and dependencies before initialization.
Args:
values (Dict[str, Any]): The values passed to the model.
Returns:
Dict[str, Any]: The validated values.
Raises:
ImportError: If required dependencies are missing.
"""
return values
@property
def client(self) -> Client:
"""
Return the synchronous client instance.
Returns:
Client: The synchronous client for interacting with the OCI Data Science Model Deployment endpoint.
"""
if not hasattr(self, "_client") or self._client is None:
self._client = Client(
endpoint=self.endpoint,
auth=self.auth,
retries=self.max_retries,
timeout=self.timeout,
)
return self._client
@property
def async_client(self) -> AsyncClient:
"""
Return the asynchronous client instance.
Returns:
AsyncClient: The asynchronous client for interacting with the OCI Data Science Model Deployment endpoint.
"""
if not hasattr(self, "_async_client") or self._async_client is None:
self._async_client = AsyncClient(
endpoint=self.endpoint,
auth=self.auth,
retries=self.max_retries,
timeout=self.timeout,
)
return self._async_client
@classmethod
def class_name(cls) -> str:
"""
Get the class name.
Returns:
str: The name of the class.
"""
return "OCIDataScienceEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""
Generate an embedding for a query string.
Args:
query (str): The query string for which to generate an embedding.
Returns:
List[float]: The embedding vector for the query.
"""
return self.client.embeddings(
input=query, payload=self.additional_kwargs, headers=self.default_headers
)["data"][0]["embedding"]
def _get_text_embedding(self, text: str) -> List[float]:
"""
Generate an embedding for a text string.
Args:
text (str): The text string for which to generate an embedding.
Returns:
List[float]: The embedding vector for the text.
"""
return self.client.embeddings(
input=text, payload=self.additional_kwargs, headers=self.default_headers
)["data"][0]["embedding"]
async def _aget_text_embedding(self, text: str) -> List[float]:
"""
Asynchronously generate an embedding for a text string.
Args:
text (str): The text string for which to generate an embedding.
Returns:
List[float]: The embedding vector for the text.
"""
response = await self.async_client.embeddings(
input=text, payload=self.additional_kwargs, headers=self.default_headers
)
return response["data"][0]["embedding"]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""
Generate embeddings for a list of text strings.
Args:
texts (List[str]): A list of text strings for which to generate embeddings.
Returns:
List[List[float]]: A list of embedding vectors corresponding to the input texts.
"""
response = self.client.embeddings(
input=texts, payload=self.additional_kwargs, headers=self.default_headers
)
return [raw["embedding"] for raw in response["data"]]
async def _aget_query_embedding(self, query: str) -> List[float]:
"""
Asynchronously generate an embedding for a query string.
Args:
query (str): The query string for which to generate an embedding.
Returns:
List[float]: The embedding vector for the query.
"""
response = await self.async_client.embeddings(
input=query, payload=self.additional_kwargs, headers=self.default_headers
)
return response["data"][0]["embedding"]
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""
Asynchronously generate embeddings for a list of text strings.
Args:
texts (List[str]): A list of text strings for which to generate embeddings.
Returns:
List[List[float]]: A list of embedding vectors corresponding to the input texts.
"""
response = await self.async_client.embeddings(
input=texts, payload=self.additional_kwargs, headers=self.default_headers
)
return [raw["embedding"] for raw in response["data"]]
| OCIDataScienceEmbedding |
python | astropy__astropy | astropy/utils/iers/tests/test_iers.py | {
"start": 4689,
"end": 7779
} | class ____:
@classmethod
def teardown_class(cls):
iers.IERS_A.close()
def test_simple(self):
# Test the IERS A reader. It is also a regression test that ensures
# values do not get overridden by IERS B; see #4933.
iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)
assert (iers_tab["UT1_UTC"].unit / u.second).is_unity()
assert "P" in iers_tab["UT1Flag"]
assert "I" in iers_tab["UT1Flag"]
assert "B" in iers_tab["UT1Flag"]
assert np.all(
(iers_tab["UT1Flag"] == "I")
| (iers_tab["UT1Flag"] == "P")
| (iers_tab["UT1Flag"] == "B")
)
assert (iers_tab["dX_2000A"].unit / u.marcsec).is_unity()
assert (iers_tab["dY_2000A"].unit / u.marcsec).is_unity()
assert "P" in iers_tab["NutFlag"]
assert "I" in iers_tab["NutFlag"]
assert "B" in iers_tab["NutFlag"]
assert np.all(
(iers_tab["NutFlag"] == "P")
| (iers_tab["NutFlag"] == "I")
| (iers_tab["NutFlag"] == "B")
)
assert (iers_tab["PM_x"].unit / u.arcsecond).is_unity()
assert (iers_tab["PM_y"].unit / u.arcsecond).is_unity()
assert "P" in iers_tab["PolPMFlag"]
assert "I" in iers_tab["PolPMFlag"]
assert "B" in iers_tab["PolPMFlag"]
assert np.all(
(iers_tab["PolPMFlag"] == "P")
| (iers_tab["PolPMFlag"] == "I")
| (iers_tab["PolPMFlag"] == "B")
)
t = Time([57053.0, 57054.0, 57055.0], format="mjd")
ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
assert_quantity_allclose(
ut1_utc, [-0.4916557, -0.4925323, -0.4934373] * u.s, atol=0.1 * u.ms
)
dcip_x, dcip_y, status = iers_tab.dcip_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
print(dcip_x)
print(dcip_y)
assert_quantity_allclose(
dcip_x, [-0.086, -0.093, -0.087] * u.marcsec, atol=1.0 * u.narcsec
)
assert_quantity_allclose(
dcip_y, [0.094, 0.081, 0.072] * u.marcsec, atol=1 * u.narcsec
)
pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
assert_quantity_allclose(
pm_x, [0.003734, 0.004581, 0.004623] * u.arcsec, atol=0.1 * u.marcsec
)
assert_quantity_allclose(
pm_y, [0.310824, 0.313150, 0.315517] * u.arcsec, atol=0.1 * u.marcsec
)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
| TestIERS_AExcerpt |
python | huggingface__transformers | src/transformers/models/oneformer/modeling_oneformer.py | {
"start": 34770,
"end": 36091
} | class ____(ModelOutput):
r"""
multi_scale_features (`tuple(torch.FloatTensor)`):
Tuple of multi-scale features of scales [1/8, 1/16, 1/32] and shape `(batch_size, num_channels, height,
width)` from the Multi-Scale Deformable Attention based Pixel Decoder.
mask_features (`torch.FloatTensor`):
Tensor of shape `(batch_size, num_channels, height, width)`, 1/4 scale features from the last Pixel Decoder
Layer.
attentions (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attention weights from the pixel decoder. Returned when `output_attentions=True` is passed
or when `config.output_attentions=True`
"""
multi_scale_features: Optional[tuple[torch.FloatTensor]] = None
mask_features: Optional[torch.FloatTensor] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
OneFormer's pixel level module output. It returns both the last and (optionally) the hidden states from the
`encoder` and `decoder`. By default, the `encoder` is a Swin/Dinat Backbone and the `decoder` is a Multi-Scale
Deformable Attention based decoder.
"""
)
| OneFormerPixelDecoderOutput |
python | paramiko__paramiko | paramiko/packet.py | {
"start": 1351,
"end": 1584
} | class ____(Exception):
"""
Exception indicating a rekey is needed.
"""
pass
def first_arg(e):
arg = None
if type(e.args) is tuple and len(e.args) > 0:
arg = e.args[0]
return arg
| NeedRekeyException |
python | scipy__scipy | scipy/_lib/tests/test__util.py | {
"start": 11496,
"end": 16209
} | class ____:
def test_policy(self):
data = np.array([1, 2, 3, np.nan])
assert _contains_nan(data) # default policy is "propagate"
assert _contains_nan(data, nan_policy="propagate")
assert _contains_nan(data, nan_policy="omit")
assert not _contains_nan(data[:3])
assert not _contains_nan(data[:3], nan_policy="propagate")
assert not _contains_nan(data[:3], nan_policy="omit")
with pytest.raises(ValueError, match="The input contains nan values"):
_contains_nan(data, nan_policy="raise")
assert not _contains_nan(data[:3], nan_policy="raise")
with pytest.raises(ValueError, match="nan_policy must be one of"):
_contains_nan(data, nan_policy="nan")
def test_contains_nan(self):
# Special case: empty array
assert not _contains_nan(np.array([], dtype=float))
# Integer arrays cannot contain NaN
assert not _contains_nan(np.array([1, 2, 3]))
assert not _contains_nan(np.array([[1, 2], [3, 4]]))
assert not _contains_nan(np.array([1., 2., 3.]))
assert not _contains_nan(np.array([1., 2.j, 3.]))
assert _contains_nan(np.array([1., 2.j, np.nan]))
assert _contains_nan(np.array([1., 2., np.nan]))
assert _contains_nan(np.array([np.nan, 2., np.nan]))
assert not _contains_nan(np.array([[1., 2.], [3., 4.]]))
assert _contains_nan(np.array([[1., 2.], [3., np.nan]]))
@skip_xp_invalid_arg
def test_contains_nan_with_strings(self):
data1 = np.array([1, 2, "3", np.nan]) # converted to string "nan"
assert not _contains_nan(data1)
data2 = np.array([1, 2, "3", np.nan], dtype='object')
assert _contains_nan(data2)
data3 = np.array([["1", 2], [3, np.nan]]) # converted to string "nan"
assert not _contains_nan(data3)
data4 = np.array([["1", 2], [3, np.nan]], dtype='object')
assert _contains_nan(data4)
@pytest.mark.skip_xp_backends(eager_only=True,
reason="lazy backends tested separately")
@pytest.mark.parametrize("nan_policy", ['propagate', 'omit', 'raise'])
def test_array_api(self, xp, nan_policy):
rng = np.random.default_rng(932347235892482)
x0 = rng.random(size=(2, 3, 4))
x = xp.asarray(x0)
assert not _contains_nan(x, nan_policy)
x = xpx.at(x)[1, 2, 1].set(xp.nan)
if nan_policy == 'raise':
with pytest.raises(ValueError, match="The input contains nan values"):
_contains_nan(x, nan_policy)
elif nan_policy == 'omit':
assert _contains_nan(x, nan_policy, xp_omit_okay=True)
elif nan_policy == 'propagate':
assert _contains_nan(x, nan_policy)
@pytest.mark.skip_xp_backends("numpy", reason="lazy backends only")
@pytest.mark.skip_xp_backends("cupy", reason="lazy backends only")
@pytest.mark.skip_xp_backends("array_api_strict", reason="lazy backends only")
@pytest.mark.skip_xp_backends("torch", reason="lazy backends only")
def test_array_api_lazy(self, xp):
rng = np.random.default_rng(932347235892482)
x0 = rng.random(size=(2, 3, 4))
x = xp.asarray(x0)
xp_assert_equal(_contains_nan(x), xp.asarray(False))
xp_assert_equal(_contains_nan(x, "propagate"), xp.asarray(False))
xp_assert_equal(_contains_nan(x, "omit", xp_omit_okay=True), xp.asarray(False))
# Lazy arrays don't support "omit" and "raise" policies
match = "not supported for lazy arrays"
with pytest.raises(TypeError, match=match):
_contains_nan(x, "omit")
with pytest.raises(TypeError, match=match):
_contains_nan(x, "raise")
x = xpx.at(x)[1, 2, 1].set(np.nan)
xp_assert_equal(_contains_nan(x), xp.asarray(True))
xp_assert_equal(_contains_nan(x, "propagate"), xp.asarray(True))
xp_assert_equal(_contains_nan(x, "omit", xp_omit_okay=True), xp.asarray(True))
with pytest.raises(TypeError, match=match):
_contains_nan(x, "omit")
with pytest.raises(TypeError, match=match):
_contains_nan(x, "raise")
def test__rng_html_rewrite():
def mock_str():
lines = [
'np.random.default_rng(8989843)',
'np.random.default_rng(seed)',
'np.random.default_rng(0x9a71b21474694f919882289dc1559ca)',
' bob ',
]
return lines
res = _rng_html_rewrite(mock_str)()
ref = [
'np.random.default_rng()',
'np.random.default_rng(seed)',
'np.random.default_rng()',
' bob ',
]
assert res == ref
| TestContainsNaN |
python | buildout__buildout | src/zc/buildout/easy_install.py | {
"start": 12250,
"end": 65817
} | class ____(object):
_versions = {}
_required_by = {}
_picked_versions = {}
_download_cache = None
_install_from_cache = False
_prefer_final = True
_use_dependency_links = True
_allow_picked_versions = True
_store_required_by = False
_allow_unknown_extras = False
_namespace_packages = {}
_index_url = None
def __init__(self,
dest=None,
links=(),
index=None,
executable=sys.executable,
always_unzip=None, # Backward compat :/
path=None,
newest=True,
versions=None,
use_dependency_links=None,
allow_hosts=('*',),
check_picked=True,
allow_unknown_extras=False,
):
assert executable == sys.executable, (executable, sys.executable)
self._dest = dest if dest is None else pkg_resources.normalize_path(dest)
self._allow_hosts = allow_hosts
self._allow_unknown_extras = allow_unknown_extras
if self._install_from_cache:
if not self._download_cache:
raise ValueError("install_from_cache set to true with no"
" download cache")
links = ()
index = 'file://' + self._download_cache
if use_dependency_links is not None:
self._use_dependency_links = use_dependency_links
self._links = links = list(self._fix_file_links(links))
if self._download_cache and (self._download_cache not in links):
links.insert(0, self._download_cache)
if index:
self._index_url = index
path = (path and path[:] or []) + buildout_and_setuptools_path
self._path = path
if self._dest is None:
newest = False
self._newest = newest
self._env = self._make_env()
self._index = _get_index(index, links, self._allow_hosts)
self._requirements_and_constraints = []
self._check_picked = check_picked
if versions is not None:
self._versions = normalize_versions(versions)
def _make_env(self):
full_path = self._get_dest_dist_paths() + self._path
env = Environment(full_path)
# this needs to be called whenever self._env is modified (or we could
# make an Environment subclass):
self._eggify_env_dest_dists(env, self._dest)
return env
def _env_rescan_dest(self):
self._env.scan(self._get_dest_dist_paths())
self._eggify_env_dest_dists(self._env, self._dest)
def _get_dest_dist_paths(self):
dest = self._dest
if dest is None:
return []
eggs = glob.glob(os.path.join(dest, '*.egg'))
dists = [os.path.dirname(dist_info) for dist_info in
glob.glob(os.path.join(dest, '*', '*.dist-info'))]
return list(set(eggs + dists))
@staticmethod
def _eggify_env_dest_dists(env, dest):
"""
Make sure everything found under `dest` is seen as an egg, even if it's
some other kind of dist.
"""
for project_name in env:
for dist in env[project_name]:
if os.path.dirname(dist.location) == dest:
dist.precedence = pkg_resources.EGG_DIST
def _version_conflict_information(self, name):
"""Return textual requirements/constraint information for debug purposes
We do a very simple textual search, as that filters out most
extraneous information without missing anything.
"""
output = [
"Version and requirements information containing %s:" % name]
version_constraint = self._versions.get(canonicalize_name(name))
if version_constraint:
output.append(
"[versions] constraint on %s: %s" % (name, version_constraint))
output += [line for line in self._requirements_and_constraints
if name.lower() in line.lower()]
return '\n '.join(output)
def _satisfied(self, req, source=None):
dists = [dist for dist in self._env[req.project_name] if dist in req]
if not dists:
logger.debug('We have no distributions for %s that satisfies %r.',
req.project_name, str(req))
return None, self._obtain(req, source)
# Note that dists are sorted from best to worst, as promised by
# env.__getitem__
for dist in dists:
if (dist.precedence == pkg_resources.DEVELOP_DIST):
logger.debug('We have a develop egg: %s', dist)
return dist, None
# Special common case, we have a specification for a single version:
specs = req.specs
if len(specs) == 1 and specs[0][0] == '==':
logger.debug('We have the distribution that satisfies %r.',
str(req))
return dists[0], None
if self._prefer_final:
fdists = [dist for dist in dists
if self._final_version(dist.parsed_version)
]
if fdists:
# There are final dists, so only use those
dists = fdists
if not self._newest:
# We don't need the newest, so we'll use the newest one we
# find, which is the first returned by
# Environment.__getitem__.
return dists[0], None
best_we_have = dists[0] # Because dists are sorted from best to worst
# We have some installed distros. There might, theoretically, be
# newer ones. Let's find out which ones are available and see if
# any are newer. We only do this if we're willing to install
# something, which is only true if dest is not None:
best_available = self._obtain(req, source)
if best_available is None:
# That's a bit odd. There aren't any distros available.
# We should use the best one we have that meets the requirement.
logger.debug(
'There are no distros available that meet %r.\n'
'Using our best, %s.',
str(req), best_we_have)
return best_we_have, None
if self._prefer_final:
if self._final_version(best_available.parsed_version):
if self._final_version(best_we_have.parsed_version):
if (best_we_have.parsed_version
<
best_available.parsed_version
):
return None, best_available
else:
return None, best_available
else:
if (not self._final_version(best_we_have.parsed_version)
and
(best_we_have.parsed_version
<
best_available.parsed_version
)
):
return None, best_available
else:
if (best_we_have.parsed_version
<
best_available.parsed_version
):
return None, best_available
logger.debug(
'We have the best distribution that satisfies %r.',
str(req))
return best_we_have, None
def _call_pip_install(self, spec, dest, dist):
tmp = tempfile.mkdtemp(dir=dest)
try:
paths = call_pip_install(spec, tmp)
dists = []
env = Environment(paths)
for project in env:
dists.extend(env[project])
if not dists:
raise zc.buildout.UserError("Couldn't install: %s" % dist)
if len(dists) > 1:
logger.warn("Installing %s\n"
"caused multiple distributions to be installed:\n"
"%s\n",
dist, '\n'.join(map(str, dists)))
else:
d = dists[0]
if d.project_name != dist.project_name:
logger.warn("Installing %s\n"
"Caused installation of a distribution:\n"
"%s\n"
"with a different project name.",
dist, d)
if d.version != dist.version:
logger.warn("Installing %s\n"
"Caused installation of a distribution:\n"
"%s\n"
"with a different version.",
dist, d)
result = []
for d in dists:
result.append(_move_to_eggs_dir_and_compile(d, dest))
return result
finally:
zc.buildout.rmtree.rmtree(tmp)
def _obtain(self, requirement, source=None):
# initialize our index for this project:
index = self._index
if index.obtain(requirement) is None:
# Nothing is available.
return None
# Filter the available dists for the requirement and source flag
dists = [dist for dist in index[requirement.project_name]
if ((dist in requirement)
and
((not source) or
(dist.precedence == pkg_resources.SOURCE_DIST)
)
)
]
# If we prefer final dists, filter for final and use the
# result if it is non empty.
if self._prefer_final:
fdists = [dist for dist in dists
if self._final_version(dist.parsed_version)
]
if fdists:
# There are final dists, so only use those
dists = fdists
# Now find the best one:
best = []
bestv = None
for dist in dists:
distv = dist.parsed_version
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if not best:
return None
if len(best) == 1:
return best[0]
if self._download_cache:
for dist in best:
if (realpath(os.path.dirname(dist.location))
==
self._download_cache
):
return dist
best.sort()
return best[-1]
def _fetch(self, dist, tmp, download_cache):
if (download_cache
and (realpath(os.path.dirname(dist.location)) == download_cache)
):
logger.debug("Download cache has %s at: %s", dist, dist.location)
return dist
logger.debug("Fetching %s from: %s", dist, dist.location)
new_location = self._index.download(dist.location, tmp)
if (download_cache
and (realpath(new_location) == realpath(dist.location))
and os.path.isfile(new_location)
):
# setuptools avoids making extra copies, but we want to copy
# to the download cache
shutil.copy2(new_location, tmp)
new_location = os.path.join(tmp, os.path.basename(new_location))
return dist.clone(location=new_location)
def _get_dist(self, requirement, ws):
__doing__ = 'Getting distribution for %r.', str(requirement)
# Maybe an existing dist is already the best dist that satisfies the
# requirement. If not, get a link to an available distribution that
# we could download. The method returns a tuple with an existing
# dist or an available dist. Either 'dist' is None, or 'avail'
# is None, or both are None.
dist, avail = self._satisfied(requirement)
if dist is None:
if self._dest is None:
raise zc.buildout.UserError(
"We don't have a distribution for %s\n"
"and can't install one in offline (no-install) mode.\n"
% requirement)
logger.info(*__doing__)
if avail is None:
# We have no existing dist, and none is available for download.
raise MissingDistribution(requirement, ws)
# We may overwrite distributions, so clear importer
# cache.
sys.path_importer_cache.clear()
tmp = self._download_cache
if tmp is None:
tmp = tempfile.mkdtemp('get_dist')
try:
dist = self._fetch(avail, tmp, self._download_cache)
if dist is None:
raise zc.buildout.UserError(
"Couldn't download distribution %s." % avail)
dists = [_move_to_eggs_dir_and_compile(dist, self._dest)]
for _d in dists:
if _d not in ws:
ws.add(_d, replace=True)
finally:
if tmp != self._download_cache:
zc.buildout.rmtree.rmtree(tmp)
self._env_rescan_dest()
dist = self._env.best_match(requirement, ws)
logger.info("Got %s.", dist)
else:
dists = [dist]
if dist not in ws:
ws.add(dist)
if not self._install_from_cache and self._use_dependency_links:
self._add_dependency_links_from_dists(dists)
if self._check_picked:
self._check_picked_requirement_versions(requirement, dists)
return dists
def _add_dependency_links_from_dists(self, dists):
reindex = False
links = self._links
for dist in dists:
if dist.has_metadata('dependency_links.txt'):
for link in dist.get_metadata_lines('dependency_links.txt'):
link = link.strip()
if link not in links:
logger.debug('Adding find link %r from %s',
link, dist)
links.append(link)
reindex = True
if reindex:
self._index = _get_index(self._index_url, links, self._allow_hosts)
def _check_picked_requirement_versions(self, requirement, dists):
""" Check whether we picked a version and, if we did, report it """
for dist in dists:
if not (dist.precedence == pkg_resources.DEVELOP_DIST
or
(len(requirement.specs) == 1
and
requirement.specs[0][0] == '==')
):
logger.debug('Picked: %s = %s',
dist.project_name, dist.version)
self._picked_versions[dist.project_name] = dist.version
if not self._allow_picked_versions:
msg = NOT_PICKED_AND_NOT_ALLOWED.format(
name=dist.project_name,
version=dist.version
)
raise zc.buildout.UserError(msg)
def _maybe_add_setuptools(self, ws, dist):
if dist_needs_pkg_resources(dist):
# We have a namespace package but no requirement for setuptools
if dist.precedence == pkg_resources.DEVELOP_DIST:
logger.warning(
"Develop distribution: %s\n"
"uses namespace packages but the distribution "
"does not require setuptools.",
dist)
requirement = self._constrain(
pkg_resources.Requirement.parse('setuptools')
)
if ws.find(requirement) is None:
self._get_dist(requirement, ws)
def _constrain(self, requirement):
"""Return requirement with optional [versions] constraint added."""
canonical_name = canonicalize_name(requirement.project_name)
constraint = self._versions.get(canonical_name)
if constraint:
try:
requirement = _constrained_requirement(constraint,
requirement)
except IncompatibleConstraintError:
logger.info(self._version_conflict_information(canonical_name))
raise
return requirement
def install(self, specs, working_set=None):
logger.debug('Installing %s.', repr(specs)[1:-1])
self._requirements_and_constraints.append(
"Base installation request: %s" % repr(specs)[1:-1])
for_buildout_run = bool(working_set)
requirements = [pkg_resources.Requirement.parse(spec)
for spec in specs]
requirements = [
self._constrain(requirement)
for requirement in requirements
if not requirement.marker or requirement.marker.evaluate()
]
if working_set is None:
ws = pkg_resources.WorkingSet([])
else:
ws = working_set
for requirement in requirements:
for dist in self._get_dist(requirement, ws):
self._maybe_add_setuptools(ws, dist)
# OK, we have the requested distributions and they're in the working
# set, but they may have unmet requirements. We'll resolve these
# requirements. This is code modified from
# pkg_resources.WorkingSet.resolve. We can't reuse that code directly
# because we have to constrain our requirements (see
# versions_section_ignored_for_dependency_in_favor_of_site_packages in
# zc.buildout.tests).
requirements.reverse() # Set up the stack.
processed = {} # This is a set of processed requirements.
best = {} # This is a mapping of package name -> dist.
# Note that we don't use the existing environment, because we want
# to look for new eggs unless what we have is the best that
# matches the requirement.
env = Environment(ws.entries)
while requirements:
# Process dependencies breadth-first.
current_requirement = requirements.pop(0)
req = self._constrain(current_requirement)
if req in processed:
# Ignore cyclic or redundant dependencies.
continue
dist = best.get(req.key)
if dist is None:
try:
dist = env.best_match(req, ws)
except pkg_resources.VersionConflict as err:
logger.debug(
"Version conflict while processing requirement %s "
"(constrained to %s)",
current_requirement, req)
# Installing buildout itself and its extensions and
# recipes requires the global
# ``pkg_resources.working_set`` to be active, which also
# includes all system packages. So there might be
# conflicts, which are fine to ignore. We'll grab the
# correct version a few lines down.
if not for_buildout_run:
raise VersionConflict(err, ws)
if dist is None:
if self._dest:
logger.debug('Getting required %r', str(req))
else:
logger.debug('Adding required %r', str(req))
self._log_requirement(ws, req)
for dist in self._get_dist(req, ws):
self._maybe_add_setuptools(ws, dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency.
logger.info(self._version_conflict_information(req.key))
raise VersionConflict(
pkg_resources.VersionConflict(dist, req), ws)
best[req.key] = dist
missing_requested = sorted(
set(req.extras) - set(dist.extras)
)
for missing in missing_requested:
logger.warning(
'%s does not provide the extra \'%s\'',
dist, missing
)
if missing_requested:
if not self._allow_unknown_extras:
raise zc.buildout.UserError(
"Couldn't find the required extra. "
"This means the requirement is incorrect. "
"If the requirement is itself from software you "
"requested, then there might be a bug in "
"requested software. You can ignore this by "
"using 'allow-unknown-extras=true', however "
"that may simply cause needed software to be omitted."
)
extra_requirements = sorted(
set(dist.extras) & set(req.extras)
)
else:
extra_requirements = dist.requires(req.extras)[::-1]
for extra_requirement in extra_requirements:
self._requirements_and_constraints.append(
"Requirement of %s: %s" % (
current_requirement, extra_requirement))
requirements.extend(extra_requirements)
processed[req] = True
return ws
def build(self, spec, build_ext):
requirement = self._constrain(pkg_resources.Requirement.parse(spec))
dist, avail = self._satisfied(requirement, 1)
if dist is not None:
return [dist.location]
# Retrieve the dist:
if avail is None:
raise zc.buildout.UserError(
"Couldn't find a source distribution for %r."
% str(requirement))
if self._dest is None:
raise zc.buildout.UserError(
"We don't have a distribution for %s\n"
"and can't build one in offline (no-install) mode.\n"
% requirement
)
logger.debug('Building %r', spec)
tmp = self._download_cache
if tmp is None:
tmp = tempfile.mkdtemp('get_dist')
try:
dist = self._fetch(avail, tmp, self._download_cache)
build_tmp = tempfile.mkdtemp('build')
try:
setuptools.archive_util.unpack_archive(dist.location,
build_tmp)
base = build_tmp
if not os.path.exists(os.path.join(build_tmp, 'setup.py')):
setups = glob.glob(
os.path.join(build_tmp, '*', 'setup.py'))
if not setups:
# We used to raise an error, but now we just log a warning.
# Maybe there is a pyproject.toml file that pip can use.
# Otherwise we let pip do the complaining.
logger.warning(
"Couldn't find a setup script to build in %s. "
"Trying pip install anyway."
% os.path.basename(dist.location)
)
elif len(setups) > 1:
raise distutils.errors.DistutilsError(
"Multiple setup scripts in %s"
% os.path.basename(dist.location)
)
else:
base = os.path.dirname(setups[0])
setup_cfg = os.path.join(base, 'setup.cfg')
if not os.path.exists(setup_cfg):
f = open(setup_cfg, 'w')
f.close()
setuptools.command.setopt.edit_config(
setup_cfg, dict(build_ext=build_ext))
dists = self._call_pip_install(base, self._dest, dist)
return [dist.location for dist in dists]
finally:
zc.buildout.rmtree.rmtree(build_tmp)
finally:
if tmp != self._download_cache:
zc.buildout.rmtree.rmtree(tmp)
def _fix_file_links(self, links):
for link in links:
if link.startswith('file://') and link[-1] != '/':
if os.path.isdir(link[7:]):
# work around excessive restriction in setuptools:
link += '/'
yield link
def _log_requirement(self, ws, req):
if (not logger.isEnabledFor(logging.DEBUG) and
not Installer._store_required_by):
# Sorting the working set and iterating over its requirements
# is expensive, so short circuit the work if it won't even be
# logged. When profiling a simple buildout with 10 parts with
# identical and large working sets, this resulted in a
# decrease of run time from 93.411 to 15.068 seconds, about a
# 6 fold improvement.
return
ws = list(ws)
ws.sort()
for dist in ws:
if req in dist.requires():
logger.debug(" required by %s." % dist)
req_ = str(req)
if req_ not in Installer._required_by:
Installer._required_by[req_] = set()
Installer._required_by[req_].add(str(dist.as_requirement()))
def _final_version(self, parsed_version):
return not parsed_version.is_prerelease
def normalize_versions(versions):
"""Return version dict with keys canonicalized.
PyPI is case-insensitive and not all distributions are consistent in
their own naming. Also, there are dashes, underscores, dots...
"""
return dict([(canonicalize_name(k), v) for (k, v) in versions.items()])
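# Illustrative example (the project names are hypothetical): differently
# spelled keys collapse onto one canonical form, so lookups are consistent.
#   normalize_versions({'Django': '4.2', 'zope.Interface': '6.0'})
#   -> {'django': '4.2', 'zope-interface': '6.0'}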
def default_versions(versions=None):
old = Installer._versions
if versions is not None:
Installer._versions = normalize_versions(versions)
return old
def download_cache(path=-1):
old = Installer._download_cache
if path != -1:
if path:
path = realpath(path)
Installer._download_cache = path
return old
def install_from_cache(setting=None):
old = Installer._install_from_cache
if setting is not None:
Installer._install_from_cache = bool(setting)
return old
def prefer_final(setting=None):
old = Installer._prefer_final
if setting is not None:
Installer._prefer_final = bool(setting)
return old
def use_dependency_links(setting=None):
old = Installer._use_dependency_links
if setting is not None:
Installer._use_dependency_links = bool(setting)
return old
def index_url(setting=None):
old = Installer._index_url
if setting is not None:
Installer._index_url = setting
return old
def allow_picked_versions(setting=None):
old = Installer._allow_picked_versions
if setting is not None:
Installer._allow_picked_versions = bool(setting)
return old
def store_required_by(setting=None):
old = Installer._store_required_by
if setting is not None:
Installer._store_required_by = bool(setting)
return old
def get_picked_versions():
picked_versions = sorted(Installer._picked_versions.items())
required_by = Installer._required_by
return (picked_versions, required_by)
def get_namespace_packages():
return sorted(Installer._namespace_packages.items())
def install(specs, dest,
links=(), index=None,
executable=sys.executable,
always_unzip=None, # Backward compat :/
path=None, working_set=None, newest=True, versions=None,
use_dependency_links=None, allow_hosts=('*',),
include_site_packages=None,
allowed_eggs_from_site_packages=None,
check_picked=True,
allow_unknown_extras=False,
):
assert executable == sys.executable, (executable, sys.executable)
assert include_site_packages is None
assert allowed_eggs_from_site_packages is None
installer = Installer(dest, links, index, sys.executable,
always_unzip, path,
newest, versions, use_dependency_links,
allow_hosts=allow_hosts,
check_picked=check_picked,
allow_unknown_extras=allow_unknown_extras)
return installer.install(specs, working_set)
buildout_and_setuptools_dists = list(install(['zc.buildout'], None,
check_picked=False))
buildout_and_setuptools_path = sorted({d.location
for d in buildout_and_setuptools_dists})
pip_dists = [d for d in buildout_and_setuptools_dists if d.project_name != 'zc.buildout']
pip_path = sorted({d.location for d in pip_dists})
logger.debug('after restricting versions: pip_path %r', pip_path)
pip_pythonpath = os.pathsep.join(pip_path)
setuptools_path = pip_path
setuptools_pythonpath = pip_pythonpath
def build(spec, dest, build_ext,
links=(), index=None,
executable=sys.executable,
path=None, newest=True, versions=None, allow_hosts=('*',)):
assert executable == sys.executable, (executable, sys.executable)
installer = Installer(dest, links, index, executable,
True, path, newest,
versions, allow_hosts=allow_hosts)
return installer.build(spec, build_ext)
def _rm(*paths):
for path in paths:
if os.path.isdir(path):
zc.buildout.rmtree.rmtree(path)
elif os.path.exists(path):
os.remove(path)
def _create_egg_link(directory, dest, egg_name):
"""Create egg-link file.
setuptools 80 basically removes its own 'setup.py develop' code, and
replaces it with 'pip install -e' (which then calls setuptools again,
but okay). See https://github.com/pypa/setuptools/pull/4955
This leads to a different outcome. There is no longer an .egg-link file
that we can copy.
So we create it ourselves, based on the previous setuptools code.
So what should be in the .egg-link file? Two lines: an egg path and a
relative setup.py path. For example, with setuptools 79 we may have a
file zc.recipe.egg.egg-link containing these two lines:
/Users/maurits/community/buildout/zc.recipe.egg_/src
../
The relative setup.py path on the second line does not seem really used,
but it should be there according to some checks, so let's try to get it
right. There is only so much we can do, but we support two common cases:
a src-layout and a layout with the code starting at the same level as
the setup.py file.
"""
egg_path = os.path.realpath(directory)
assert os.path.isdir(egg_path)
if not egg_name:
egg_name = os.path.basename(egg_path)
if 'src' in os.listdir(egg_path):
egg_path = os.path.join(egg_path, 'src')
setup_path = '..'
else:
setup_path = '.'
# Return TWO lines, so NO line ending on the last line.
contents = f"{egg_path}\n{setup_path}"
egg_link = os.path.join(dest, egg_name) + '.egg-link'
with open(egg_link, "w") as myfile:
myfile.write(contents)
return egg_link
def _copyeggs(src, dest, suffix, undo):
"""Copy eggs.
Expected is:
* 'src' is a temporary directory where the develop egg has been built.
* 'dest' is the 'develop-eggs' directory
* 'suffix' is '.egg-link'
* 'undo' is a list of cleanup actions that will be undone automatically
after this function returns (or throws an exception).
The only thing we need to do: find the file with the given suffix in src,
and move it to dest. This works until and including setuptools 79.
For setuptools 80+ we call _create_egg_link.
"""
egg_links = glob.glob(os.path.join(src, "*" + suffix))
if egg_links:
assert len(egg_links) == 1, str(egg_links)
egg_link = egg_links[0]
name = os.path.basename(egg_link)
new = os.path.join(dest, name)
_rm(new)
os.rename(egg_link, new)
return new
_develop_distutils_scripts = {}
def _detect_distutils_scripts(directory):
"""Record detected distutils scripts from develop eggs
``setup.py develop`` doesn't generate metadata on distutils scripts, in
contrast to ``setup.py install``. So we have to store the information for
later.
This won't find anything on setuptools 80.0.0+, because this does the
editable install with pip, instead of its previous own code. The result
is different. There is no egg-link file, so our code stops early.
Maybe we could skip this check, use a different way of getting the proper
egg_name, and still look for the 'EASY-INSTALL-DEV-SCRIPT' marker that
setuptools adds. But after setuptools 80.3.0 this marker is not set
anymore: the setuptools.command.easy_install module was first removed,
and later only partially restored.
So if we would change the logic here, it would only be potentially useful
for a very short range of setuptools versions.
Also, we look for distutils scripts, which sounds like something that is
long deprecated.
"""
dir_contents = os.listdir(directory)
# TODO For newer dists maybe just look for a 'bin' directory and get
# any script in there.
egginfo_filenames = [filename for filename in dir_contents
if filename.endswith('.egg-link')]
if not egginfo_filenames:
return
egg_name = egginfo_filenames[0].replace('.egg-link', '')
marker = 'EASY-INSTALL-DEV-SCRIPT'
scripts_found = []
for filename in dir_contents:
if filename.endswith('.exe'):
continue
filepath = os.path.join(directory, filename)
if not os.path.isfile(filepath):
continue
with open(filepath) as fp:
dev_script_content = fp.read()
if marker in dev_script_content:
# The distutils bin script points at the actual file we need.
for line in dev_script_content.splitlines():
match = DUNDER_FILE_PATTERN.search(line)
if match:
# The ``__file__ =`` line in the generated script points
# at the actual distutils script we need.
actual_script_filename = match.group('filename')
with open(actual_script_filename) as fp:
actual_script_content = fp.read()
scripts_found.append([filename, actual_script_content])
if scripts_found:
logger.debug(
"Distutils scripts found for develop egg %s: %s",
egg_name, scripts_found)
_develop_distutils_scripts[egg_name] = scripts_found
def develop(setup, dest,
build_ext=None,
executable=sys.executable):
"""Make a development/editable install of a package.
This expects to get a path to a directory or a file as the first argument.
If it is a file, we used to expect it to be a `setup.py` file.
And then we would basically call `python setup.py develop`.
Nowadays it could also be a `pyproject.toml` file.
Calling `setup.py develop` is a deprecated way of installing a package.
In setuptools 80 this still works, but setuptools has internally
changed to call `pip install`. Since zc.buildout 5 we also do that.
We basically ignore the file, and just get its directory instead.
With the `build_ext` option you can influence how C extensions in the
package are built. This may not be possible in a project that is using
hatchling, unless you have some hatchling extensions. So the current
code assumes you are using setuptools. It will create or edit a
`setup.cfg` file in the package directory and put the build_ext
options in there.
"""
assert executable == sys.executable, (executable, sys.executable)
if os.path.isdir(setup):
directory = setup
else:
directory = os.path.dirname(setup)
# We will be calling `pip install -e directory` later on. This works when
# directory is `src/something`. But if it is just `something`, pip will
# try to get `something` from PyPI, even if there is a sub directory
# `something`. So let's make it an absolute path.
# See https://github.com/buildout/buildout/issues/734
# Let's also handle '~/'.
directory = Path(directory).expanduser().resolve()
logger.debug("Making editable install of %s", setup)
undo = []
try:
if build_ext:
setup_cfg = os.path.join(directory, 'setup.cfg')
if os.path.exists(setup_cfg):
os.rename(setup_cfg, setup_cfg+'-develop-aside')
def restore_old_setup():
if os.path.exists(setup_cfg):
os.remove(setup_cfg)
os.rename(setup_cfg+'-develop-aside', setup_cfg)
undo.append(restore_old_setup)
else:
f = open(setup_cfg, 'w')
f.close()
undo.append(lambda: os.remove(setup_cfg))
setuptools.command.setopt.edit_config(
setup_cfg, dict(build_ext=build_ext))
tmp3 = tempfile.mkdtemp('build', dir=dest)
undo.append(lambda : zc.buildout.rmtree.rmtree(tmp3))
egg_name = call_pip_install(directory.as_uri(), tmp3, editable=True)
# output = get_subprocess_output(args)
# if log_level <= logging.DEBUG:
# print(output)
# This won't find anything on setuptools 80+.
# Can't be helped, I think.
_detect_distutils_scripts(tmp3)
# This won't find anything on setuptools 80+.
# But on older setuptools it still works fine.
egg_link = _copyeggs(tmp3, dest, '.egg-link', undo)
if egg_link:
logger.debug("Successfully made editable install: %s", egg_link)
return egg_link
egg_link = _create_egg_link(directory, dest, egg_name)
if egg_link:
logger.debug("Successfully made editable install: %s", egg_link)
return egg_link
logger.error(
"Failure making editable install: no egg-link created for %s",
setup,
)
finally:
undo.reverse()
[f() for f in undo]
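# A hypothetical usage sketch for develop() above (the package path and
# build_ext options are illustrative, not taken from this module):
#   egg_link = develop('src/my.package', 'develop-eggs',
#                      build_ext={'include_dirs': '/usr/local/include'})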
def working_set(specs, executable, path=None,
include_site_packages=None,
allowed_eggs_from_site_packages=None):
# Backward compat:
if path is None:
path = executable
else:
assert executable == sys.executable, (executable, sys.executable)
assert include_site_packages is None
assert allowed_eggs_from_site_packages is None
return install(specs, None, path=path)
def scripts(reqs, working_set, executable, dest=None,
scripts=None,
extra_paths=(),
arguments='',
interpreter=None,
initialization='',
relative_paths=False,
):
assert executable == sys.executable, (executable, sys.executable)
path = [dist.location for dist in working_set]
path.extend(extra_paths)
# order preserving unique
unique_path = []
for p in path:
if p not in unique_path:
unique_path.append(p)
path = [realpath(p) for p in unique_path]
generated = []
if isinstance(reqs, str):
raise TypeError('Expected iterable of requirements or entry points,'
' got string.')
if initialization:
initialization = '\n'+initialization+'\n'
entry_points = []
distutils_scripts = []
for req in reqs:
if isinstance(req, str):
orig_req = pkg_resources.Requirement.parse(req)
if orig_req.marker and not orig_req.marker.evaluate():
continue
dist = None
if is_normalized_name(orig_req.name):
dist = working_set.find(orig_req)
if dist is None:
raise ValueError(
f"Could not find requirement '{orig_req.name}' in working set. "
)
else:
# First try finding the package by its canonical name.
canonicalized_name = canonicalize_name(orig_req.name)
canonical_req = pkg_resources.Requirement.parse(canonicalized_name)
dist = working_set.find(canonical_req)
if dist is None:
# Now try to find the package by the original name we got from
# the requirements. This may succeed with setuptools versions
# older than 75.8.2.
dist = working_set.find(orig_req)
if dist is None:
raise ValueError(
f"Could not find requirement '{orig_req.name}' in working "
f"set. Could not find it with normalized "
f"'{canonicalized_name}' either."
)
# regular console_scripts entry points
for name in pkg_resources.get_entry_map(dist, 'console_scripts'):
entry_point = dist.get_entry_info('console_scripts', name)
entry_points.append(
(name, entry_point.module_name,
'.'.join(entry_point.attrs))
)
# The metadata on "old-style" distutils scripts is not retained by
# distutils/setuptools, except by placing the original scripts in
# /EGG-INFO/scripts/.
if dist.metadata_isdir('scripts'):
# egg-info metadata from installed egg.
for name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + name):
# Probably Python 3 __pycache__ directory.
continue
if name.lower().endswith('.exe'):
# windows: scripts are implemented with 2 files
# the .exe gets also into metadata_listdir
# get_metadata chokes on the binary
continue
contents = dist.get_metadata('scripts/' + name)
distutils_scripts.append((name, contents))
elif dist.key in _develop_distutils_scripts:
# Development eggs don't have metadata about scripts, so we
# collected it ourselves in develop()/ and
# _detect_distutils_scripts().
for name, contents in _develop_distutils_scripts[dist.key]:
distutils_scripts.append((name, contents))
else:
entry_points.append(req)
entry_points_names = []
for name, module_name, attrs in entry_points:
entry_points_names.append(name)
if scripts is not None:
sname = scripts.get(name)
if sname is None:
continue
else:
sname = name
sname = os.path.join(dest, sname)
spath, rpsetup = _relative_path_and_setup(sname, path, relative_paths)
generated.extend(
_script(module_name, attrs, spath, sname, arguments,
initialization, rpsetup)
)
# warn when a script name passed in 'scripts' argument
# is not defined in an entry point.
if scripts is not None:
for name, target in scripts.items():
if name not in entry_points_names:
if name == target:
logger.warning("Could not generate script '%s' as it is not "
"defined in the egg entry points.", name)
else:
logger.warning("Could not generate script '%s' as script "
"'%s' is not defined in the egg entry points.", name, target)
for name, contents in distutils_scripts:
if scripts is not None:
sname = scripts.get(name)
if sname is None:
continue
else:
sname = name
sname = os.path.join(dest, sname)
spath, rpsetup = _relative_path_and_setup(sname, path, relative_paths)
generated.extend(
_distutils_script(spath, sname, contents, initialization, rpsetup)
)
if interpreter:
sname = os.path.join(dest, interpreter)
spath, rpsetup = _relative_path_and_setup(sname, path, relative_paths)
generated.extend(_pyscript(spath, sname, rpsetup, initialization))
return generated
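# Illustrative call of scripts() above (argument values are hypothetical):
# generate console scripts for the eggs in working set `ws` into a bin/
# directory, plus a `py` interpreter wrapper.
#   generated = scripts(['zc.buildout'], ws, sys.executable,
#                       dest='bin', interpreter='py')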
def _relative_path_and_setup(sname, path, relative_paths):
if relative_paths:
relative_paths = os.path.normcase(relative_paths)
sname = os.path.normcase(os.path.abspath(sname))
spath = ',\n '.join(
[_relativitize(os.path.normcase(path_item), sname, relative_paths)
for path_item in path]
)
rpsetup = relative_paths_setup
for i in range(_relative_depth(relative_paths, sname)):
rpsetup += "base = os.path.dirname(base)\n"
else:
spath = repr(path)[1:-1].replace(', ', ',\n ')
rpsetup = ''
return spath, rpsetup
def _relative_depth(common, path):
n = 0
while 1:
dirname = os.path.dirname(path)
if dirname == path:
raise AssertionError("dirname of %s is the same" % dirname)
if dirname == common:
break
n += 1
path = dirname
return n
def _relative_path(common, path):
r = []
while 1:
dirname, basename = os.path.split(path)
r.append(basename)
if dirname == common:
break
if dirname == path:
raise AssertionError("dirname of %s is the same" % dirname)
path = dirname
r.reverse()
return os.path.join(*r)
def _relativitize(path, script, relative_paths):
if path == script:
raise AssertionError("path == script")
if path == relative_paths:
return "base"
common = os.path.dirname(os.path.commonprefix([path, script]))
if (common == relative_paths or
common.startswith(os.path.join(relative_paths, ''))
):
return "join(base, %r)" % _relative_path(common, path)
else:
return repr(path)
relative_paths_setup = """
import os
join = os.path.join
base = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
"""
def _script(module_name, attrs, path, dest, arguments, initialization, rsetup):
if is_win32:
dest += '-script.py'
python = _safe_arg(sys.executable)
contents = script_template % dict(
python = python,
path = path,
module_name = module_name,
attrs = attrs,
arguments = arguments,
initialization = initialization,
relative_paths_setup = rsetup,
)
return _create_script(contents, dest)
def _distutils_script(path, dest, script_content, initialization, rsetup):
if is_win32:
dest += '-script.py'
lines = script_content.splitlines(True)
if not ('#!' in lines[0]) and ('python' in lines[0]):
    # The script doesn't follow distutils' rules. Ignore it.
return []
lines = lines[1:] # Strip off the first hashbang line.
line_with_first_import = len(lines)
for line_number, line in enumerate(lines):
if not 'import' in line:
continue
if not (line.startswith('import') or line.startswith('from')):
continue
if '__future__' in line:
continue
line_with_first_import = line_number
break
before = ''.join(lines[:line_with_first_import])
after = ''.join(lines[line_with_first_import:])
python = _safe_arg(sys.executable)
contents = distutils_script_template % dict(
python = python,
path = path,
initialization = initialization,
relative_paths_setup = rsetup,
before = before,
after = after
)
return _create_script(contents, dest)
def _file_changed(filename, old_contents, mode='r'):
try:
with open(filename, mode) as f:
return f.read() != old_contents
except EnvironmentError as e:
if e.errno == errno.ENOENT:
return True
else:
raise
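# _create_script() rewrites the target only when its contents changed; on
# Windows it also drops a matching <name>.exe launcher next to the
# -script.py file.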
def _create_script(contents, dest):
generated = []
script = dest
changed = _file_changed(dest, contents)
if is_win32:
# generate exe file and give the script a magic name:
win32_exe = os.path.splitext(dest)[0] # remove ".py"
if win32_exe.endswith('-script'):
win32_exe = win32_exe[:-7] # remove "-script"
win32_exe = win32_exe + '.exe' # add ".exe"
new_data = get_win_launcher('cli')
if _file_changed(win32_exe, new_data, 'rb'):
# Only write it if it's different.
with open(win32_exe, 'wb') as f:
f.write(new_data)
generated.append(win32_exe)
if changed:
with open(dest, 'w') as f:
f.write(contents)
logger.info(
"Generated script %r.",
# Normalize for windows
script.endswith('-script.py') and script[:-10] or script)
try:
os.chmod(dest, _execute_permission())
except (AttributeError, os.error):
pass
generated.append(dest)
return generated
if is_jython and jython_os_name == 'linux':
script_header = '#!/usr/bin/env %(python)s'
else:
script_header = '#!%(python)s'
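# Template for console_script wrappers; the %(...)s placeholders are filled
# in by _script() with the egg paths, optional initialization code and the
# entry point to import and call.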
script_template = script_header + '''\
%(relative_paths_setup)s
import sys
sys.path[0:0] = [
%(path)s,
]
%(initialization)s
import %(module_name)s
if __name__ == '__main__':
sys.exit(%(module_name)s.%(attrs)s(%(arguments)s))
'''
distutils_script_template = script_header + '''
%(before)s
%(relative_paths_setup)s
import sys
sys.path[0:0] = [
%(path)s,
]
%(initialization)s
%(after)s'''
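# _pyscript() generates the interpreter wrapper (the script named by the
# `interpreter` option) from py_script_template; on Windows it also writes
# setuptools' cli.exe launcher next to it.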
def _pyscript(path, dest, rsetup, initialization=''):
generated = []
script = dest
if is_win32:
dest += '-script.py'
python = _safe_arg(sys.executable)
if path:
path += ',' # Courtesy comma at the end of the list.
contents = py_script_template % dict(
python = python,
path = path,
relative_paths_setup = rsetup,
initialization=initialization,
)
changed = _file_changed(dest, contents)
if is_win32:
# generate exe file and give the script a magic name:
exe = script + '.exe'
with open(exe, 'wb') as f:
f.write(
pkg_resources.resource_string('setuptools', 'cli.exe')
)
generated.append(exe)
if changed:
with open(dest, 'w') as f:
f.write(contents)
try:
os.chmod(dest, _execute_permission())
except (AttributeError, os.error):
pass
logger.info("Generated interpreter %r.", script)
generated.append(dest)
return generated
py_script_template = script_header + '''\
%(relative_paths_setup)s
import sys
sys.path[0:0] = [
%(path)s
]
%(initialization)s
_interactive = True
if len(sys.argv) > 1:
# The Python interpreter wrapper allows only some of the options that a
# "regular" Python interpreter accepts.
_options, _args = __import__("getopt").getopt(sys.argv[1:], 'Iic:m:')
_interactive = False
for (_opt, _val) in _options:
if _opt == '-i':
_interactive = True
elif _opt == '-c':
exec(_val)
elif _opt == '-m':
sys.argv[1:] = _args
_args = []
__import__("runpy").run_module(
_val, {}, "__main__", alter_sys=True)
elif _opt == '-I':
# Allow yet silently ignore the `-I` option. The original behaviour
# for this option is to create an isolated Python runtime. It was
# deemed acceptable to allow the option here as this Python wrapper
# is isolated from the system Python already anyway.
# The specific use-case that led to this change is how the Python
# language extension for Visual Studio Code calls the Python
# interpreter when initializing the extension.
pass
if _args:
sys.argv[:] = _args
__file__ = _args[0]
del _options, _args
with open(__file__) as __file__f:
exec(compile(__file__f.read(), __file__, "exec"))
if _interactive:
del _interactive
__import__("code").interact(banner="", local=globals())
'''
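# runsetup_template runs a setup.py in a child process with setuptools
# importable; the outer % substitution below bakes in setuptools_path at
# definition time, while the %%(...)s placeholders are filled in later.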
runsetup_template = """
import sys
sys.path.insert(0, %%(setupdir)r)
sys.path[0:0] = %r
import os, setuptools
%%(extra)s
__file__ = %%(__file__)r
os.chdir(%%(setupdir)r)
sys.argv[0] = %%(setup)r
with open(%%(setup)r) as f:
exec(compile(f.read(), %%(setup)r, 'exec'))
""" % setuptools_path
disable_root_logger = """
import logging
root_logger = logging.getLogger()
handler = logging.NullHandler()
root_logger.addHandler(handler)
"""
| Installer |
python | Pylons__pyramid | docs/quick_tutorial/request_response/tutorial/tests.py | {
"start": 47,
"end": 969
} | class ____(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def test_home(self):
from .views import TutorialViews
request = testing.DummyRequest()
inst = TutorialViews(request)
response = inst.home()
self.assertEqual(response.status, '302 Found')
def test_plain_without_name(self):
from .views import TutorialViews
request = testing.DummyRequest()
inst = TutorialViews(request)
response = inst.plain()
self.assertIn(b'No Name Provided', response.body)
def test_plain_with_name(self):
from .views import TutorialViews
request = testing.DummyRequest()
request.GET['name'] = 'Jane Doe'
inst = TutorialViews(request)
response = inst.plain()
self.assertIn(b'Jane Doe', response.body)
| TutorialViewTests |
python | gwtw__py-sorting | test/heapsort_test.py | {
"start": 403,
"end": 741
} | class ____(unittest.TestCase,
BaseCustomComparisonSortTest,
BasePositiveIntegerSortTest,
BaseNegativeIntegerSortTest,
BaseStringSortTest):
def setUp(self):
self.sort = heapsort.sort
if __name__ == '__main__':
unittest.main()
| HeapsortSortTest |
python | matplotlib__matplotlib | lib/matplotlib/backend_tools.py | {
"start": 5045,
"end": 7585
} | class ____(ToolBase):
"""
Toggleable tool.
Every time it is triggered, it switches between enable and disable.
Parameters
----------
``*args``
Variable length argument to be used by the Tool.
``**kwargs``
`toggled` if present and True, sets the initial state of the Tool
Arbitrary keyword arguments to be consumed by the Tool
"""
radio_group = None
"""
Attribute to group 'radio' like tools (mutually exclusive).
`str` that identifies the group or **None** if not belonging to a group.
"""
cursor = None
"""Cursor to use when the tool is active."""
default_toggled = False
"""Default of toggled state."""
def __init__(self, *args, **kwargs):
self._toggled = kwargs.pop('toggled', self.default_toggled)
super().__init__(*args, **kwargs)
def trigger(self, sender, event, data=None):
"""Calls `enable` or `disable` based on `~ToolToggleBase.toggled` value."""
if self._toggled:
self.disable(event)
else:
self.enable(event)
self._toggled = not self._toggled
def enable(self, event=None):
"""
Enable the toggle tool.
`trigger` calls this method when `~ToolToggleBase.toggled` is False.
"""
pass
def disable(self, event=None):
"""
Disable the toggle tool.
        `trigger` calls this method when `~ToolToggleBase.toggled` is True.
This can happen in different circumstances.
* Click on the toolbar tool button.
* Call to `matplotlib.backend_managers.ToolManager.trigger_tool`.
* Another `ToolToggleBase` derived tool is triggered
(from the same `.ToolManager`).
"""
pass
@property
def toggled(self):
"""State of the toggled tool."""
return self._toggled
def set_figure(self, figure):
toggled = self.toggled
if toggled:
if self.figure:
self.trigger(self, None)
else:
# if no figure the internal state is not changed
# we change it here so next call to trigger will change it back
self._toggled = False
super().set_figure(figure)
if toggled:
if figure:
self.trigger(self, None)
else:
# if there is no figure, trigger won't change the internal
# state we change it back
self._toggled = True
| ToolToggleBase |
python | walkccc__LeetCode | solutions/2330. Valid Palindrome IV/2330.py | {
"start": 0,
"end": 254
} | class ____:
def makePalindrome(self, s: str) -> bool:
change = 0
l = 0
r = len(s) - 1
while l < r:
if s[l] != s[r]:
change += 1
if change > 2:
return False
l += 1
r -= 1
return True
| Solution |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/triggers/wasb.py | {
"start": 3944,
"end": 7456
} | class ____(BaseTrigger):
"""
Check for the existence of a blob with the given prefix in the provided container.
    WasbPrefixSensorTrigger is fired as a deferred class with params to run the task in the triggerer.
:param container_name: name of the container in which the blob should be searched for
:param prefix: prefix of the blob to check existence for
:param include: specifies one or more additional datasets to include in the
response. Options include: ``snapshots``, ``metadata``, ``uncommittedblobs``,
``copy``, ``deleted``
:param delimiter: filters objects based on the delimiter (for e.g '.csv')
:param wasb_conn_id: the connection identifier for connecting to Azure WASB
:param check_options: Optional keyword arguments that
`WasbAsyncHook.check_for_prefix_async()` takes.
    :param public_read: whether anonymous public read access should be used. Default is False
:param poke_interval: polling period in seconds to check for the status
"""
def __init__(
self,
container_name: str,
prefix: str,
wasb_conn_id: str = "wasb_default",
check_options: dict | None = None,
public_read: bool = False,
poke_interval: float = 5.0,
):
if not check_options:
check_options = {}
super().__init__()
self.container_name = container_name
self.prefix = prefix
self.wasb_conn_id = wasb_conn_id
self.check_options = check_options
self.poke_interval = poke_interval
self.public_read = public_read
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize WasbPrefixSensorTrigger arguments and classpath."""
return (
"airflow.providers.microsoft.azure.triggers.wasb.WasbPrefixSensorTrigger",
{
"container_name": self.container_name,
"prefix": self.prefix,
"wasb_conn_id": self.wasb_conn_id,
"poke_interval": self.poke_interval,
"check_options": self.check_options,
"public_read": self.public_read,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Make async connection to Azure WASB and polls for existence of a blob with given prefix."""
prefix_exists = False
hook = WasbAsyncHook(wasb_conn_id=self.wasb_conn_id, public_read=self.public_read)
try:
async with await hook.get_async_conn():
while not prefix_exists:
prefix_exists = await hook.check_for_prefix_async(
container_name=self.container_name, prefix=self.prefix, **self.check_options
)
if prefix_exists:
message = f"Prefix {self.prefix} found in container {self.container_name}."
yield TriggerEvent({"status": "success", "message": message})
return
else:
message = (
f"Prefix {self.prefix} not available yet in container {self.container_name}."
f" Sleeping for {self.poke_interval} seconds"
)
self.log.info(message)
await asyncio.sleep(self.poke_interval)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
| WasbPrefixSensorTrigger |
python | huggingface__transformers | src/transformers/models/emu3/modeling_emu3.py | {
"start": 28385,
"end": 30868
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.num_resolutions = len(config.channel_multiplier)
self.num_res_blocks = config.num_res_blocks
quant_channels = config.embed_dim
block_in = config.base_channels * config.channel_multiplier[-1]
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
attn_norms = nn.ModuleList()
block_out = config.base_channels * config.channel_multiplier[i_level]
for i_block in range(self.num_res_blocks + 1):
block.append(
Emu3VQVAEResnetBlock(
in_channels=block_in,
out_channels=block_out,
quant_channels=quant_channels,
)
)
block_in = block_out
if i_level in config.attn_resolutions:
attn.append(Emu3VQVAEAttentionBlock(config))
attn_norms.append(Emu3VQVAESpatialNorm(quant_channels, block_in))
up = nn.Module()
up.block = block
up.attn = attn
up.attn_norms = attn_norms
if i_level != 0:
up.upsample = Emu3VQVAEEncoderConvUpsample(block_in)
self.up.insert(0, up)
def forward(self, hidden_states: torch.FloatTensor, quant_states: torch.FloatTensor):
for i_level, blocks in enumerate(self.up[::-1]):
for i_block in range(self.num_res_blocks + 1):
hidden_states = blocks.block[i_block](hidden_states, quant_states)
if len(blocks.attn) > 0:
residual = hidden_states
hidden_states = blocks.attn_norms[i_block](hidden_states, quant_states)
batch_size, channels, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
hidden_states = blocks.attn[i_block](hidden_states)[0]
hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
hidden_states = residual + hidden_states
if i_level != len(self.up) - 1:
hidden_states = blocks.upsample(hidden_states)
return hidden_states
| Emu3VQVAEUpBlock |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/links/step_function.py | {
"start": 952,
"end": 1514
} | class ____(BaseAwsLink):
"""Helper class for constructing link to State Machine details page."""
name = "State Machine Details"
key = "_state_machine_details"
format_str = (
BASE_AWS_CONSOLE_LINK + "/states/home?region={region_name}#/statemachines/view/{state_machine_arn}"
)
def format_link(self, *, state_machine_arn: str | None = None, **kwargs) -> str:
if not state_machine_arn:
return ""
return super().format_link(state_machine_arn=quote_plus(state_machine_arn), **kwargs)
| StateMachineDetailsLink |
python | gevent__gevent | src/gevent/_ffi/watcher.py | {
"start": 17895,
"end": 17949
} | class ____(object):
_watcher_type = 'idle'
| IdleMixin |
python | ipython__ipython | IPython/core/autocall.py | {
"start": 1319,
"end": 1593
} | class ____(IPyAutocall):
"""An autocallable object which will be added to the user namespace so that
exit, exit(), quit or quit() are all valid ways to close the shell."""
rewrite = False
def __call__(self):
self._ip.ask_exit()
| ExitAutocall |
python | huggingface__transformers | tests/generation/test_candidate_generator.py | {
"start": 4280,
"end": 4760
} | class ____:
"""A simple mock tokenizer class that supports weak references."""
def __init__(self, vocab=None):
self._vocab = vocab or {}
def get_vocab(self):
return self._vocab
def __call__(self, text, add_special_tokens=True):
# Mock implementation of the __call__ method
tokens = text.split()
input_ids = [self._vocab.get(token, 0) for token in tokens]
return {"input_ids": input_ids}
@require_torch
| MockTokenizer |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 26598,
"end": 27467
} | class ____(Projection):
r"""Base class for conic projections.
In conic projections, the sphere is thought to be projected onto
the surface of a cone which is then opened out.
In a general sense, the pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) / C \\
R_\theta &= \mathrm{sign} \theta_a \sqrt{x^2 + (Y_0 - y)^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin (C \phi) \\
y &= R_\theta \cos (C \phi) + Y_0
where :math:`C` is the "constant of the cone":
.. math::
C = \frac{180^\circ \cos \theta}{\pi R_\theta}
"""
sigma = _ParameterDS(default=90.0, getter=_to_orig_unit, setter=_to_radian)
delta = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian)
| Conic |
python | pandas-dev__pandas | pandas/tests/indexes/numeric/test_astype.py | {
"start": 134,
"end": 3618
} | class ____:
def test_astype_float64_to_uint64(self):
# GH#45309 used to incorrectly return Index with int64 dtype
idx = Index([0.0, 5.0, 10.0, 15.0, 20.0], dtype=np.float64)
result = idx.astype("u8")
expected = Index([0, 5, 10, 15, 20], dtype=np.uint64)
tm.assert_index_equal(result, expected, exact=True)
idx_with_negatives = idx - 10
with pytest.raises(ValueError, match="losslessly"):
idx_with_negatives.astype(np.uint64)
def test_astype_float64_to_object(self):
float_index = Index([0.0, 2.5, 5.0, 7.5, 10.0], dtype=np.float64)
result = float_index.astype(object)
assert result.equals(float_index)
assert float_index.equals(result)
assert isinstance(result, Index) and result.dtype == object
def test_astype_float64_mixed_to_object(self):
# mixed int-float
idx = Index([1.5, 2, 3, 4, 5], dtype=np.float64)
idx.name = "foo"
result = idx.astype(object)
assert result.equals(idx)
assert idx.equals(result)
assert isinstance(result, Index) and result.dtype == object
@pytest.mark.parametrize("dtype", ["int16", "int32", "int64"])
def test_astype_float64_to_int_dtype(self, dtype):
# GH#12881
# a float astype int
idx = Index([0, 1, 2], dtype=np.float64)
result = idx.astype(dtype)
expected = Index([0, 1, 2], dtype=dtype)
tm.assert_index_equal(result, expected, exact=True)
idx = Index([0, 1.1, 2], dtype=np.float64)
result = idx.astype(dtype)
expected = Index([0, 1, 2], dtype=dtype)
tm.assert_index_equal(result, expected, exact=True)
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_astype_float64_to_float_dtype(self, dtype):
# GH#12881
# a float astype int
idx = Index([0, 1, 2], dtype=np.float64)
result = idx.astype(dtype)
assert isinstance(result, Index) and result.dtype == dtype
@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])
def test_astype_float_to_datetimelike(self, dtype):
# GH#49660 pre-2.0 Index.astype from floating to M8/m8/Period raised,
# inconsistent with Series.astype
idx = Index([0, 1.1, 2], dtype=np.float64)
result = idx.astype(dtype)
if dtype[0] == "M":
expected = to_datetime(idx.values)
else:
expected = to_timedelta(idx.values)
tm.assert_index_equal(result, expected)
# check that we match Series behavior
result = idx.to_series().set_axis(range(3)).astype(dtype)
expected = expected.to_series().set_axis(range(3))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [int, "int16", "int32", "int64"])
@pytest.mark.parametrize("non_finite", [np.inf, np.nan])
def test_cannot_cast_inf_to_int(self, non_finite, dtype):
# GH#13149
idx = Index([1, 2, non_finite], dtype=np.float64)
msg = r"Cannot convert non-finite values \(NA or inf\) to integer"
with pytest.raises(ValueError, match=msg):
idx.astype(dtype)
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype="object")
result = index.astype(float)
expected = Index([1.0, np.nan, 0.2], dtype=np.float64)
assert result.dtype == expected.dtype
tm.assert_index_equal(result, expected)
| TestAstype |
python | PrefectHQ__prefect | src/prefect/cache_policies.py | {
"start": 10890,
"end": 12749
} | class ____(CachePolicy):
"""
    Policy that computes a cache key based on a hash of the runtime inputs provided to the task.
"""
exclude: list[str] = field(default_factory=lambda: [])
def compute_key(
self,
task_ctx: TaskRunContext,
inputs: dict[str, Any],
flow_parameters: dict[str, Any],
**kwargs: Any,
) -> Optional[str]:
hashed_inputs = {}
inputs = inputs or {}
exclude = self.exclude or []
if not inputs:
return None
for key, val in inputs.items():
if key not in exclude:
transformer = STABLE_TRANSFORMS.get(type(val)) # type: ignore[reportUnknownMemberType]
hashed_inputs[key] = transformer(val) if transformer else val
try:
return hash_objects(hashed_inputs, raise_on_failure=True)
except HashError as exc:
msg = (
f"{exc}\n\n"
"This often occurs when task inputs contain objects that cannot be cached "
"like locks, file handles, or other system resources.\n\n"
"To resolve this, you can:\n"
" 1. Exclude these arguments by defining a custom `cache_key_fn`\n"
" 2. Disable caching by passing `cache_policy=NO_CACHE`\n"
)
raise ValueError(msg) from exc
def __sub__(self, other: str) -> "CachePolicy":
if not isinstance(other, str): # type: ignore[reportUnnecessaryIsInstance]
raise TypeError("Can only subtract strings from key policies.")
return Inputs(exclude=self.exclude + [other])
_register_stable_transforms()
INPUTS = Inputs()
NONE = _None()
NO_CACHE = _None()
TASK_SOURCE = TaskSource()
FLOW_PARAMETERS = FlowParameters()
RUN_ID = RunId()
DEFAULT = INPUTS + TASK_SOURCE + RUN_ID
| Inputs |
python | huggingface__transformers | src/transformers/models/tapas/modeling_tapas.py | {
"start": 21236,
"end": 21934
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = TapasPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=True)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Tapas
| TapasLMPredictionHead |
python | imageio__imageio | imageio/plugins/feisem.py | {
"start": 725,
"end": 3360
} | class ____(TiffFormat):
"""See :mod:`imageio.plugins.feisem`"""
def _can_write(self, request):
return False # FEI-SEM only supports reading
class Reader(TiffFormat.Reader):
def _get_data(self, index=0, discard_watermark=True, watermark_height=70):
"""Get image and metadata from given index.
FEI images usually (always?) contain a watermark at the
bottom of the image, 70 pixels high. We discard this by
default as it does not contain any information not present
in the metadata.
"""
im, meta = super(FEISEMFormat.Reader, self)._get_data(index)
if discard_watermark:
im = im[:-watermark_height]
return im, meta
def _get_meta_data(self, index=None):
"""Read the metadata from an FEI SEM TIFF.
This metadata is included as ASCII text at the end of the file.
The index, if provided, is ignored.
Returns
-------
metadata : dict
Dictionary of metadata.
"""
if hasattr(self, "_fei_meta"):
return self._fei_meta
md = {"root": {}}
current_tag = "root"
reading_metadata = False
filename = self.request.get_local_filename()
with open(filename, encoding="utf8", errors="ignore") as fin:
for line in fin:
if not reading_metadata:
if not line.startswith("Date="):
continue
else:
reading_metadata = True
line = line.rstrip()
if line.startswith("["):
current_tag = line.lstrip("[").rstrip("]")
md[current_tag] = {}
else:
if "=" in line: # ignore empty and irrelevant lines
key, val = line.split("=", maxsplit=1)
for tag_type in (int, float):
try:
val = tag_type(val)
except ValueError:
continue
else:
break
md[current_tag][key] = val
if not md["root"] and len(md) == 1:
raise ValueError("Input file %s contains no FEI metadata." % filename)
self._fei_meta = md
return md
| FEISEMFormat |
python | getsentry__sentry | src/sentry/snuba/query_sources.py | {
"start": 24,
"end": 129
} | class ____(Enum):
FRONTEND = "frontend"
API = "api"
SENTRY_BACKEND = "sentry_backend"
| QuerySource |
python | huggingface__transformers | src/transformers/models/llava_next/image_processing_llava_next.py | {
"start": 1602,
"end": 3709
} | class ____(ImagesKwargs, total=False):
r"""
image_grid_pinpoints (`list[list[int]]`, *optional*):
A list of possible resolutions to use for processing high resolution images. The best resolution is selected
based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess`
method.
"""
image_grid_pinpoints: list[list[int]]
def divide_to_patches(image: np.ndarray, patch_size: int, input_data_format) -> list[np.ndarray]:
"""
Divides an image into patches of a specified size.
Args:
image (`np.ndarray`):
The input image.
patch_size (`int`):
The size of each patch.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
list: A list of np.ndarray representing the patches.
"""
patches = []
height, width = get_image_size(image, channel_dim=input_data_format)
for i in range(0, height, patch_size):
for j in range(0, width, patch_size):
if input_data_format == ChannelDimension.LAST:
patch = image[i : i + patch_size, j : j + patch_size]
else:
patch = image[:, i : i + patch_size, j : j + patch_size]
patches.append(patch)
return patches
def expand_to_square(image: np.ndarray, background_color, input_data_format) -> np.ndarray:
"""
Expands an image to a square by adding a background color.
"""
height, width = get_image_size(image, channel_dim=input_data_format)
if width == height:
return image
elif width > height:
result = np.ones((width, width, image.shape[2]), dtype=image.dtype) * background_color
result[(width - height) // 2 : (width - height) // 2 + height, :] = image
return result
else:
result = np.ones((height, height, image.shape[2]), dtype=image.dtype) * background_color
result[:, (height - width) // 2 : (height - width) // 2 + width] = image
return result
| LlavaNextImageProcessorKwargs |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py | {
"start": 8949,
"end": 10580
} | class ____(GeneratedAirbyteDestination):
@public
def __init__(
self,
name: str,
host: str,
port: int,
database: str,
username: str,
password: Optional[str] = None,
jdbc_url_params: Optional[str] = None,
ssl: Optional[bool] = None,
):
"""Airbyte Destination for Clickhouse.
Documentation can be found at https://docs.airbyte.com/integrations/destinations/clickhouse
Args:
name (str): The name of the destination.
host (str): Hostname of the database.
port (int): HTTP port of the database.
database (str): Name of the database.
username (str): Username to use to access the database.
password (Optional[str]): Password associated with the username.
jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
ssl (Optional[bool]): Encrypt data using SSL.
"""
self.host = check.str_param(host, "host")
self.port = check.int_param(port, "port")
self.database = check.str_param(database, "database")
self.username = check.str_param(username, "username")
self.password = check.opt_str_param(password, "password")
self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")
self.ssl = check.opt_bool_param(ssl, "ssl")
super().__init__("Clickhouse", name)
| ClickhouseDestination |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/schema.py | {
"start": 10126,
"end": 59297
} | class ____(
DialectKWArgs, HasSchemaAttr, TableClause, inspection.Inspectable["Table"]
):
r"""Represent a table in a database.
e.g.::
mytable = Table(
"mytable",
metadata,
Column("mytable_id", Integer, primary_key=True),
Column("value", String(50)),
)
The :class:`_schema.Table`
object constructs a unique instance of itself based
on its name and optional schema name within the given
:class:`_schema.MetaData` object. Calling the :class:`_schema.Table`
constructor with the same name and same :class:`_schema.MetaData` argument
a second time will return the *same* :class:`_schema.Table`
object - in this way
the :class:`_schema.Table` constructor acts as a registry function.
.. seealso::
:ref:`metadata_describing` - Introduction to database metadata
"""
__visit_name__ = "table"
if TYPE_CHECKING:
@util.ro_non_memoized_property
def primary_key(self) -> PrimaryKeyConstraint: ...
@util.ro_non_memoized_property
def foreign_keys(self) -> Set[ForeignKey]: ...
_columns: DedupeColumnCollection[Column[Any]] # type: ignore[assignment]
_sentinel_column: Optional[Column[Any]]
constraints: Set[Constraint]
"""A collection of all :class:`_schema.Constraint` objects associated with
this :class:`_schema.Table`.
Includes :class:`_schema.PrimaryKeyConstraint`,
:class:`_schema.ForeignKeyConstraint`, :class:`_schema.UniqueConstraint`,
:class:`_schema.CheckConstraint`. A separate collection
:attr:`_schema.Table.foreign_key_constraints` refers to the collection
of all :class:`_schema.ForeignKeyConstraint` objects, and the
:attr:`_schema.Table.primary_key` attribute refers to the single
:class:`_schema.PrimaryKeyConstraint` associated with the
:class:`_schema.Table`.
.. seealso::
:attr:`_schema.Table.constraints`
:attr:`_schema.Table.primary_key`
:attr:`_schema.Table.foreign_key_constraints`
:attr:`_schema.Table.indexes`
:class:`_reflection.Inspector`
"""
indexes: Set[Index]
"""A collection of all :class:`_schema.Index` objects associated with this
:class:`_schema.Table`.
.. seealso::
:meth:`_reflection.Inspector.get_indexes`
"""
if TYPE_CHECKING:
@util.ro_non_memoized_property
def columns(self) -> ReadOnlyColumnCollection[str, Column[Any]]: ...
@util.ro_non_memoized_property
def exported_columns(
self,
) -> ReadOnlyColumnCollection[str, Column[Any]]: ...
@util.ro_non_memoized_property
def c(self) -> ReadOnlyColumnCollection[str, Column[Any]]: ...
def _gen_cache_key(
self, anon_map: anon_map, bindparams: List[BindParameter[Any]]
) -> Tuple[Any, ...]:
if self._annotations:
return (self,) + self._annotations_cache_key
else:
return (self,)
if not typing.TYPE_CHECKING:
# typing tools seem to be inconsistent in how they handle
# __new__, so suggest this pattern for classes that use
# __new__. apply typing to the __init__ method normally
@util.deprecated_params(
mustexist=(
"1.4",
"Deprecated alias of :paramref:`_schema.Table.must_exist`",
),
)
def __new__(cls, *args: Any, **kw: Any) -> Any:
return cls._new(*args, **kw)
@classmethod
def _new(cls, *args: Any, **kw: Any) -> Any:
if not args and not kw:
# python3k pickle seems to call this
return object.__new__(cls)
try:
name, metadata, args = args[0], args[1], args[2:]
except IndexError:
raise TypeError(
"Table() takes at least two positional-only "
"arguments 'name' and 'metadata'"
)
schema = kw.get("schema", None)
if schema is None:
schema = metadata.schema
elif schema is BLANK_SCHEMA:
schema = None
keep_existing = kw.get("keep_existing", False)
extend_existing = kw.get("extend_existing", False)
if keep_existing and extend_existing:
msg = "keep_existing and extend_existing are mutually exclusive."
raise exc.ArgumentError(msg)
must_exist = kw.pop("must_exist", kw.pop("mustexist", False))
key = _get_table_key(name, schema)
if key in metadata.tables:
if not keep_existing and not extend_existing and bool(args):
raise exc.InvalidRequestError(
f"Table '{key}' is already defined for this MetaData "
"instance. Specify 'extend_existing=True' "
"to redefine "
"options and columns on an "
"existing Table object."
)
table = metadata.tables[key]
if extend_existing:
table._init_existing(*args, **kw)
return table
else:
if must_exist:
raise exc.InvalidRequestError(f"Table '{key}' not defined")
table = object.__new__(cls)
table.dispatch.before_parent_attach(table, metadata)
metadata._add_table(name, schema, table)
try:
table.__init__(name, metadata, *args, _no_init=False, **kw) # type: ignore[misc] # noqa: E501
table.dispatch.after_parent_attach(table, metadata)
return table
except Exception:
with util.safe_reraise():
metadata._remove_table(name, schema)
def __init__(
self,
name: str,
metadata: MetaData,
*args: SchemaItem,
schema: Optional[Union[str, Literal[SchemaConst.BLANK_SCHEMA]]] = None,
quote: Optional[bool] = None,
quote_schema: Optional[bool] = None,
autoload_with: Optional[Union[Engine, Connection]] = None,
autoload_replace: bool = True,
keep_existing: bool = False,
extend_existing: bool = False,
resolve_fks: bool = True,
include_columns: Optional[Collection[str]] = None,
implicit_returning: bool = True,
comment: Optional[str] = None,
info: Optional[Dict[Any, Any]] = None,
listeners: Optional[
_typing_Sequence[Tuple[str, Callable[..., Any]]]
] = None,
prefixes: Optional[_typing_Sequence[str]] = None,
_creator_ddl: TableCreateDDL | None = None,
_dropper_ddl: TableDropDDL | None = None,
# used internally in the metadata.reflect() process
_extend_on: Optional[Set[Table]] = None,
# used by __new__ to bypass __init__
_no_init: bool = True,
# dialect-specific keyword args
**kw: Any,
) -> None:
r"""Constructor for :class:`_schema.Table`.
:param name: The name of this table as represented in the database.
The table name, along with the value of the ``schema`` parameter,
forms a key which uniquely identifies this :class:`_schema.Table`
within
the owning :class:`_schema.MetaData` collection.
Additional calls to :class:`_schema.Table` with the same name,
metadata,
and schema name will return the same :class:`_schema.Table` object.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word or contain special characters.
A name with any number of upper case characters is considered
to be case sensitive, and will be sent as quoted.
To enable unconditional quoting for the table name, specify the flag
``quote=True`` to the constructor, or use the :class:`.quoted_name`
construct to specify the name.
:param metadata: a :class:`_schema.MetaData`
object which will contain this
table. The metadata is used as a point of association of this table
with other tables which are referenced via foreign key. It also
may be used to associate this table with a particular
:class:`.Connection` or :class:`.Engine`.
:param \*args: Additional positional arguments are used primarily
to add the list of :class:`_schema.Column`
objects contained within this
table. Similar to the style of a CREATE TABLE statement, other
:class:`.SchemaItem` constructs may be added here, including
:class:`.PrimaryKeyConstraint`, and
:class:`_schema.ForeignKeyConstraint`.
:param autoload_replace: Defaults to ``True``; when using
:paramref:`_schema.Table.autoload_with`
in conjunction with :paramref:`_schema.Table.extend_existing`,
indicates
that :class:`_schema.Column` objects present in the already-existing
:class:`_schema.Table`
object should be replaced with columns of the same
name retrieved from the autoload process. When ``False``, columns
already present under existing names will be omitted from the
reflection process.
Note that this setting does not impact :class:`_schema.Column` objects
specified programmatically within the call to :class:`_schema.Table`
that
also is autoloading; those :class:`_schema.Column` objects will always
replace existing columns of the same name when
:paramref:`_schema.Table.extend_existing` is ``True``.
.. seealso::
:paramref:`_schema.Table.autoload_with`
:paramref:`_schema.Table.extend_existing`
:param autoload_with: An :class:`_engine.Engine` or
:class:`_engine.Connection` object,
or a :class:`_reflection.Inspector` object as returned by
:func:`_sa.inspect`
against one, with which this :class:`_schema.Table`
object will be reflected.
When set to a non-None value, the autoload process will take place
for this table against the given engine or connection.
.. seealso::
:ref:`metadata_reflection_toplevel`
:meth:`_events.DDLEvents.column_reflect`
:ref:`metadata_reflection_dbagnostic_types`
:param extend_existing: When ``True``, indicates that if this
:class:`_schema.Table` is already present in the given
:class:`_schema.MetaData`,
apply further arguments within the constructor to the existing
:class:`_schema.Table`.
If :paramref:`_schema.Table.extend_existing` or
:paramref:`_schema.Table.keep_existing` are not set,
and the given name
of the new :class:`_schema.Table` refers to a :class:`_schema.Table`
that is
already present in the target :class:`_schema.MetaData` collection,
and
this :class:`_schema.Table`
specifies additional columns or other constructs
or flags that modify the table's state, an
error is raised. The purpose of these two mutually-exclusive flags
is to specify what action should be taken when a
:class:`_schema.Table`
is specified that matches an existing :class:`_schema.Table`,
yet specifies
additional constructs.
:paramref:`_schema.Table.extend_existing`
will also work in conjunction
with :paramref:`_schema.Table.autoload_with` to run a new reflection
operation against the database, even if a :class:`_schema.Table`
of the same name is already present in the target
:class:`_schema.MetaData`; newly reflected :class:`_schema.Column`
objects
and other options will be added into the state of the
:class:`_schema.Table`, potentially overwriting existing columns
and options of the same name.
As is always the case with :paramref:`_schema.Table.autoload_with`,
:class:`_schema.Column` objects can be specified in the same
:class:`_schema.Table`
constructor, which will take precedence. Below, the existing
table ``mytable`` will be augmented with :class:`_schema.Column`
objects
both reflected from the database, as well as the given
:class:`_schema.Column`
named "y"::
Table(
"mytable",
metadata,
Column("y", Integer),
extend_existing=True,
autoload_with=engine,
)
.. seealso::
:paramref:`_schema.Table.autoload_with`
:paramref:`_schema.Table.autoload_replace`
:paramref:`_schema.Table.keep_existing`
:param implicit_returning: True by default - indicates that
RETURNING can be used, typically by the ORM, in order to fetch
server-generated values such as primary key values and
server side defaults, on those backends which support RETURNING.
In modern SQLAlchemy there is generally no reason to alter this
setting, except for some backend specific cases
(see :ref:`mssql_triggers` in the SQL Server dialect documentation
for one such example).
:param include_columns: A list of strings indicating a subset of
columns to be loaded via the ``autoload`` operation; table columns who
aren't present in this list will not be represented on the resulting
``Table`` object. Defaults to ``None`` which indicates all columns
should be reflected.
:param resolve_fks: Whether or not to reflect :class:`_schema.Table`
objects
related to this one via :class:`_schema.ForeignKey` objects, when
:paramref:`_schema.Table.autoload_with` is
specified. Defaults to True. Set to False to disable reflection of
related tables as :class:`_schema.ForeignKey`
objects are encountered; may be
used either to save on SQL calls or to avoid issues with related tables
that can't be accessed. Note that if a related table is already present
in the :class:`_schema.MetaData` collection, or becomes present later,
a
:class:`_schema.ForeignKey` object associated with this
:class:`_schema.Table` will
resolve to that table normally.
.. seealso::
:paramref:`.MetaData.reflect.resolve_fks`
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param keep_existing: When ``True``, indicates that if this Table
is already present in the given :class:`_schema.MetaData`, ignore
further arguments within the constructor to the existing
:class:`_schema.Table`, and return the :class:`_schema.Table`
object as
originally created. This is to allow a function that wishes
to define a new :class:`_schema.Table` on first call, but on
subsequent calls will return the same :class:`_schema.Table`,
without any of the declarations (particularly constraints)
being applied a second time.
If :paramref:`_schema.Table.extend_existing` or
:paramref:`_schema.Table.keep_existing` are not set,
and the given name
of the new :class:`_schema.Table` refers to a :class:`_schema.Table`
that is
already present in the target :class:`_schema.MetaData` collection,
and
this :class:`_schema.Table`
specifies additional columns or other constructs
or flags that modify the table's state, an
error is raised. The purpose of these two mutually-exclusive flags
is to specify what action should be taken when a
:class:`_schema.Table`
is specified that matches an existing :class:`_schema.Table`,
yet specifies
additional constructs.
.. seealso::
:paramref:`_schema.Table.extend_existing`
:param listeners: A list of tuples of the form ``(<eventname>, <fn>)``
which will be passed to :func:`.event.listen` upon construction.
This alternate hook to :func:`.event.listen` allows the establishment
of a listener function specific to this :class:`_schema.Table` before
the "autoload" process begins. Historically this has been intended
for use with the :meth:`.DDLEvents.column_reflect` event, however
note that this event hook may now be associated with the
:class:`_schema.MetaData` object directly::
def listen_for_reflect(table, column_info):
"handle the column reflection event"
# ...
t = Table(
"sometable",
autoload_with=engine,
listeners=[("column_reflect", listen_for_reflect)],
)
.. seealso::
:meth:`_events.DDLEvents.column_reflect`
:param must_exist: When ``True``, indicates that this Table must already
be present in the given :class:`_schema.MetaData` collection, else
an exception is raised.
:param prefixes:
A list of strings to insert after CREATE in the CREATE TABLE
statement. They will be separated by spaces.
:param quote: Force quoting of this table's name on or off, corresponding
to ``True`` or ``False``. When left at its default of ``None``,
the column identifier will be quoted according to whether the name is
case sensitive (identifiers with at least one upper case character are
treated as case sensitive), or if it's a reserved word. This flag
is only needed to force quoting of a reserved word which is not known
by the SQLAlchemy dialect.
.. note:: setting this flag to ``False`` will not provide
case-insensitive behavior for table reflection; table reflection
will always search for a mixed-case name in a case sensitive
fashion. Case insensitive names are specified in SQLAlchemy only
by stating the name with all lower case characters.
:param quote_schema: same as 'quote' but applies to the schema identifier.
:param schema: The schema name for this table, which is required if
the table resides in a schema other than the default selected schema
for the engine's database connection. Defaults to ``None``.
If the owning :class:`_schema.MetaData` of this :class:`_schema.Table`
specifies its
own :paramref:`_schema.MetaData.schema` parameter,
then that schema name will
be applied to this :class:`_schema.Table`
if the schema parameter here is set
to ``None``. To set a blank schema name on a :class:`_schema.Table`
that
would otherwise use the schema set on the owning
:class:`_schema.MetaData`,
specify the special symbol :attr:`.BLANK_SCHEMA`.
The quoting rules for the schema name are the same as those for the
``name`` parameter, in that quoting is applied for reserved words or
case-sensitive names; to enable unconditional quoting for the schema
name, specify the flag ``quote_schema=True`` to the constructor, or use
the :class:`.quoted_name` construct to specify the name.
:param comment: Optional string that will render an SQL comment on table
creation.
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form ``<dialectname>_<argname>``.
See the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
""" # noqa: E501
if _no_init:
# don't run __init__ from __new__ by default;
# __new__ has a specific place that __init__ is called
return
super().__init__(quoted_name(name, quote))
self.metadata = metadata
if schema is None:
self.schema = metadata.schema
elif schema is BLANK_SCHEMA:
self.schema = None
else:
quote_schema = quote_schema
assert isinstance(schema, str)
self.schema = quoted_name(schema, quote_schema)
self._sentinel_column = None
self._creator_ddl = _creator_ddl
self._dropper_ddl = _dropper_ddl
self.indexes = set()
self.constraints = set()
PrimaryKeyConstraint(
_implicit_generated=True
)._set_parent_with_dispatch(self)
self.foreign_keys = set() # type: ignore
self._extra_dependencies: Set[Table] = set()
if self.schema is not None:
self.fullname = "%s.%s" % (self.schema, self.name)
else:
self.fullname = self.name
self.implicit_returning = implicit_returning
_reflect_info = kw.pop("_reflect_info", None)
self.comment = comment
if info is not None:
self.info = info
if listeners is not None:
for evt, fn in listeners:
event.listen(self, evt, fn)
self._prefixes = prefixes if prefixes else []
self._extra_kwargs(**kw)
# load column definitions from the database if 'autoload' is defined
# we do it after the table is in the singleton dictionary to support
# circular foreign keys
if autoload_with is not None:
self._autoload(
metadata,
autoload_with,
include_columns,
_extend_on=_extend_on,
_reflect_info=_reflect_info,
resolve_fks=resolve_fks,
)
# initialize all the column, etc. objects. done after reflection to
# allow user-overrides
self._init_items(
*args,
allow_replacements=extend_existing
or keep_existing
or autoload_with,
all_names={},
)
def set_creator_ddl(self, ddl: TableCreateDDL) -> None:
"""Set the table create DDL for this :class:`.Table`.
This allows the CREATE TABLE statement to be controlled or replaced
entirely when :meth:`.Table.create` or :meth:`.MetaData.create_all` is
used.
E.g.::
from sqlalchemy.schema import CreateTable
table.set_creator_ddl(CreateTable(table, if_not_exists=True))
.. versionadded:: 2.1
.. seealso::
:meth:`.Table.set_dropper_ddl`
"""
self._creator_ddl = ddl
def set_dropper_ddl(self, ddl: TableDropDDL) -> None:
"""Set the table drop DDL for this :class:`.Table`.
This allows the DROP TABLE statement to be controlled or replaced
entirely when :meth:`.Table.drop` or :meth:`.MetaData.drop_all` is
used.
E.g.::
from sqlalchemy.schema import DropTable
table.set_dropper_ddl(DropTable(table, if_exists=True))
.. versionadded:: 2.1
.. seealso::
:meth:`.Table.set_creator_ddl`
"""
self._dropper_ddl = ddl
@property
def is_view(self) -> bool:
"""True if this table, when DDL for CREATE is emitted, will emit
CREATE VIEW rather than CREATE TABLE.
.. versionadded:: 2.1
"""
return isinstance(self._creator_ddl, ddl.CreateView)
def _autoload(
self,
metadata: MetaData,
autoload_with: Union[Engine, Connection],
include_columns: Optional[Collection[str]],
exclude_columns: Collection[str] = (),
resolve_fks: bool = True,
_extend_on: Optional[Set[Table]] = None,
_reflect_info: _ReflectionInfo | None = None,
) -> None:
insp = inspection.inspect(autoload_with)
with insp._inspection_context() as conn_insp:
conn_insp.reflect_table(
self,
include_columns,
exclude_columns,
resolve_fks,
_extend_on=_extend_on,
_reflect_info=_reflect_info,
)
@property
def _sorted_constraints(self) -> List[Constraint]:
"""Return the set of constraints as a list, sorted by creation
order.
"""
return sorted(self.constraints, key=lambda c: c._creation_order)
@property
def foreign_key_constraints(self) -> Set[ForeignKeyConstraint]:
""":class:`_schema.ForeignKeyConstraint` objects referred to by this
:class:`_schema.Table`.
This list is produced from the collection of
:class:`_schema.ForeignKey`
objects currently associated.
.. seealso::
:attr:`_schema.Table.constraints`
:attr:`_schema.Table.foreign_keys`
:attr:`_schema.Table.indexes`
"""
return {
fkc.constraint
for fkc in self.foreign_keys
if fkc.constraint is not None
}
def _init_existing(self, *args: Any, **kwargs: Any) -> None:
autoload_with = kwargs.pop("autoload_with", None)
autoload = kwargs.pop("autoload", autoload_with is not None)
autoload_replace = kwargs.pop("autoload_replace", True)
schema = kwargs.pop("schema", None)
_extend_on = kwargs.pop("_extend_on", None)
_reflect_info = kwargs.pop("_reflect_info", None)
# these arguments are only used with _init()
extend_existing = kwargs.pop("extend_existing", False)
keep_existing = kwargs.pop("keep_existing", False)
assert extend_existing
assert not keep_existing
if schema and schema != self.schema:
raise exc.ArgumentError(
f"Can't change schema of existing table "
f"from '{self.schema}' to '{schema}'",
)
include_columns = kwargs.pop("include_columns", None)
if include_columns is not None:
for c in self.c:
if c.name not in include_columns:
self._columns.remove(c)
resolve_fks = kwargs.pop("resolve_fks", True)
for key in ("quote", "quote_schema"):
if key in kwargs:
raise exc.ArgumentError(
"Can't redefine 'quote' or 'quote_schema' arguments"
)
# update `self` with these kwargs, if provided
self.comment = kwargs.pop("comment", self.comment)
self.implicit_returning = kwargs.pop(
"implicit_returning", self.implicit_returning
)
self.info = kwargs.pop("info", self.info)
exclude_columns: _typing_Sequence[str]
if autoload:
if not autoload_replace:
# don't replace columns already present.
# we'd like to do this for constraints also however we don't
# have simple de-duping for unnamed constraints.
exclude_columns = [c.name for c in self.c]
else:
exclude_columns = ()
self._autoload(
self.metadata,
autoload_with,
include_columns,
exclude_columns,
resolve_fks,
_extend_on=_extend_on,
_reflect_info=_reflect_info,
)
all_names = {c.name: c for c in self.c}
self._extra_kwargs(**kwargs)
self._init_items(*args, allow_replacements=True, all_names=all_names)
def _extra_kwargs(self, **kwargs: Any) -> None:
self._validate_dialect_kwargs(kwargs)
def _init_collections(self) -> None:
pass
def _reset_exported(self) -> None:
pass
@util.ro_non_memoized_property
def _autoincrement_column(self) -> Optional[Column[int]]:
return self.primary_key._autoincrement_column
@util.ro_memoized_property
def _sentinel_column_characteristics(
self,
) -> _SentinelColumnCharacterization:
"""determine a candidate column (or columns, in case of a client
generated composite primary key) which can be used as an
"insert sentinel" for an INSERT statement.
The returned structure, :class:`_SentinelColumnCharacterization`,
includes all the details needed by :class:`.Dialect` and
:class:`.SQLCompiler` to determine if these column(s) can be used
as an INSERT..RETURNING sentinel for a particular database
dialect.
.. versionadded:: 2.0.10
"""
sentinel_is_explicit = False
sentinel_is_autoinc = False
the_sentinel: Optional[_typing_Sequence[Column[Any]]] = None
# see if a column was explicitly marked "insert_sentinel=True".
explicit_sentinel_col = self._sentinel_column
if explicit_sentinel_col is not None:
the_sentinel = (explicit_sentinel_col,)
sentinel_is_explicit = True
autoinc_col = self._autoincrement_column
if sentinel_is_explicit and explicit_sentinel_col is autoinc_col:
assert autoinc_col is not None
sentinel_is_autoinc = True
elif explicit_sentinel_col is None and autoinc_col is not None:
the_sentinel = (autoinc_col,)
sentinel_is_autoinc = True
default_characterization = _SentinelDefaultCharacterization.UNKNOWN
if the_sentinel:
the_sentinel_zero = the_sentinel[0]
if the_sentinel_zero.identity:
if the_sentinel_zero.identity._increment_is_negative:
if sentinel_is_explicit:
raise exc.InvalidRequestError(
"Can't use IDENTITY default with negative "
"increment as an explicit sentinel column"
)
else:
if sentinel_is_autoinc:
autoinc_col = None
sentinel_is_autoinc = False
the_sentinel = None
else:
default_characterization = (
_SentinelDefaultCharacterization.IDENTITY
)
elif (
the_sentinel_zero.default is None
and the_sentinel_zero.server_default is None
):
if the_sentinel_zero.nullable:
raise exc.InvalidRequestError(
f"Column {the_sentinel_zero} has been marked as a "
"sentinel "
"column with no default generation function; it "
"at least needs to be marked nullable=False assuming "
"user-populated sentinel values will be used."
)
default_characterization = (
_SentinelDefaultCharacterization.NONE
)
elif the_sentinel_zero.default is not None:
if the_sentinel_zero.default.is_sentinel:
default_characterization = (
_SentinelDefaultCharacterization.SENTINEL_DEFAULT
)
elif default_is_sequence(the_sentinel_zero.default):
if the_sentinel_zero.default._increment_is_negative:
if sentinel_is_explicit:
raise exc.InvalidRequestError(
"Can't use SEQUENCE default with negative "
"increment as an explicit sentinel column"
)
else:
if sentinel_is_autoinc:
autoinc_col = None
sentinel_is_autoinc = False
the_sentinel = None
default_characterization = (
_SentinelDefaultCharacterization.SEQUENCE
)
elif the_sentinel_zero.default.is_callable:
default_characterization = (
_SentinelDefaultCharacterization.CLIENTSIDE
)
elif the_sentinel_zero.server_default is not None:
if sentinel_is_explicit:
raise exc.InvalidRequestError(
f"Column {the_sentinel[0]} can't be a sentinel column "
"because it uses an explicit server side default "
"that's not the Identity() default."
)
default_characterization = (
_SentinelDefaultCharacterization.SERVERSIDE
)
if the_sentinel is None and self.primary_key:
assert autoinc_col is None
# determine for non-autoincrement pk if all elements are
# client side
for _pkc in self.primary_key:
if _pkc.server_default is not None or (
_pkc.default and not _pkc.default.is_callable
):
break
else:
the_sentinel = tuple(self.primary_key)
default_characterization = (
_SentinelDefaultCharacterization.CLIENTSIDE
)
return _SentinelColumnCharacterization(
the_sentinel,
sentinel_is_explicit,
sentinel_is_autoinc,
default_characterization,
)
@property
def autoincrement_column(self) -> Optional[Column[int]]:
"""Returns the :class:`.Column` object which currently represents
the "auto increment" column, if any, else returns None.
This is based on the rules for :class:`.Column` as defined by the
:paramref:`.Column.autoincrement` parameter, which generally means the
column within a single integer column primary key constraint that is
not constrained by a foreign key. If the table does not have such
a primary key constraint, then there's no "autoincrement" column.
A :class:`.Table` may have only one column defined as the
"autoincrement" column.
.. versionadded:: 2.0.4
.. seealso::
:paramref:`.Column.autoincrement`
"""
return self._autoincrement_column
@property
def key(self) -> str:
"""Return the 'key' for this :class:`_schema.Table`.
This value is used as the dictionary key within the
:attr:`_schema.MetaData.tables` collection. It is typically the same
as that of :attr:`_schema.Table.name` for a table with no
:attr:`_schema.Table.schema`
set; otherwise it is typically of the form
``schemaname.tablename``.
"""
return _get_table_key(self.name, self.schema)
def __repr__(self) -> str:
return "Table(%s)" % ", ".join(
[repr(self.name)]
+ [repr(self.metadata)]
+ [repr(x) for x in self.columns]
+ ["%s=%s" % (k, repr(getattr(self, k))) for k in ["schema"]]
)
def __str__(self) -> str:
return _get_table_key(self.description, self.schema)
def add_is_dependent_on(self, table: Table) -> None:
"""Add a 'dependency' for this Table.
This is another Table object which must be created
first before this one can, or dropped after this one.
Usually, dependencies between tables are determined via
ForeignKey objects. However, for other situations that
create dependencies outside of foreign keys (rules, inheriting),
this method can manually establish such a link.
"""
self._extra_dependencies.add(table)
def _insert_col_impl(
self,
column: ColumnClause[Any],
*,
index: Optional[int] = None,
replace_existing: bool = False,
) -> None:
try:
column._set_parent_with_dispatch(
self,
allow_replacements=replace_existing,
all_names={c.name: c for c in self.c},
index=index,
)
except exc.DuplicateColumnError as de:
raise exc.DuplicateColumnError(
f"{de.args[0]} Specify replace_existing=True to "
"Table.append_column() or Table.insert_column() to replace an "
"existing column."
) from de
def insert_column(
self,
column: ColumnClause[Any],
index: int,
*,
replace_existing: bool = False,
) -> None:
"""Insert a :class:`_schema.Column` to this :class:`_schema.Table` at
a specific position.
Behavior is identical to :meth:`.Table.append_column` except that
the index position can be controlled using the
:paramref:`.Table.insert_column.index`
parameter.
:param replace_existing:
see :paramref:`.Table.append_column.replace_existing`
:param index: integer index to insert the new column.
.. versionadded:: 2.1
"""
self._insert_col_impl(
column, index=index, replace_existing=replace_existing
)
def append_column(
self, column: ColumnClause[Any], *, replace_existing: bool = False
) -> None:
"""Append a :class:`_schema.Column` to this :class:`_schema.Table`.
The "key" of the newly added :class:`_schema.Column`, i.e. the
value of its ``.key`` attribute, will then be available
in the ``.c`` collection of this :class:`_schema.Table`, and the
column definition will be included in any CREATE TABLE, SELECT,
UPDATE, etc. statements generated from this :class:`_schema.Table`
construct.
Note that this does **not** change the definition of the table
as it exists within any underlying database, assuming that
table has already been created in the database. Relational
databases support the addition of columns to existing tables
using the SQL ALTER command, which would need to be
emitted for an already-existing table that doesn't contain
the newly added column.
:param replace_existing: When ``True``, allows replacing existing
            columns. When ``False``, the default, an error will be raised
            if a column with the same ``.key`` already exists.
.. versionadded:: 1.4.0
.. seealso::
:meth:`.Table.insert_column`
"""
self._insert_col_impl(column, replace_existing=replace_existing)
def append_constraint(self, constraint: Union[Index, Constraint]) -> None:
"""Append a :class:`_schema.Constraint` to this
:class:`_schema.Table`.
This has the effect of the constraint being included in any
future CREATE TABLE statement, assuming specific DDL creation
events have not been associated with the given
:class:`_schema.Constraint` object.
Note that this does **not** produce the constraint within the
relational database automatically, for a table that already exists
in the database. To add a constraint to an
existing relational database table, the SQL ALTER command must
be used. SQLAlchemy also provides the
:class:`.AddConstraint` construct which can produce this SQL when
invoked as an executable clause.
"""
constraint._set_parent_with_dispatch(self)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
metadata = parent
assert isinstance(metadata, MetaData)
metadata._add_table(self.name, self.schema, self)
self.metadata = metadata
def create(
self,
bind: _CreateDropBind,
checkfirst: Union[bool, CheckFirst] = CheckFirst.TYPES,
) -> None:
"""Issue a ``CREATE`` statement for this
:class:`_schema.Table`, using the given
:class:`.Connection` or :class:`.Engine`
for connectivity.
.. seealso::
:meth:`_schema.MetaData.create_all`.
"""
# the default is to only check for schema objects
bind._run_ddl_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
def drop(
self,
bind: _CreateDropBind,
checkfirst: Union[bool, CheckFirst] = CheckFirst.NONE,
) -> None:
"""Issue a ``DROP`` statement for this
:class:`_schema.Table`, using the given
:class:`.Connection` or :class:`.Engine` for connectivity.
.. seealso::
:meth:`_schema.MetaData.drop_all`.
"""
bind._run_ddl_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
@util.deprecated(
"1.4",
":meth:`_schema.Table.tometadata` is renamed to "
":meth:`_schema.Table.to_metadata`",
)
def tometadata(
self,
metadata: MetaData,
schema: Union[str, Literal[SchemaConst.RETAIN_SCHEMA]] = RETAIN_SCHEMA,
referred_schema_fn: Optional[
Callable[
[Table, Optional[str], ForeignKeyConstraint, Optional[str]],
Optional[str],
]
] = None,
name: Optional[str] = None,
) -> Table:
"""Return a copy of this :class:`_schema.Table`
associated with a different
:class:`_schema.MetaData`.
See :meth:`_schema.Table.to_metadata` for a full description.
"""
return self.to_metadata(
metadata,
schema=schema,
referred_schema_fn=referred_schema_fn,
name=name,
)
def to_metadata(
self,
metadata: MetaData,
schema: Union[str, Literal[SchemaConst.RETAIN_SCHEMA]] = RETAIN_SCHEMA,
referred_schema_fn: Optional[
Callable[
[Table, Optional[str], ForeignKeyConstraint, Optional[str]],
Optional[str],
]
] = None,
name: Optional[str] = None,
) -> Table:
"""Return a copy of this :class:`_schema.Table` associated with a
different :class:`_schema.MetaData`.
E.g.::
m1 = MetaData()
user = Table("user", m1, Column("id", Integer, primary_key=True))
m2 = MetaData()
user_copy = user.to_metadata(m2)
.. versionchanged:: 1.4 The :meth:`_schema.Table.to_metadata` function
was renamed from :meth:`_schema.Table.tometadata`.
:param metadata: Target :class:`_schema.MetaData` object,
into which the
new :class:`_schema.Table` object will be created.
:param schema: optional string name indicating the target schema.
Defaults to the special symbol :attr:`.RETAIN_SCHEMA` which indicates
that no change to the schema name should be made in the new
:class:`_schema.Table`. If set to a string name, the new
:class:`_schema.Table`
will have this new name as the ``.schema``. If set to ``None``, the
schema will be set to that of the schema set on the target
:class:`_schema.MetaData`, which is typically ``None`` as well,
unless
set explicitly::
m2 = MetaData(schema="newschema")
# user_copy_one will have "newschema" as the schema name
user_copy_one = user.to_metadata(m2, schema=None)
m3 = MetaData() # schema defaults to None
# user_copy_two will have None as the schema name
user_copy_two = user.to_metadata(m3, schema=None)
:param referred_schema_fn: optional callable which can be supplied
in order to provide for the schema name that should be assigned
to the referenced table of a :class:`_schema.ForeignKeyConstraint`.
The callable accepts this parent :class:`_schema.Table`, the
target schema that we are changing to, the
:class:`_schema.ForeignKeyConstraint` object, and the existing
"target schema" of that constraint. The function should return the
string schema name that should be applied. To reset the schema
to "none", return the symbol :data:`.BLANK_SCHEMA`. To effect no
change, return ``None`` or :data:`.RETAIN_SCHEMA`.
.. versionchanged:: 1.4.33 The ``referred_schema_fn`` function
may return the :data:`.BLANK_SCHEMA` or :data:`.RETAIN_SCHEMA`
symbols.
E.g.::
def referred_schema_fn(table, to_schema, constraint, referred_schema):
if referred_schema == "base_tables":
return referred_schema
else:
return to_schema
new_table = table.to_metadata(
m2, schema="alt_schema", referred_schema_fn=referred_schema_fn
)
:param name: optional string name indicating the target table name.
If not specified or None, the table name is retained. This allows
a :class:`_schema.Table` to be copied to the same
:class:`_schema.MetaData` target
with a new name.
""" # noqa: E501
if name is None:
name = self.name
actual_schema: Optional[str]
if schema is RETAIN_SCHEMA:
actual_schema = self.schema
elif schema is None:
actual_schema = metadata.schema
else:
actual_schema = schema
key = _get_table_key(name, actual_schema)
if key in metadata.tables:
util.warn(
f"Table '{self.description}' already exists within the given "
"MetaData - not copying."
)
return metadata.tables[key]
args = []
for col in self.columns:
args.append(col._copy(schema=actual_schema, _to_metadata=metadata))
table = Table(
name,
metadata,
schema=actual_schema,
comment=self.comment,
*args,
**self.kwargs,
)
if self._creator_ddl is not None:
table._creator_ddl = self._creator_ddl.to_metadata(metadata, table)
if self._dropper_ddl is not None:
table._dropper_ddl = self._dropper_ddl.to_metadata(metadata, table)
for const in self.constraints:
if isinstance(const, ForeignKeyConstraint):
referred_schema = const._referred_schema
if referred_schema_fn:
fk_constraint_schema = referred_schema_fn(
self, actual_schema, const, referred_schema
)
else:
fk_constraint_schema = (
actual_schema
if referred_schema == self.schema
else None
)
table.append_constraint(
const._copy(
schema=fk_constraint_schema, target_table=table
)
)
elif not const._type_bound:
# skip unique constraints that would be generated
# by the 'unique' flag on Column
if const._column_flag:
continue
table.append_constraint(
const._copy(schema=actual_schema, target_table=table)
)
for index in self.indexes:
# skip indexes that would be generated
# by the 'index' flag on Column
if index._column_flag:
continue
Index(
index.name,
unique=index.unique,
*[
_copy_expression(expr, self, table)
for expr in index._table_bound_expressions
],
_table=table,
**index.kwargs,
)
return self._schema_item_copy(table)
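# Hypothetical usage sketch (not part of the original source): copying a table
# into a second MetaData while controlling referenced schemas through the
# referred_schema_fn hook documented above; the table and schema names below
# are made up.
#
#     m2 = MetaData()
#
#     def keep_original_schema(table, to_schema, constraint, referred_schema):
#         return referred_schema  # leave FK targets where they already point
#
#     user_reporting = user.to_metadata(
#         m2, schema="reporting", referred_schema_fn=keep_original_schema
#     )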
| Table |
python | simonw__datasette | datasette/app.py | {
"start": 77147,
"end": 88408
} | class ____:
def __init__(self, datasette, routes):
self.ds = datasette
self.routes = routes or []
async def __call__(self, scope, receive, send):
# Because we care about "foo/bar" v.s. "foo%2Fbar" we decode raw_path ourselves
path = scope["path"]
raw_path = scope.get("raw_path")
if raw_path:
path = raw_path.decode("ascii")
path = path.partition("?")[0]
return await self.route_path(scope, receive, send, path)
async def route_path(self, scope, receive, send, path):
# Strip off base_url if present before routing
base_url = self.ds.setting("base_url")
if base_url != "/" and path.startswith(base_url):
path = "/" + path[len(base_url) :]
scope = dict(scope, route_path=path)
request = Request(scope, receive)
# Populate request_messages if ds_messages cookie is present
try:
request._messages = self.ds.unsign(
request.cookies.get("ds_messages", ""), "messages"
)
except BadSignature:
pass
scope_modifications = {}
# Apply force_https_urls, if set
if (
self.ds.setting("force_https_urls")
and scope["type"] == "http"
and scope.get("scheme") != "https"
):
scope_modifications["scheme"] = "https"
# Handle authentication
default_actor = scope.get("actor") or None
actor = None
for actor in pm.hook.actor_from_request(datasette=self.ds, request=request):
actor = await await_me_maybe(actor)
if actor:
break
scope_modifications["actor"] = actor or default_actor
scope = dict(scope, **scope_modifications)
match, view = resolve_routes(self.routes, path)
if match is None:
return await self.handle_404(request, send)
new_scope = dict(scope, url_route={"kwargs": match.groupdict()})
request.scope = new_scope
try:
response = await view(request, send)
if response:
self.ds._write_messages_to_response(request, response)
await response.asgi_send(send)
return
except NotFound as exception:
return await self.handle_404(request, send, exception)
except Forbidden as exception:
# Try the forbidden() plugin hook
for custom_response in pm.hook.forbidden(
datasette=self.ds, request=request, message=exception.args[0]
):
custom_response = await await_me_maybe(custom_response)
assert (
custom_response
), "Default forbidden() hook should have been called"
return await custom_response.asgi_send(send)
except Exception as exception:
return await self.handle_exception(request, send, exception)
async def handle_404(self, request, send, exception=None):
# If path contains % encoding, redirect to tilde encoding
if "%" in request.path:
# Try the same path but with "%" replaced by "~"
# and "~" replaced with "~7E"
# and "." replaced with "~2E"
new_path = (
request.path.replace("~", "~7E").replace("%", "~").replace(".", "~2E")
)
if request.query_string:
new_path += "?{}".format(request.query_string)
await asgi_send_redirect(send, new_path)
return
# If URL has a trailing slash, redirect to URL without it
path = request.scope.get(
"raw_path", request.scope["path"].encode("utf8")
).partition(b"?")[0]
context = {}
if path.endswith(b"/"):
path = path.rstrip(b"/")
if request.scope["query_string"]:
path += b"?" + request.scope["query_string"]
await asgi_send_redirect(send, path.decode("latin1"))
else:
# Is there a pages/* template matching this path?
route_path = request.scope.get("route_path", request.scope["path"])
# Jinja requires template names to use "/" even on Windows
template_name = "pages" + route_path + ".html"
# Build a list of pages/blah/{name}.html matching expressions
environment = self.ds.get_jinja_environment(request)
pattern_templates = [
filepath
for filepath in environment.list_templates()
if "{" in filepath and filepath.startswith("pages/")
]
page_routes = [
(route_pattern_from_filepath(filepath[len("pages/") :]), filepath)
for filepath in pattern_templates
]
try:
template = environment.select_template([template_name])
except TemplateNotFound:
template = None
if template is None:
# Try for a pages/blah/{name}.html template match
for regex, wildcard_template in page_routes:
match = regex.match(route_path)
if match is not None:
context.update(match.groupdict())
template = wildcard_template
break
if template:
headers = {}
status = [200]
def custom_header(name, value):
headers[name] = value
return ""
def custom_status(code):
status[0] = code
return ""
def custom_redirect(location, code=302):
status[0] = code
headers["Location"] = location
return ""
def raise_404(message=""):
raise NotFoundExplicit(message)
context.update(
{
"custom_header": custom_header,
"custom_status": custom_status,
"custom_redirect": custom_redirect,
"raise_404": raise_404,
}
)
try:
body = await self.ds.render_template(
template,
context,
request=request,
view_name="page",
)
except NotFoundExplicit as e:
await self.handle_exception(request, send, e)
return
# Pull content-type out into separate parameter
content_type = "text/html; charset=utf-8"
matches = [k for k in headers if k.lower() == "content-type"]
if matches:
content_type = headers[matches[0]]
await asgi_send(
send,
body,
status=status[0],
headers=headers,
content_type=content_type,
)
else:
await self.handle_exception(request, send, exception or NotFound("404"))
async def handle_exception(self, request, send, exception):
responses = []
for hook in pm.hook.handle_exception(
datasette=self.ds,
request=request,
exception=exception,
):
response = await await_me_maybe(hook)
if response is not None:
responses.append(response)
assert responses, "Default exception handler should have returned something"
# Even if there are multiple responses use just the first one
response = responses[0]
await response.asgi_send(send)
_cleaner_task_str_re = re.compile(r"\S*site-packages/")
def _cleaner_task_str(task):
s = str(task)
# This has something like the following in it:
# running at /Users/simonw/Dropbox/Development/datasette/venv-3.7.5/lib/python3.7/site-packages/uvicorn/main.py:361>
# Clean up everything up to and including site-packages
return _cleaner_task_str_re.sub("", s)
def wrap_view(view_fn_or_class, datasette):
is_function = isinstance(view_fn_or_class, types.FunctionType)
if is_function:
return wrap_view_function(view_fn_or_class, datasette)
else:
if not isinstance(view_fn_or_class, type):
raise ValueError("view_fn_or_class must be a function or a class")
return wrap_view_class(view_fn_or_class, datasette)
def wrap_view_class(view_class, datasette):
async def async_view_for_class(request, send):
instance = view_class()
if inspect.iscoroutinefunction(instance.__call__):
return await async_call_with_supported_arguments(
instance.__call__,
scope=request.scope,
receive=request.receive,
send=send,
request=request,
datasette=datasette,
)
else:
return call_with_supported_arguments(
instance.__call__,
scope=request.scope,
receive=request.receive,
send=send,
request=request,
datasette=datasette,
)
async_view_for_class.view_class = view_class
return async_view_for_class
def wrap_view_function(view_fn, datasette):
@functools.wraps(view_fn)
async def async_view_fn(request, send):
if inspect.iscoroutinefunction(view_fn):
response = await async_call_with_supported_arguments(
view_fn,
scope=request.scope,
receive=request.receive,
send=send,
request=request,
datasette=datasette,
)
else:
response = call_with_supported_arguments(
view_fn,
scope=request.scope,
receive=request.receive,
send=send,
request=request,
datasette=datasette,
)
if response is not None:
return response
return async_view_fn
def permanent_redirect(path, forward_query_string=False, forward_rest=False):
return wrap_view(
lambda request, send: Response.redirect(
path
+ (request.url_vars["rest"] if forward_rest else "")
+ (
("?" + request.query_string)
if forward_query_string and request.query_string
else ""
),
status=301,
),
datasette=None,
)
_curly_re = re.compile(r"({.*?})")
def route_pattern_from_filepath(filepath):
# Drop the ".html" suffix
if filepath.endswith(".html"):
filepath = filepath[: -len(".html")]
re_bits = ["/"]
for bit in _curly_re.split(filepath):
if _curly_re.match(bit):
re_bits.append(f"(?P<{bit[1:-1]}>[^/]*)")
else:
re_bits.append(re.escape(bit))
return re.compile("^" + "".join(re_bits) + "$")
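# Hypothetical illustration (not part of the original source): for a custom
# page template path such as "topics/{topic}.html" (the "pages/" prefix is
# stripped by the caller), the helper above builds a regex whose named groups
# become template context variables:
#
#     pattern = route_pattern_from_filepath("topics/{topic}.html")
#     pattern.match("/topics/datasette").groupdict()  # {'topic': 'datasette'}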
| DatasetteRouter |
python | astropy__astropy | astropy/timeseries/binned.py | {
"start": 408,
"end": 16645
} | class ____(BaseTimeSeries):
"""
A class to represent binned time series data in tabular form.
`~astropy.timeseries.BinnedTimeSeries` provides a class for
representing time series as a collection of values of different
quantities measured in time bins (for time series with values
sampled at specific times, see the `~astropy.timeseries.TimeSeries`
class). `~astropy.timeseries.BinnedTimeSeries` is a sub-class of
`~astropy.table.QTable` and thus provides all the standard table
    manipulation methods available to tables, but it also provides
additional conveniences for dealing with time series, such as a
flexible initializer for setting up the times, and attributes to
access the start/center/end time of bins.
See also: https://docs.astropy.org/en/stable/timeseries/
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize time series. This does not need to contain the
times, which can be provided separately, but if it does contain the
times they should be in columns called ``'time_bin_start'`` and
``'time_bin_size'`` to be automatically recognized.
time_bin_start : `~astropy.time.Time` or iterable
The times of the start of each bin - this can be either given
directly as a `~astropy.time.Time` array or as any iterable that
initializes the `~astropy.time.Time` class. If this is given, then
the remaining time-related arguments should not be used. This can also
be a scalar value if ``time_bin_size`` is provided.
time_bin_end : `~astropy.time.Time` or iterable
The times of the end of each bin - this can be either given directly
as a `~astropy.time.Time` array or as any value or iterable that
initializes the `~astropy.time.Time` class. If this is given, then the
remaining time-related arguments should not be used. This can only be
given if ``time_bin_start`` is an array of values. If ``time_bin_end``
is a scalar, time bins are assumed to be contiguous, such that the end
of each bin is the start of the next one, and ``time_bin_end`` gives
the end time for the last bin. If ``time_bin_end`` is an array, the
time bins do not need to be contiguous. If this argument is provided,
``time_bin_size`` should not be provided.
time_bin_size : `~astropy.time.TimeDelta` or `~astropy.units.Quantity`
The size of the time bins, either as a scalar value (in which case all
time bins will be assumed to have the same duration) or as an array of
values (in which case each time bin can have a different duration).
If this argument is provided, ``time_bin_end`` should not be provided.
n_bins : int
The number of time bins for the series. This is only used if both
``time_bin_start`` and ``time_bin_size`` are provided and are scalar
values.
**kwargs : dict, optional
Additional keyword arguments are passed to `~astropy.table.QTable`.
"""
_required_columns = ["time_bin_start", "time_bin_size"]
def __init__(
self,
data=None,
*,
time_bin_start=None,
time_bin_end=None,
time_bin_size=None,
n_bins=None,
**kwargs,
):
super().__init__(data=data, **kwargs)
# For some operations, an empty time series needs to be created, then
# columns added one by one. We should check that when columns are added
# manually, time is added first and is of the right type.
if (
data is None
and time_bin_start is None
and time_bin_end is None
and time_bin_size is None
and n_bins is None
):
self._required_columns_relax = True
return
# First if time_bin_start and time_bin_end have been given in the table data, we
# should extract them and treat them as if they had been passed as
# keyword arguments.
if "time_bin_start" in self.colnames:
if time_bin_start is None:
time_bin_start = self.columns["time_bin_start"]
else:
raise TypeError(
"'time_bin_start' has been given both in the table "
"and as a keyword argument"
)
if "time_bin_size" in self.colnames:
if time_bin_size is None:
time_bin_size = self.columns["time_bin_size"]
else:
raise TypeError(
"'time_bin_size' has been given both in the table "
"and as a keyword argument"
)
if time_bin_start is None:
raise TypeError("'time_bin_start' has not been specified")
if time_bin_end is None and time_bin_size is None:
raise TypeError(
"Either 'time_bin_size' or 'time_bin_end' should be specified"
)
if not isinstance(time_bin_start, (Time, TimeDelta)):
time_bin_start = Time(time_bin_start)
if time_bin_end is not None and not isinstance(time_bin_end, (Time, TimeDelta)):
time_bin_end = Time(time_bin_end)
if time_bin_size is not None and not isinstance(
time_bin_size, (Quantity, TimeDelta)
):
raise TypeError("'time_bin_size' should be a Quantity or a TimeDelta")
if isinstance(time_bin_size, TimeDelta):
time_bin_size = time_bin_size.sec * u.s
if n_bins is not None and time_bin_size is not None:
if not (time_bin_start.isscalar and time_bin_size.isscalar):
raise TypeError(
"'n_bins' cannot be specified if 'time_bin_start' or "
"'time_bin_size' are not scalar'"
)
if time_bin_start.isscalar:
# We interpret this as meaning that this is the start of the
# first bin and that the bins are contiguous. In this case,
# we require time_bin_size to be specified.
if time_bin_size is None:
raise TypeError(
"'time_bin_start' is scalar, so 'time_bin_size' is required"
)
if time_bin_size.isscalar:
if data is not None:
if n_bins is not None:
if n_bins != len(self):
raise TypeError(
"'n_bins' has been given and it is not the "
"same length as the input data."
)
else:
n_bins = len(self)
time_bin_size = np.repeat(time_bin_size, n_bins)
time_delta = np.cumsum(time_bin_size)
time_bin_end = time_bin_start + time_delta
# Now shift the array so that the first entry is 0
time_delta = np.roll(time_delta, 1)
time_delta[0] = 0.0 * u.s
# Make time_bin_start into an array
time_bin_start = time_bin_start + time_delta
else:
if len(self.colnames) > 0 and len(time_bin_start) != len(self):
raise ValueError(
f"Length of 'time_bin_start' ({len(time_bin_start)}) should match "
f"table length ({len(self)})"
)
if time_bin_end is not None:
if time_bin_end.isscalar:
times = time_bin_start.copy()
times[:-1] = times[1:]
times[-1] = time_bin_end
time_bin_end = times
time_bin_size = (time_bin_end - time_bin_start).sec * u.s
if time_bin_size.isscalar:
time_bin_size = np.repeat(time_bin_size, len(self))
with self._delay_required_column_checks():
if "time_bin_start" in self.colnames:
self.remove_column("time_bin_start")
if "time_bin_size" in self.colnames:
self.remove_column("time_bin_size")
self.add_column(time_bin_start, index=0, name="time_bin_start")
self.add_index("time_bin_start")
self.add_column(time_bin_size, index=1, name="time_bin_size")
@property
def time_bin_start(self):
"""
The start times of all the time bins.
"""
return self["time_bin_start"]
@property
def time_bin_center(self):
"""
The center times of all the time bins.
"""
return self["time_bin_start"] + self["time_bin_size"] * 0.5
@property
def time_bin_end(self):
"""
The end times of all the time bins.
"""
return self["time_bin_start"] + self["time_bin_size"]
@property
def time_bin_size(self):
"""
The sizes of all the time bins.
"""
return self["time_bin_size"]
def __getitem__(self, item):
if self._is_list_or_tuple_of_str(item):
if "time_bin_start" not in item or "time_bin_size" not in item:
out = QTable(
[self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices,
)
out._groups = groups.TableGroups(
out, indices=self.groups._indices, keys=self.groups._keys
)
return out
return super().__getitem__(item)
@classmethod
def read(
cls,
filename,
time_bin_start_column=None,
time_bin_end_column=None,
time_bin_size_column=None,
time_bin_size_unit=None,
time_format=None,
time_scale=None,
format=None,
*args,
**kwargs,
):
"""
Read and parse a file and returns a `astropy.timeseries.BinnedTimeSeries`.
This method uses the unified I/O infrastructure in Astropy which makes
it easy to define readers/writers for various classes
(https://docs.astropy.org/en/stable/io/unified.html). By default, this
method will try and use readers defined specifically for the
`astropy.timeseries.BinnedTimeSeries` class - however, it is also
possible to use the ``format`` keyword to specify formats defined for
the `astropy.table.Table` class - in this case, you will need to also
provide the column names for column containing the start times for the
bins, as well as other column names (see the Parameters section below
for details)::
>>> from astropy.timeseries.binned import BinnedTimeSeries
>>> ts = BinnedTimeSeries.read('binned.dat', format='ascii.ecsv',
... time_bin_start_column='date_start',
... time_bin_end_column='date_end') # doctest: +SKIP
Parameters
----------
filename : str
File to parse.
format : str
File format specifier.
time_bin_start_column : str
The name of the column with the start time for each bin.
time_bin_end_column : str, optional
The name of the column with the end time for each bin. Either this
option or ``time_bin_size_column`` should be specified.
time_bin_size_column : str, optional
The name of the column with the size for each bin. Either this
option or ``time_bin_end_column`` should be specified.
time_bin_size_unit : `astropy.units.Unit`, optional
If ``time_bin_size_column`` is specified but does not have a unit
set in the table, you can specify the unit manually.
time_format : str, optional
The time format for the start and end columns.
time_scale : str, optional
The time scale for the start and end columns.
*args : tuple, optional
Positional arguments passed through to the data reader.
**kwargs : dict, optional
Keyword arguments passed through to the data reader.
Returns
-------
out : `astropy.timeseries.binned.BinnedTimeSeries`
BinnedTimeSeries corresponding to the file.
"""
try:
# First we try the readers defined for the BinnedTimeSeries class
return super().read(filename, *args, format=format, **kwargs)
except TypeError:
# Otherwise we fall back to the default Table readers
if time_bin_start_column is None:
raise ValueError(
"``time_bin_start_column`` should be provided since the default"
" Table readers are being used."
)
if time_bin_end_column is None and time_bin_size_column is None:
raise ValueError(
"Either `time_bin_end_column` or `time_bin_size_column` should be"
" provided."
)
elif time_bin_end_column is not None and time_bin_size_column is not None:
raise ValueError(
"Cannot specify both `time_bin_end_column` and"
" `time_bin_size_column`."
)
table = Table.read(filename, *args, format=format, **kwargs)
if time_bin_start_column in table.colnames:
time_bin_start = Time(
table.columns[time_bin_start_column],
scale=time_scale,
format=time_format,
)
table.remove_column(time_bin_start_column)
else:
raise ValueError(
f"Bin start time column '{time_bin_start_column}' not found in the"
" input data."
)
if time_bin_end_column is not None:
if time_bin_end_column in table.colnames:
time_bin_end = Time(
table.columns[time_bin_end_column],
scale=time_scale,
format=time_format,
)
table.remove_column(time_bin_end_column)
else:
raise ValueError(
f"Bin end time column '{time_bin_end_column}' not found in the"
" input data."
)
time_bin_size = None
elif time_bin_size_column is not None:
if time_bin_size_column in table.colnames:
time_bin_size = table.columns[time_bin_size_column]
table.remove_column(time_bin_size_column)
else:
raise ValueError(
f"Bin size column '{time_bin_size_column}' not found in the"
" input data."
)
if time_bin_size.unit is None:
if time_bin_size_unit is None or not isinstance(
time_bin_size_unit, u.UnitBase
):
raise ValueError(
"The bin size unit should be specified as an astropy Unit"
" using ``time_bin_size_unit``."
)
time_bin_size = time_bin_size * time_bin_size_unit
else:
time_bin_size = u.Quantity(time_bin_size)
time_bin_end = None
if time_bin_start.isscalar and time_bin_size.isscalar:
return cls(
data=table,
time_bin_start=time_bin_start,
time_bin_end=time_bin_end,
time_bin_size=time_bin_size,
n_bins=len(table),
)
else:
return cls(
data=table,
time_bin_start=time_bin_start,
time_bin_end=time_bin_end,
time_bin_size=time_bin_size,
)
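# Hypothetical usage sketch (not part of the original source): building a small
# BinnedTimeSeries with a scalar start time and a uniform bin size, using the
# astropy.units alias ``u`` that is already imported in this module.
#
#     ts = BinnedTimeSeries(
#         time_bin_start="2021-01-01T00:00:00",
#         time_bin_size=3 * u.s,
#         data={"flux": [1.0, 4.0, 5.0]},
#     )
#     ts.time_bin_end  # three bin end times, 3 s, 6 s and 9 s after the start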
| BinnedTimeSeries |
python | tensorflow__tensorflow | tensorflow/python/framework/composite_tensor_test.py | {
"start": 3232,
"end": 17464
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.parameters([
{'structure': CT(0),
'expected': [0],
'paths': [('CT',)]},
{'structure': CT('a'),
'expected': ['a'],
'paths': [('CT',)]},
{'structure': CT(['a', 'b', 'c']),
'expected': ['a', 'b', 'c'],
'paths': [('CT', 0), ('CT', 1), ('CT', 2)]},
{'structure': CT({'x': 'a', 'y': 'b', 'z': 'c'}),
'expected': ['a', 'b', 'c'],
'paths': [('CT', 'x'), ('CT', 'y'), ('CT', 'z')]},
{'structure': [{'k1': CT('a')}, CT(['b', {'x': CT({'y': 'c'})}])],
'expected': ['a', 'b', 'c'],
'paths': [(0, 'k1', 'CT'), (1, 'CT', 0), (1, 'CT', 1, 'x', 'CT', 'y')]},
{'structure': CT(0),
'expand_composites': False,
'expected': [CT(0)],
'paths': [()]},
{'structure': [{'k1': CT('a')}, CT(['b', {'x': CT({'y': 'c'})}])],
'expand_composites': False,
'expected': [CT('a'), CT(['b', {'x': CT({'y': 'c'})}])],
'paths': [(0, 'k1'), (1,)]},
]) # pyformat: disable
def testNestFlatten(self, structure, expected, paths, expand_composites=True):
result = nest.flatten(structure, expand_composites=expand_composites)
self.assertEqual(result, expected)
result_with_paths = nest.flatten_with_tuple_paths(
structure, expand_composites=expand_composites)
self.assertEqual(result_with_paths, list(zip(paths, expected)))
string_paths = ['/'.join(str(p) for p in path) for path in paths] # pylint: disable=g-complex-comprehension
result_with_string_paths = nest.flatten_with_joined_string_paths(
structure, expand_composites=expand_composites)
self.assertEqual(result_with_string_paths,
list(zip(string_paths, expected)))
flat_paths_result = list(
nest.yield_flat_paths(structure, expand_composites=expand_composites))
self.assertEqual(flat_paths_result, paths)
@parameterized.parameters([
{'s1': [1, 2, 3],
's2': [CT(['a', 'b']), 'c', 'd'],
'expand_composites': False,
'expected': [CT(['a', 'b']), 'c', 'd'],
'paths': [(0,), (1,), (2,)]},
{'s1': [CT([1, 2, 3])],
's2': [5],
'expand_composites': False,
'expected': [5],
'paths': [(0,)]},
{'s1': [[CT([9, 9, 9])], 999, {'y': CT([9, 9])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expand_composites': False,
'expected': [CT([1, 2, 3]), 100, CT([CT([4, 5]), 6])],
'paths': [(0, 0), (1,), (2, 'y')]},
{'s1': [[CT([9, 9, 9])], 999, {'y': CT([CT([9, 9]), 9])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([5, 6])}],
'expand_composites': False,
'expected': [CT([1, 2, 3]), 100, CT([5, 6])],
'paths': [(0, 0), (1,), (2, 'y')]},
]) # pyformat: disable
def testNestFlattenUpTo(self, s1, s2, expected, paths,
expand_composites=True):
result = nest.flatten_up_to(s1, s2, expand_composites=expand_composites)
self.assertEqual(expected, result)
result_with_paths = nest.flatten_with_tuple_paths_up_to(
s1, s2, expand_composites=expand_composites)
self.assertEqual(result_with_paths, list(zip(paths, expected)))
@parameterized.parameters([
{'structure': CT(0),
'sequence': [5],
'expected': CT(5)},
{'structure': CT(['a', 'b', 'c']),
'sequence': ['A', CT(['b']), {'x': 'y'}],
'expected': CT(['A', CT(['b']), {'x': 'y'}])},
{'structure': [{'k1': CT('a')}, CT(['b', {'x': CT({'y': 'c'})}])],
'sequence': ['A', 'B', 'C'],
'expected': [{'k1': CT('A')}, CT(['B', {'x': CT({'y': 'C'})}])]},
{'structure': [{'k1': CT('a')}, CT(['b', {'x': CT({'y': 'c'})}])],
'sequence': ['A', 'B'],
'expand_composites': False,
'expected': [{'k1': 'A'}, 'B']},
{'structure': CT(0, metadata='abc'),
'sequence': [5],
'expected': CT(5, metadata='abc')},
]) # pyformat: disable
def testNestPackSequenceAs(self,
structure,
sequence,
expected,
expand_composites=True):
result = nest.pack_sequence_as(
structure, sequence, expand_composites=expand_composites)
self.assertEqual(result, expected)
@parameterized.parameters([
{'s1': CT('abc'), 's2': CT('xyz')},
{'s1': CT(['a', 'b', 'c']), 's2': CT(['d', 'e', 'f'])},
{'s1': [1, CT([10]), CT(200, metadata='xyz')],
's2': [8, CT([55]), CT(100, metadata='xyz')]},
{'s1': CT('abc'), 's2': CT3('xyz')},
{'s1': CT(['a', 'b', 'c']), 's2': CT3(['d', 'e', 'f'])},
{'s1': [1, CT([10]), CT(200, metadata='xyz')],
's2': [8, CT([55]), CT3(100, metadata='xyz')]},
]) # pyformat: disable
def testNestAssertSameStructure(self, s1, s2, expand_composites=True):
nest.assert_same_structure(s1, s2, expand_composites=expand_composites)
nest.assert_shallow_structure(s1, s2, expand_composites=expand_composites)
@parameterized.parameters([
{'s1': CT(0), 's2': CT(['x'])},
{'s1': CT([1]), 's2': CT([1, 2])},
{'s1': CT({'x': 1}), 's2': CT({'y': 1})},
{'s1': CT(0), 's2': CT(0, metadata='xyz')},
{'s1': CT(0, metadata='xyz'), 's2': CT(0)},
{'s1': CT(0, metadata='xyz'), 's2': CT(0, metadata='abc')},
{'s1': CT(['a', 'b', 'c']), 's2': CT(['d', 'e'])},
{'s1': [1, CT(['a']), CT('b', metadata='xyz')],
's2': [8, CT([55, 66]), CT(100, metadata='abc')]},
{'s1': CT(0), 's2': CT2(0)},
]) # pyformat: disable
def testNestAssertSameStructureCompositeMismatch(self,
s1,
s2,
error=ValueError):
# s1 and s2 have the same structure if expand_composites=False; but
# different structures if expand_composites=True.
nest.assert_same_structure(s1, s2, expand_composites=False)
nest.assert_shallow_structure(s1, s2, expand_composites=False)
with self.assertRaises(error): # pylint: disable=g-error-prone-assert-raises
nest.assert_same_structure(s1, s2, expand_composites=True)
@parameterized.parameters([
# Note: there are additional test cases in testNestAssertSameStructure.
{'s1': [1], 's2': [CT(1)]},
{'s1': [[CT([1, 2, 3])], 100, {'y': CT([5, 6])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expand_composites': False},
{'s1': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([5, 6])}],
'expand_composites': False},
]) # pyformat: disable
def testNestAssertShallowStructure(self, s1, s2, expand_composites=True):
nest.assert_shallow_structure(s1, s2, expand_composites=expand_composites)
@parameterized.parameters([
# Note: there are additional test cases in
# testNestAssertSameStructureCompositeMismatch.
{'s1': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([5, 6])}]},
{'s1': CT([1, 2, 3]),
's2': [1, 2, 3],
'check_types': False},
]) # pyformat: disable
def testNestAssertShallowStructureCompositeMismatch(self,
s1,
s2,
check_types=True):
with self.assertRaises((TypeError, ValueError)): # pylint: disable=g-error-prone-assert-raises
nest.assert_shallow_structure(
s1, s2, expand_composites=True, check_types=check_types)
@parameterized.parameters([
{'structure': CT(1, metadata=2),
'expected': CT(11, metadata=2)},
{'structure': CT({'x': 1, 'y': [2, 3]}, metadata=2),
'expected': CT({'x': 11, 'y': [12, 13]}, metadata=2)},
{'structure': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expected': [[CT([11, 12, 13])], 110, {'y': CT([CT([14, 15]), 16])}]},
]) # pyformat: disable
def testNestMapStructure(self, structure, expected, expand_composites=True):
func = lambda x: x + 10
result = nest.map_structure(
func, structure, expand_composites=expand_composites)
self.assertEqual(result, expected)
@parameterized.parameters([
{'s1': [[CT([1, 2, 3])], 100, {'y': 4}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expected': [[CT([11, 12, 13])], 110, {'y': CT([CT([4, 5]), 6])}]}
]) # pyformat: disable
def testNestMapStructureUpTo(self, s1, s2, expected):
func = lambda x: x + 10 if isinstance(x, int) else x
result = nest.map_structure_up_to(s1, func, s2, expand_composites=True)
self.assertEqual(result, expected)
@parameterized.parameters([
{'structure': CT('a'),
'expected': CT('CT:a')},
{'structure': CT(['a', 'b']),
'expected': CT(['CT/0:a', 'CT/1:b'])},
{'structure': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expected': [
[CT(['0/0/CT/0:1', '0/0/CT/1:2', '0/0/CT/2:3'])],
'1:100',
{'y': CT([CT(['2/y/CT/0/CT/0:4', '2/y/CT/0/CT/1:5']),
'2/y/CT/1:6'])}]},
]) # pyformat: disable
def testNestMapStructureWithPaths(self,
structure,
expected,
expand_composites=True):
def func1(path, x):
return '%s:%s' % (path, x)
result = nest.map_structure_with_paths(
func1, structure, expand_composites=expand_composites)
self.assertEqual(result, expected)
# Use the same test cases for map_structure_with_tuple_paths.
def func2(tuple_path, x):
return '%s:%s' % ('/'.join(str(v) for v in tuple_path), x)
result = nest.map_structure_with_tuple_paths(
func2, structure, expand_composites=expand_composites)
self.assertEqual(result, expected)
@parameterized.parameters([
{'s1': [[CT([1, 2, 3])], 100, {'y': [4, 5]}],
's2': [[CT([1, 2, 3])], 100, {'y': [CT([4, 5]), 6]}],
'expected': [
[CT(['0/0/CT/0:1', '0/0/CT/1:2', '0/0/CT/2:3'])],
('1:100'),
{'y': ['2/y/0:CT((4, 5), None)', '2/y/1:6']}]},
]) # pyformat: disable
def testNestMapStructureWithTuplePathsUpTo(self, s1, s2, expected):
def func(tuple_path, x):
return '%s:%s' % ('/'.join(str(v) for v in tuple_path), x)
result = nest.map_structure_with_tuple_paths_up_to(
s1, func, s2, expand_composites=True)
self.assertEqual(result, expected)
def testNestGetTraverseShallowStructure(self):
func = lambda t: not (isinstance(t, CT) and t.metadata == 'B')
structure = [CT([1, 2], metadata='A'), CT([CT(3)], metadata='B')]
result = nest.get_traverse_shallow_structure(
func, structure, expand_composites=True)
expected = [CT([True, True], metadata='A'), False]
self.assertEqual(result, expected)
def testMemoryIsFreed(self):
# Note: we use `np.array` values for CT and `set` values for
# metadata because we need to construct weakrefs to them. Other builtin
# types, such as `list` and `tuple`, do not support weakrefs.
ct1 = CT(np.array([1, 2]), set(['no', 'leaks']))
ct2 = CT(np.array([3, 4]), set(['no', 'leaks']))
ct3 = CT(np.array([5, 6]), set(['other', 'metadata']))
# Note: map_structure exercises flatten, pack_sequence_as, and
# assert_same_structure.
func = lambda x, y: x + y
ct4 = nest.map_structure(func, ct1, ct2, expand_composites=True)
# Check that the exception-raising path in assert_same_structure
# doesn't leak any objects.
with self.assertRaises(ValueError):
nest.map_structure(func, ct2, ct3, expand_composites=True)
if hasattr(sys, 'exc_clear'):
sys.exc_clear() # Remove any references in exception stack traces.
refs = []
for ct in [ct1, ct2, ct3, ct4]:
refs.append(weakref.ref(ct))
refs.append(weakref.ref(ct.components))
refs.append(weakref.ref(ct.metadata))
del ct # pylint: disable=undefined-loop-variable
for ref in refs:
self.assertIsNotNone(ref())
del ct1, ct2, ct3, ct4
gc.collect()
for ref in refs:
self.assertIsNone(ref())
# pylint: disable=g-long-lambda
@parameterized.named_parameters([
('IndexedSlicesNoDenseShape', lambda: indexed_slices.IndexedSlices(
constant_op.constant([1, 2, 3]), constant_op.constant([2, 8, 4]))),
('IndexedSlicesInt32DenseShape', lambda: indexed_slices.IndexedSlices(
constant_op.constant([1, 2, 3]), constant_op.constant([2, 8, 4]),
constant_op.constant([10], dtypes.int32))),
('IndexedSlicesInt64DenseShape', lambda: indexed_slices.IndexedSlices(
constant_op.constant([[1, 2], [3, 4]]), constant_op.constant([2, 8]),
constant_op.constant([10, 2], dtypes.int64))),
('RaggedTensorRaggedRank1',
lambda: ragged_factory_ops.constant([[1, 2], [3]])),
('RaggedTensorRaggedRank2',
lambda: ragged_factory_ops.constant([[[1, 2], [3]], [[6, 7, 8]]])),
('SparseTensor',
lambda: sparse_tensor.SparseTensor([[3], [7]], ['a', 'b'], [10])),
('Nested structure', lambda: {
'a':
indexed_slices.IndexedSlices(
constant_op.constant([1, 2, 3]),
constant_op.constant([2, 8, 4])),
'b': [
ragged_factory_ops.constant([[1, 2], [3]]),
sparse_tensor.SparseTensor([[3], [7]], ['a', 'b'], [10])
]
}),
])
def testAssertSameStructureWithValueAndTypeSpec(self, value_func):
value = value_func()
spec = nest.map_structure(type_spec.type_spec_from_value, value,
expand_composites=False)
nest.assert_same_structure(value, spec, expand_composites=True)
def testConvertVariablesToTensors(self):
ct = CT(1)
result = ct._convert_variables_to_tensors()
self.assertIs(result, ct)
result2 = composite_tensor.convert_variables_to_tensors(ct)
self.assertIs(result2, ct)
if __name__ == '__main__':
googletest.main()
| CompositeTensorTest |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_not_be_null_and_column_to_not_be_empty.py | {
"start": 1260,
"end": 13504
} | class ____(ColumnMapExpectation):
"""Expect column values to not be null and column to not be empty.
To be counted as an exception, values must be explicitly null or missing, such as a NULL in PostgreSQL or an
np.NaN in pandas. Empty strings don't count as null unless they have been coerced to a null type.
expect_column_values_to_not_be_null_and_column_to_not_be_empty is a \
[Column Map Expectation](https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations).
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Successful if at least mostly fraction of values match the expectation. \
For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly).
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
See Also:
[expect_column_values_to_be_null](https://greatexpectations.io/expectations/expect_column_values_to_be_null)
[expect_column_values_not_to_be_null](https://greatexpectations.io/expectations/expect_column_values_not_to_be_null)
"""
min_value: Optional[Comparable] = None
max_value: Optional[Comparable] = None
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "column map expectation"],
"contributors": [
"@tmilitino",
],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
map_metric = "column_values.nonnull"
args_keys = ("column",)
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"not_has_null": ["test", "foo", "for", "bar", "boo"],
"has_null": ["test", "foo", "for", "bar", np.nan],
},
"tests": [
{
"title": "not_null_values",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "not_has_null"},
"out": {
"success": True,
},
},
{
"title": "has_null",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "has_null"},
"out": {
"success": False,
},
},
],
},
{
"data": {"empty_dataframe": []},
"tests": [
{
"title": "not_has_values",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "empty_dataframe"},
"out": {
"success": False,
},
},
],
},
]
@classmethod
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column", RendererValueType.STRING),
("mostly", RendererValueType.NUMBER),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if params.mostly and params.mostly.value < 1.0:
renderer_configuration = cls._add_mostly_pct_param(
renderer_configuration=renderer_configuration
)
template_str = "values must not be null, at least $mostly_pct % of the time."
else:
template_str = "values must never be null."
if renderer_configuration.include_column_name:
template_str = f"$column {template_str}"
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "mostly", "row_condition", "condition_parser"],
)
if params["mostly"] is not None and params["mostly"] < 1.0:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
if include_column_name:
template_str = (
"$column values must not be null, at least $mostly_pct % of the time."
)
else:
template_str = "values must not be null, at least $mostly_pct % of the time."
else:
if include_column_name:
template_str = "$column values must never be null."
else:
template_str = "values must never be null."
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = f"{conditional_template_str}, then {template_str}"
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
@classmethod
@renderer(renderer_type=LegacyDiagnosticRendererType.OBSERVED_VALUE)
def _diagnostic_observed_value_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
result_dict = result.result
try:
null_percent = result_dict["unexpected_percent"]
return num_to_str(100 - null_percent, precision=5, use_locale=True) + "% not null"
except KeyError:
return "unknown % not null"
except TypeError:
return "NaN% not null"
@classmethod
@renderer(renderer_type=LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_MISSING_COUNT_ROW)
def _descriptive_column_properties_table_missing_count_row_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
assert result, "Must pass in result."
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Missing (n)",
"tooltip": {
"content": "expect_column_values_to_not_be_null_and_column_to_not_be_empty"
},
},
}
),
result.result["unexpected_count"]
if "unexpected_count" in result.result and result.result["unexpected_count"] is not None
else "--",
]
@classmethod
@renderer(
renderer_type=LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_MISSING_PERCENT_ROW
)
def _descriptive_column_properties_table_missing_percent_row_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
assert result, "Must pass in result."
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Missing (%)",
"tooltip": {
"content": "expect_column_values_to_not_be_null_and_column_to_not_be_empty"
},
},
}
),
f"{result.result['unexpected_percent']:.1f}%"
if "unexpected_percent" in result.result
and result.result["unexpected_percent"] is not None
else "--",
]
def _validate(
self,
metrics: Dict,
runtime_configuration: Optional[dict] = None,
execution_engine: Optional[ExecutionEngine] = None,
):
result_format = self._get_result_format(runtime_configuration=runtime_configuration)
mostly = self._get_success_kwargs().get("mostly", self._get_default_value("mostly"))
total_count = metrics.get("table.row_count")
unexpected_count = metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
)
if total_count is None or total_count == 0:
success = False
else:
success_ratio = (total_count - unexpected_count) / total_count
success = success_ratio >= mostly
nonnull_count = None
return _format_map_output(
result_format=parse_result_format(result_format),
success=success,
element_count=metrics.get("table.row_count"),
nonnull_count=nonnull_count,
unexpected_count=metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
),
unexpected_list=metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}"
),
unexpected_index_list=metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_LIST.value}"
),
unexpected_index_query=metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_QUERY.value}"
),
)
if __name__ == "__main__":
ExpectColumnValuesToNotBeNullAndColumnToNotBeEmpty().print_diagnostic_checklist(
show_failed_tests=True
)
| ExpectColumnValuesToNotBeNullAndColumnToNotBeEmpty |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/integrations/snowflake/pandas_and_pyspark.py | {
"start": 406,
"end": 1648
} | class ____(SnowflakeIOManager):
@staticmethod
def type_handlers():
"""type_handlers should return a list of the TypeHandlers that the I/O manager can use.
Here we return the SnowflakePandasTypeHandler and SnowflakePySparkTypeHandler so that the I/O
manager can store Pandas DataFrames and PySpark DataFrames.
"""
return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]
@staticmethod
def default_load_type() -> Optional[type]:
"""If an asset is not annotated with an return type, default_load_type will be used to
determine which TypeHandler to use to store and load the output.
In this case, unannotated assets will be stored and loaded as Pandas DataFrames.
"""
return pd.DataFrame
defs = Definitions(
assets=[iris_dataset, rose_dataset],
resources={
"io_manager": SnowflakePandasPySparkIOManager(
account="abc1234.us-east-1",
user=EnvVar("SNOWFLAKE_USER"),
password=EnvVar("SNOWFLAKE_PASSWORD"),
database="FLOWERS",
role="writer",
warehouse="PLANTS",
schema="IRIS",
)
},
)
# end_example
| SnowflakePandasPySparkIOManager |
python | Farama-Foundation__Gymnasium | tests/utils/test_env_checker_with_gym.py | {
"start": 281,
"end": 443
} | class ____(gym.Env):
def __init__(self):
self.action_space = gym.spaces.Discrete(2)
self.observation_space = gym.spaces.Discrete(2)
| IncorrectEnv |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-s3/source_s3/source.py | {
"start": 199,
"end": 3274
} | class ____(SourceFilesAbstractSpec, BaseModel):
class Config:
title = "S3 Source Spec"
class S3Provider(BaseModel):
class Config:
title = "S3: Amazon Web Services"
# SourceFilesAbstractSpec field are ordered 10 apart to allow subclasses to insert their own spec's fields interspersed
schema_extra = {"order": 11, "description": "Use this to load files from S3 or S3-compatible services"}
bucket: str = Field(description="Name of the S3 bucket where the file(s) exist.", order=0)
aws_access_key_id: Optional[str] = Field(
title="AWS Access Key ID",
default=None,
description="In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper "
"permissions. If accessing publicly available data, this field is not necessary.",
airbyte_secret=True,
always_show=True,
order=1,
)
aws_secret_access_key: Optional[str] = Field(
title="AWS Secret Access Key",
default=None,
description="In order to access private Buckets stored on AWS S3, this connector requires credentials with the proper "
"permissions. If accessing publicly available data, this field is not necessary.",
airbyte_secret=True,
always_show=True,
order=2,
)
role_arn: Optional[str] = Field(
title=f"AWS Role ARN",
default=None,
description="Specifies the Amazon Resource Name (ARN) of an IAM role that you want to use to perform operations "
f"requested using this profile. Set the External ID to the Airbyte workspace ID, which can be found in the URL of this page.",
always_show=True,
order=7,
)
path_prefix: str = Field(
default="",
description="By providing a path-like prefix (e.g. myFolder/thisTable/) under which all the relevant files sit, "
"we can optimize finding these in S3. This is optional but recommended if your bucket contains many "
"folders/files which you don't need to replicate.",
order=3,
)
endpoint: str = Field("", description="Endpoint to an S3 compatible service. Leave empty to use AWS.", order=4)
region_name: Optional[str] = Field(
title="AWS Region",
default=None,
description="AWS region where the S3 bucket is located. If not provided, the region will be determined automatically.",
order=5,
)
start_date: Optional[str] = Field(
title="Start Date",
description="UTC date and time in the format 2017-01-25T00:00:00Z. Any file modified before this date will not be replicated.",
examples=["2021-01-01T00:00:00Z"],
format="date-time",
pattern="^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$",
order=6,
)
provider: S3Provider
| SourceS3Spec |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/strategies/_internal/shared.py | {
"start": 723,
"end": 2214
} | class ____(SearchStrategy[Ex]):
def __init__(self, base: SearchStrategy[Ex], key: Hashable | None = None):
super().__init__()
self.key = key
self.base = base
def __repr__(self) -> str:
if self.key is not None:
return f"shared({self.base!r}, key={self.key!r})"
else:
return f"shared({self.base!r})"
def calc_label(self) -> int:
return self.base.calc_label()
# Ideally would be -> Ex, but key collisions with different-typed values are
# possible. See https://github.com/HypothesisWorks/hypothesis/issues/4301.
def do_draw(self, data: ConjectureData) -> Any:
key = self.key or self
if key not in data._shared_strategy_draws:
drawn = data.draw(self.base)
data._shared_strategy_draws[key] = (drawn, self)
else:
drawn, other = data._shared_strategy_draws[key]
# Check that the strategies shared under this key are equivalent
if self.label != other.label:
warnings.warn(
f"Different strategies are shared under {key=}. This"
" risks drawing values that are not valid examples for the strategy,"
" or that have a narrower range than expected."
f" Conflicting strategies: ({self!r}, {other!r}).",
HypothesisWarning,
stacklevel=1,
)
return drawn
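# Hypothetical usage sketch (not part of the original source): this strategy
# backs the public hypothesis.strategies.shared() helper; two shared strategies
# that use the same key draw the same value within a single test case.
#
#     from hypothesis import given, strategies as st
#
#     @given(
#         a=st.shared(st.integers(), key="token"),
#         b=st.shared(st.integers(), key="token"),
#     )
#     def test_same_draw(a, b):
#         assert a == b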
| SharedStrategy |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 128856,
"end": 146077
} | class ____(Enum):
# With Python 3.11+ you can use StrEnum
generic = "Generic"
accessibility = "Accessibility"
airplane = "Airplane"
airplane_take_off = "AirplaneTakeOff"
album = "Album"
alert = "Alert"
alert_urgent = "AlertUrgent"
animal = "Animal"
animal_cat = "AnimalCat"
animal_dog = "AnimalDog"
animal_rabbit = "AnimalRabbit"
animal_turtle = "AnimalTurtle"
app_folder = "AppFolder"
app_generic = "AppGeneric"
apple = "Apple"
approvals_app = "ApprovalsApp"
archive = "Archive"
archive_multiple = "ArchiveMultiple"
arrow_trending_lines = "ArrowTrendingLines"
art = "Art"
atom = "Atom"
attach = "Attach"
automobile = "Automobile"
autosum = "Autosum"
backpack = "Backpack"
badge = "Badge"
balloon = "Balloon"
bank = "Bank"
barcode_scanner = "BarcodeScanner"
basketball = "Basketball"
battery0 = "Battery0"
battery10 = "Battery10"
beach = "Beach"
beaker = "Beaker"
bed = "Bed"
bin_full = "BinFull"
bird = "Bird"
bluetooth = "Bluetooth"
board = "Board"
board_games = "BoardGames"
book = "Book"
bookmark = "Bookmark"
bookmark_multiple = "BookmarkMultiple"
bot = "Bot"
bowl_chopsticks = "BowlChopsticks"
box = "Box"
box_multiple = "BoxMultiple"
brain_circuit = "BrainCircuit"
branch = "Branch"
branch_fork = "BranchFork"
branch_request = "BranchRequest"
bridge = "Bridge"
briefcase = "Briefcase"
briefcase_medical = "BriefcaseMedical"
broad_activity_feed = "BroadActivityFeed"
broom = "Broom"
bug = "Bug"
building = "Building"
building_bank = "BuildingBank"
building_factory = "BuildingFactory"
building_government = "BuildingGovernment"
building_home = "BuildingHome"
building_lighthouse = "BuildingLighthouse"
building_multiple = "BuildingMultiple"
building_retail = "BuildingRetail"
building_retail_more = "BuildingRetailMore"
building_retail_toolbox = "BuildingRetailToolbox"
building_shop = "BuildingShop"
building_skyscraper = "BuildingSkyscraper"
calculator = "Calculator"
calendar_ltr = "CalendarLtr"
calendar_rtl = "CalendarRtl"
call = "Call"
calligraphy_pen = "CalligraphyPen"
camera = "Camera"
camera_dome = "CameraDome"
car = "Car"
cart = "Cart"
cat = "Cat"
certificate = "Certificate"
chart_multiple = "ChartMultiple"
chat = "Chat"
chat_multiple = "ChatMultiple"
chat_video = "ChatVideo"
check = "Check"
checkbox_checked = "CheckboxChecked"
checkbox_unchecked = "CheckboxUnchecked"
checkmark = "Checkmark"
chess = "Chess"
city = "City"
class_ = "Class"
classification = "Classification"
clipboard = "Clipboard"
clipboard_data_bar = "ClipboardDataBar"
clipboard_pulse = "ClipboardPulse"
clipboard_task = "ClipboardTask"
clock = "Clock"
clock_alarm = "ClockAlarm"
cloud = "Cloud"
cloud_words = "CloudWords"
code = "Code"
collections = "Collections"
comment = "Comment"
comment_multiple = "CommentMultiple"
communication = "Communication"
compass_northwest = "CompassNorthwest"
conference_room = "ConferenceRoom"
connector = "Connector"
constellation = "Constellation"
contact_card = "ContactCard"
cookies = "Cookies"
couch = "Couch"
credit_card_person = "CreditCardPerson"
credit_card_toolbox = "CreditCardToolbox"
cube = "Cube"
cube_multiple = "CubeMultiple"
cube_tree = "CubeTree"
currency_dollar_euro = "CurrencyDollarEuro"
currency_dollar_rupee = "CurrencyDollarRupee"
data_area = "DataArea"
database = "Database"
database_multiple = "DatabaseMultiple"
data_funnel = "DataFunnel"
data_histogram = "DataHistogram"
data_line = "DataLine"
data_pie = "DataPie"
data_scatter = "DataScatter"
data_sunburst = "DataSunburst"
data_treemap = "DataTreemap"
data_waterfall = "DataWaterfall"
data_whisker = "DataWhisker"
dentist = "Dentist"
design_ideas = "DesignIdeas"
desktop = "Desktop"
desktop_mac = "DesktopMac"
developer_board = "DeveloperBoard"
device_meeting_room = "DeviceMeetingRoom"
diagram = "Diagram"
dialpad = "Dialpad"
diamond = "Diamond"
dinosaur = "Dinosaur"
directions = "Directions"
disaster = "Disaster"
diversity = "Diversity"
dna = "DNA"
doctor = "Doctor"
document = "Document"
document_data = "DocumentData"
document_landscape = "DocumentLandscape"
document_multiple = "DocumentMultiple"
document_pdf = "DocumentPdf"
document_queue = "DocumentQueue"
document_text = "DocumentText"
dog = "Dog"
door = "Door"
door_tag = "DoorTag"
drafts = "Drafts"
drama = "Drama"
drink_beer = "DrinkBeer"
drink_coffee = "DrinkCoffee"
drink_margarita = "DrinkMargarita"
drink_to_go = "DrinkToGo"
drink_wine = "DrinkWine"
drive_train = "DriveTrain"
drop = "Drop"
dual_screen = "DualScreen"
dumbbell = "Dumbbell"
earth = "Earth"
emoji = "Emoji"
emoji_angry = "EmojiAngry"
emoji_hand = "EmojiHand"
emoji_laugh = "EmojiLaugh"
emoji_meh = "EmojiMeh"
emoji_multiple = "EmojiMultiple"
emoji_sad = "EmojiSad"
emoji_sad_slight = "EmojiSadSlight"
emoji_smile_slight = "EmojiSmileSlight"
emoji_sparkle = "EmojiSparkle"
emoji_surprise = "EmojiSurprise"
engine = "Engine"
eraser = "Eraser"
eye = "Eye"
eyedropper = "Eyedropper"
fax = "Fax"
fingerprint = "Fingerprint"
first_aid = "FirstAid"
flag = "Flag"
flash = "Flash"
flashlight = "Flashlight"
flow = "Flow"
flowchart = "Flowchart"
folder = "Folder"
folder_open = "FolderOpen"
folder_open_vertical = "FolderOpenVertical"
folder_person = "FolderPerson"
folder_zip = "FolderZip"
food = "Food"
food_apple = "FoodApple"
food_cake = "FoodCake"
food_egg = "FoodEgg"
food_grains = "FoodGrains"
food_pizza = "FoodPizza"
food_toast = "FoodToast"
galaxy = "Galaxy"
games = "Games"
gantt_chart = "GanttChart"
gas = "Gas"
gas_pump = "GasPump"
gauge = "Gauge"
gavel = "Gavel"
gift = "Gift"
gift_card = "GiftCard"
glasses = "Glasses"
globe = "Globe"
globe_surface = "GlobeSurface"
grid = "Grid"
grid_dots = "GridDots"
grid_kanban = "GridKanban"
guardian = "Guardian"
guest = "Guest"
guitar = "Guitar"
hand_left = "HandLeft"
hand_right = "HandRight"
handshake = "Handshake"
hard_drive = "HardDrive"
hat_graduation = "HatGraduation"
headphones = "Headphones"
headphones_sound_wave = "HeadphonesSoundWave"
headset = "Headset"
headset_vr = "HeadsetVr"
heart = "Heart"
heart_broken = "HeartBroken"
heart_circle = "HeartCircle"
heart_human = "HeartHuman"
heart_pulse = "HeartPulse"
history = "History"
home = "Home"
home_more = "HomeMore"
home_person = "HomePerson"
icons = "Icons"
image = "Image"
image_globe = "ImageGlobe"
image_multiple = "ImageMultiple"
iot = "Iot"
joystick = "Joystick"
justice = "Justice"
key = "Key"
keyboard = "Keyboard"
keyboard_layout_split = "KeyboardLayoutSplit"
key_multiple = "KeyMultiple"
languages = "Languages"
laptop = "Laptop"
lasso = "Lasso"
launcher_settings = "LauncherSettings"
layer = "Layer"
leaf = "Leaf"
leaf_one = "LeafOne"
leaf_three = "LeafThree"
leaf_two = "LeafTwo"
library = "Library"
lightbulb = "Lightbulb"
lightbulb_filament = "LightbulbFilament"
likert = "Likert"
link = "Link"
local_language = "LocalLanguage"
location = "Location"
lock_closed = "LockClosed"
lock_multiple = "LockMultiple"
lock_open = "LockOpen"
lottery = "Lottery"
luggage = "Luggage"
mail = "Mail"
mail_inbox = "MailInbox"
mail_multiple = "MailMultiple"
map = "Map"
map_pin = "MapPin"
markdown = "Markdown"
math_formula = "MathFormula"
math_symbols = "MathSymbols"
max = "Max"
megaphone = "Megaphone"
megaphone_loud = "MegaphoneLoud"
mention = "Mention"
mic = "Mic"
microscope = "Microscope"
midi = "Midi"
molecule = "Molecule"
money = "Money"
money_hand = "MoneyHand"
mountain = "Mountain"
movie_camera = "MovieCamera"
movies_and_tv = "MoviesAndTv"
music_note = "MusicNote"
music_note1 = "MusicNote1"
music_note2 = "MusicNote2"
my_location = "MyLocation"
n_by_n = "NByN"
n_by_one = "NByOne"
news = "News"
notable_people = "NotablePeople"
note = "Note"
notebook = "Notebook"
notepad = "Notepad"
notepad_person = "NotepadPerson"
one_by_n = "OneByN"
one_by_one = "OneByOne"
options = "Options"
organization = "Organization"
organization_horizontal = "OrganizationHorizontal"
oval = "Oval"
paint_brush = "PaintBrush"
paint_bucket = "PaintBucket"
partly_sunny_weather = "PartlySunnyWeather"
password = "Password"
patch = "Patch"
patient = "Patient"
payment = "Payment"
pen = "Pen"
pentagon = "Pentagon"
people = "People"
people_audience = "PeopleAudience"
people_call = "PeopleCall"
people_community = "PeopleCommunity"
people_money = "PeopleMoney"
people_queue = "PeopleQueue"
people_team = "PeopleTeam"
people_toolbox = "PeopleToolbox"
person = "Person"
person_board = "PersonBoard"
person_call = "PersonCall"
person_chat = "PersonChat"
person_feedback = "PersonFeedback"
person_support = "PersonSupport"
person_voice = "PersonVoice"
phone = "Phone"
phone_desktop = "PhoneDesktop"
phone_laptop = "PhoneLaptop"
phone_shake = "PhoneShake"
phone_tablet = "PhoneTablet"
phone_vibrate = "PhoneVibrate"
photo_filter = "PhotoFilter"
pi = "Pi"
picture_in_picture = "PictureInPicture"
pilates = "Pilates"
pill = "Pill"
pin = "Pin"
pipeline = "Pipeline"
planet = "Planet"
playing_cards = "PlayingCards"
plug_connected = "PlugConnected"
plug_disconnected = "PlugDisconnected"
point_scan = "PointScan"
poll = "Poll"
power = "Power"
predictions = "Predictions"
premium = "Premium"
presenter = "Presenter"
preview_link = "PreviewLink"
print = "Print"
production = "Production"
prohibited = "Prohibited"
projection_screen = "ProjectionScreen"
protocol_handler = "ProtocolHandler"
pulse = "Pulse"
pulse_square = "PulseSquare"
puzzle_piece = "PuzzlePiece"
qr_code = "QrCode"
radar = "Radar"
ram = "Ram"
reading_list = "ReadingList"
real_estate = "RealEstate"
receipt = "Receipt"
reward = "Reward"
rhombus = "Rhombus"
ribbon = "Ribbon"
ribbon_star = "RibbonStar"
road_cone = "RoadCone"
rocket = "Rocket"
router = "Router"
rss = "Rss"
ruler = "Ruler"
run = "Run"
running = "Running"
satellite = "Satellite"
save = "Save"
savings = "Savings"
scales = "Scales"
scan = "Scan"
scratchpad = "Scratchpad"
screen_person = "ScreenPerson"
screenshot = "Screenshot"
search = "Search"
serial_port = "SerialPort"
server = "Server"
server_multiple = "ServerMultiple"
service_bell = "ServiceBell"
settings = "Settings"
shapes = "Shapes"
shield = "Shield"
shield_task = "ShieldTask"
shopping_bag = "ShoppingBag"
signature = "Signature"
sim = "Sim"
sleep = "Sleep"
smartwatch = "Smartwatch"
sound_source = "SoundSource"
sound_wave_circle = "SoundWaveCircle"
sparkle = "Sparkle"
speaker0 = "Speaker0"
speaker2 = "Speaker2"
sport = "Sport"
sport_american_football = "SportAmericanFootball"
sport_baseball = "SportBaseball"
sport_basketball = "SportBasketball"
sport_hockey = "SportHockey"
sport_soccer = "SportSoccer"
square_multiple = "SquareMultiple"
square_shadow = "SquareShadow"
squares_nested = "SquaresNested"
stack = "Stack"
stack_star = "StackStar"
star = "Star"
star_filled = "StarFilled"
star_half = "StarHalf"
star_line_horizontal3 = "StarLineHorizontal3"
star_one_quarter = "StarOneQuarter"
star_three_quarter = "StarThreeQuarter"
status = "Status"
steps = "Steps"
stethoscope = "Stethoscope"
sticker = "Sticker"
storage = "Storage"
stream = "Stream"
stream_input = "StreamInput"
stream_input_output = "StreamInputOutput"
stream_output = "StreamOutput"
style_guide = "StyleGuide"
sub_grid = "SubGrid"
subtitles = "Subtitles"
surface_earbuds = "SurfaceEarbuds"
surface_hub = "SurfaceHub"
symbols = "Symbols"
syringe = "Syringe"
system = "System"
tab_desktop = "TabDesktop"
tab_inprivate_account = "TabInprivateAccount"
table = "Table"
table_image = "TableImage"
table_multiple = "TableMultiple"
tablet = "Tablet"
tabs = "Tabs"
tag = "Tag"
tag_circle = "TagCircle"
tag_multiple = "TagMultiple"
target = "Target"
target_arrow = "TargetArrow"
teddy = "Teddy"
temperature = "Temperature"
tent = "Tent"
tetris_app = "TetrisApp"
textbox = "Textbox"
text_quote = "TextQuote"
thinking = "Thinking"
thumb_dislike = "ThumbDislike"
thumb_like = "ThumbLike"
ticket_diagonal = "TicketDiagonal"
ticket_horizontal = "TicketHorizontal"
time_and_weather = "TimeAndWeather"
timeline = "Timeline"
timer = "Timer"
toolbox = "Toolbox"
top_speed = "TopSpeed"
translate = "Translate"
transmission = "Transmission"
tree_deciduous = "TreeDeciduous"
tree_evergreen = "TreeEvergreen"
trophy = "Trophy"
tv = "Tv"
tv_usb = "TvUsb"
umbrella = "Umbrella"
usb_plug = "UsbPlug"
usb_stick = "UsbStick"
vault = "Vault"
vehicle_bicycle = "VehicleBicycle"
vehicle_bus = "VehicleBus"
vehicle_cab = "VehicleCab"
vehicle_car = "VehicleCar"
vehicle_car_collision = "VehicleCarCollision"
vehicle_car_profile_ltr = "VehicleCarProfileLtr"
vehicle_car_profile_rtl = "VehicleCarProfileRtl"
vehicle_ship = "VehicleShip"
vehicle_subway = "VehicleSubway"
vehicle_truck = "VehicleTruck"
vehicle_truck_bag = "VehicleTruckBag"
vehicle_truck_cube = "VehicleTruckCube"
vehicle_truck_profile = "VehicleTruckProfile"
video = "Video"
video360 = "Video360"
video_chat = "VideoChat"
video_clip = "VideoClip"
video_clip_multiple = "VideoClipMultiple"
video_person = "VideoPerson"
video_recording = "VideoRecording"
video_security = "VideoSecurity"
view_desktop = "ViewDesktop"
view_desktop_mobile = "ViewDesktopMobile"
violin = "Violin"
virtual_network = "VirtualNetwork"
voicemail = "Voicemail"
vote = "Vote"
walkie_talkie = "WalkieTalkie"
wallet = "Wallet"
wallet_credit_card = "WalletCreditCard"
wallpaper = "Wallpaper"
wand = "Wand"
warning = "Warning"
weather_blowing_snow = "WeatherBlowingSnow"
weather_cloudy = "WeatherCloudy"
weather_drizzle = "WeatherDrizzle"
weather_duststorm = "WeatherDuststorm"
weather_fog = "WeatherFog"
weather_hail_day = "WeatherHailDay"
weather_hail_night = "WeatherHailNight"
weather_haze = "WeatherHaze"
weather_moon = "WeatherMoon"
weather_partly_cloudy_day = "WeatherPartlyCloudyDay"
weather_partly_cloudy_night = "WeatherPartlyCloudyNight"
weather_rain = "WeatherRain"
weather_rain_showers_day = "WeatherRainShowersDay"
weather_rain_showers_night = "WeatherRainShowersNight"
weather_rain_snow = "WeatherRainSnow"
weather_snow = "WeatherSnow"
weather_snowflake = "WeatherSnowflake"
weather_snow_shower_day = "WeatherSnowShowerDay"
weather_snow_shower_night = "WeatherSnowShowerNight"
weather_squalls = "WeatherSqualls"
weather_sunny_high = "WeatherSunnyHigh"
weather_sunny_low = "WeatherSunnyLow"
weather_thunderstorm = "WeatherThunderstorm"
web_asset = "WebAsset"
whiteboard = "Whiteboard"
wifi1 = "Wifi1"
wifi2 = "Wifi2"
window = "Window"
window_multiple = "WindowMultiple"
window_wrench = "WindowWrench"
wrench = "Wrench"
wrench_screwdriver = "WrenchScrewdriver"
xray = "Xray"
yoga = "Yoga"
if __name__ == "__main__":
    import re
    from enum import Enum

    def camel_to_snake(name):
        s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
        return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()

    def convert_enum_class(enum_class):
        new_attributes = {
            camel_to_snake(name): value.value
            for name, value in enum_class.__members__.items()
        }
        return Enum(enum_class.__name__, new_attributes)

    # Convert the Enum class
    ObjectHandleIcons = convert_enum_class(ObjectHandleIcons)

    # Print the converted Enum class
    for icon in ObjectHandleIcons:
        # Using lower case attributes to follow FastAPI/Pydantic code base and because
        # it is also lower case in Office Scripts
        print(f'{icon.name} = "{icon.value}"')
| ObjectHandleIcons |
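The __main__ helper in the xlwings snippet above converts CamelCase member names to snake_case with two regex passes. Here is a standalone sketch of that conversion; the function body mirrors the source, while the sample inputs are illustrative.

import re


def camel_to_snake(name: str) -> str:
    # First pass splits runs like "HTTPServer" into "HTTP_Server"; the second pass
    # breaks the remaining lowercase/digit-to-uppercase boundaries, then lowercases.
    s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
    return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()


if __name__ == "__main__":
    for sample in ("WeatherPartlyCloudyDay", "QrCode", "TvUsb"):
        print(sample, "->", camel_to_snake(sample))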