language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
huggingface__transformers
|
src/transformers/models/perception_lm/modular_perception_lm.py
|
{
"start": 3038,
"end": 3129
}
|
class ____(LlavaPreTrainedModel):
base_model_prefix = "model"
|
PerceptionLMPreTrainedModel
|
python
|
great-expectations__great_expectations
|
great_expectations/experimental/metric_repository/metrics.py
|
{
"start": 1798,
"end": 4862
}
|
class ____(MetricRepositoryBaseModel, Generic[_ValueType]):
"""Abstract computed metric. Domain, value and parameters are metric dependent.
Note: This implementation does not currently take into account
other domain modifiers, e.g. row_condition, condition_parser, ignore_row_if
"""
def __new__(cls, *args, **kwargs):
if cls is Metric:
raise NotImplementedError("Metric is an abstract class.")
instance = super().__new__(cls)
return instance
batch_id: str = Field(description="Batch id")
metric_name: str = Field(description="Metric name")
value: _ValueType = Field(description="Metric value")
exception: Optional[MetricException] = Field(
description="Exception info if thrown", default=None
)
@classmethod
@override
def update_forward_refs(cls):
from great_expectations.datasource.fluent.interfaces import Batch
super().update_forward_refs(
Batch=Batch,
)
@property
def value_type(self) -> str:
type_ = self.__orig_class__.__args__[0] # type: ignore[attr-defined] # __orig_class__ is used to get the generic type
string_rep = str(type_)
if string_rep.startswith("<class"):
return type_.__name__
else:
return string_rep
@property
def metric_type(self) -> str:
return self.__class__.__name__
@classmethod
def _get_properties(cls):
"""in pydandic v2 we can use computed_field.
https://docs.pydantic.dev/latest/usage/computed_fields/"""
properties = [prop for prop in cls.__dict__ if isinstance(cls.__dict__[prop], property)]
return properties
@override
def dict( # noqa: PLR0913 # FIXME CoP
self,
*,
include: AbstractSetIntStr | MappingIntStrAny | None = None,
exclude: AbstractSetIntStr | MappingIntStrAny | None = None,
by_alias: bool = False,
skip_defaults: Optional[bool] = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> Dict[str, Any]:
"""Override the dict function to include @property fields, in pydandic v2 we can use computed_field.
https://docs.pydantic.dev/latest/usage/computed_fields/
""" # noqa: E501 # FIXME CoP
attribs = super().dict(
include=include,
exclude=exclude,
by_alias=by_alias,
skip_defaults=skip_defaults,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
)
props = self._get_properties()
# Include and exclude properties
if include:
props = [prop for prop in props if prop in include]
if exclude:
props = [prop for prop in props if prop not in exclude]
# Update the attribute dict with the properties
if props:
attribs.update({prop: getattr(self, prop) for prop in props})
return attribs
# Metric domain types
|
Metric
|
python
|
getsentry__sentry
|
src/sentry/issues/grouptype.py
|
{
"start": 19062,
"end": 19386
}
|
class ____(GroupType):
type_id = 2001
slug = "profile_file_io_main_thread"
description = "File I/O on Main Thread"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.MOBILE.value
default_priority = PriorityLevel.LOW
released = True
@dataclass(frozen=True)
|
ProfileFileIOGroupType
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_9/tasks.py
|
{
"start": 88074,
"end": 91229
}
|
class ____(Request):
"""
Indicates that task is closed
:param force: Allows forcing state change even if transition is not supported
:type force: bool
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "close"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "Allows forcing state change even if transition is not supported",
"type": ["boolean", "null"],
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
force: Optional[bool] = False,
status_reason: Optional[str] = None,
status_message: Optional[str] = None,
**kwargs: Any
) -> None:
super(CloseRequest, self).__init__(**kwargs)
self.force = force
self.task = task
self.status_reason = status_reason
self.status_message = status_message
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
|
CloseRequest
|
python
|
huggingface__transformers
|
src/transformers/models/informer/modular_informer.py
|
{
"start": 15385,
"end": 20847
}
|
class ____(TimeSeriesTransformerEncoder):
def __init__(self, config: InformerConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
self.gradient_checkpointing = False
if config.prediction_length is None:
raise ValueError("The `prediction_length` config needs to be specified.")
self.value_embedding = InformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
self.embed_positions = InformerSinusoidalPositionalEmbedding(
config.context_length + config.prediction_length, config.d_model
)
self.layers = nn.ModuleList([InformerEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
if config.distil:
self.conv_layers = nn.ModuleList(
[InformerConvLayer(config.d_model) for _ in range(config.encoder_layers - 1)]
)
self.conv_layers.append(None)
else:
self.conv_layers = [None] * config.encoder_layers
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
Args:
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = self.value_embedding(inputs_embeds)
embed_pos = self.embed_positions(inputs_embeds.size())
hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for idx, (encoder_layer, conv_layer) in enumerate(zip(self.layers, self.conv_layers)):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop: # skip the layer
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions=output_attentions,
)
if conv_layer is not None:
output = conv_layer(layer_outputs[0])
layer_outputs = (output,) + layer_outputs[1:]
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
|
InformerEncoder
|
python
|
django__django
|
tests/model_formsets/models.py
|
{
"start": 1067,
"end": 1461
}
|
class ____(models.Model):
author = models.ForeignKey(Author, models.CASCADE)
# Optional secondary author
alt_editor = models.ForeignKey(Editor, models.SET_NULL, blank=True, null=True)
title = models.CharField(max_length=100)
class Meta:
unique_together = (("author", "title", "alt_editor"),)
def __str__(self):
return self.title
|
BookWithOptionalAltEditor
|
python
|
tornadoweb__tornado
|
tornado/platform/asyncio.py
|
{
"start": 10207,
"end": 10953
}
|
class ____(BaseAsyncIOLoop):
"""``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the
current ``asyncio`` event loop (i.e. the one returned by
``asyncio.get_event_loop()``).
.. deprecated:: 5.0
Now used automatically when appropriate; it is no longer necessary
to refer to this class directly.
.. versionchanged:: 5.0
Closing an `AsyncIOMainLoop` now closes the underlying asyncio loop.
"""
def initialize(self, **kwargs: Any) -> None: # type: ignore
super().initialize(asyncio.get_event_loop(), **kwargs)
def _make_current(self) -> None:
# AsyncIOMainLoop already refers to the current asyncio loop so
# nothing to do here.
pass
|
AsyncIOMainLoop
|
python
|
huggingface__transformers
|
src/transformers/models/mpnet/modeling_mpnet.py
|
{
"start": 3974,
"end": 6710
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.q = nn.Linear(config.hidden_size, self.all_head_size)
self.k = nn.Linear(config.hidden_size, self.all_head_size)
self.v = nn.Linear(config.hidden_size, self.all_head_size)
self.o = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
output_attentions=False,
**kwargs,
):
batch_size, seq_length, _ = hidden_states.shape
q = (
self.q(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
k = (
self.k(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
v = (
self.v(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(q, k.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply relative position embedding (precomputed in MPNetEncoder) if provided.
if position_bias is not None:
attention_scores += position_bias
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
c = torch.matmul(attention_probs, v)
c = c.permute(0, 2, 1, 3).contiguous()
new_c_shape = c.size()[:-2] + (self.all_head_size,)
c = c.view(*new_c_shape)
o = self.o(c)
outputs = (o, attention_probs) if output_attentions else (o,)
return outputs
|
MPNetSelfAttention
|
python
|
facebook__pyre-check
|
client/configuration/tests/configuration_test.py
|
{
"start": 1191,
"end": 22393
}
|
class ____(unittest.TestCase):
def test_create_from_command_arguments(self) -> None:
configuration = PartialConfiguration.from_command_arguments(
command_arguments.CommandArguments(
local_configuration=None,
logger="logger",
targets=[],
source_directories=[],
search_path=["x", "y"],
optional_search_path=["z"],
binary="binary",
buck_mode="opt",
exclude=["excludes"],
typeshed="typeshed",
dot_pyre_directory=Path(".pyre"),
python_version="3.6.7",
system_platform="darwin",
shared_memory_heap_size=42,
number_of_workers=43,
enable_unawaited_awaitable_analysis=True,
include_suppressed_errors=True,
only_privacy_errors=False,
)
)
self.assertEqual(configuration.binary, "binary")
self.assertEqual(
configuration.buck_mode, PlatformAware.from_json("opt", "buck_mode")
)
self.assertEqual(configuration.dot_pyre_directory, Path(".pyre"))
self.assertListEqual(list(configuration.excludes), ["excludes"])
self.assertEqual(configuration.logger, "logger")
self.assertEqual(configuration.oncall, None)
self.assertListEqual(
list(configuration.search_path),
[SimpleRawElement("x"), SimpleRawElement("y")],
)
self.assertListEqual(
list(configuration.optional_search_path), [SimpleRawElement("z")]
)
self.assertIsNone(configuration.source_directories)
self.assertEqual(configuration.strict, None)
self.assertIsNone(configuration.targets)
self.assertEqual(configuration.typeshed, "typeshed")
self.assertEqual(configuration.unwatched_dependency, None)
self.assertEqual(
configuration.python_version, PythonVersion(major=3, minor=6, micro=7)
)
self.assertEqual(configuration.system_platform, "darwin")
self.assertEqual(configuration.shared_memory, SharedMemory(heap_size=42))
self.assertEqual(configuration.site_package_search_strategy, None)
self.assertEqual(configuration.site_roots, None)
self.assertEqual(configuration.number_of_workers, 43)
self.assertEqual(configuration.max_number_of_workers, None)
self.assertEqual(configuration.enable_readonly_analysis, None)
self.assertEqual(configuration.enable_strict_override_check, None)
self.assertEqual(configuration.enable_strict_any_check, None)
self.assertEqual(configuration.enable_unawaited_awaitable_analysis, True)
self.assertEqual(configuration.include_suppressed_errors, True)
self.assertEqual(configuration.only_privacy_errors, False)
def test_create_from_string_success(self) -> None:
self.assertEqual(
PartialConfiguration.from_dict({"binary": "foo"}).binary,
"foo",
)
for mode in [
"foo",
{"default": "foo"},
{"linux": "foo"},
{"default": "bar", "macos": "foo", "linux": "foo"},
]:
buck_mode = PartialConfiguration.from_dict({"buck_mode": mode}).buck_mode
expected_value = PlatformAware.from_json("foo", "buck_mode")
self.assertIsNotNone(buck_mode)
self.assertIsNotNone(expected_value)
self.assertEqual(buck_mode.get(), expected_value.get())
for null_mode in [{}, None]:
self.assertIsNone(
PartialConfiguration.from_dict({"buck_mode": null_mode}).buck_mode
)
self.assertEqual(
PartialConfiguration.from_dict({"bxl_builder": "foo"}).bxl_builder,
"foo",
)
self.assertListEqual(
list(
PartialConfiguration.from_dict(
{"only_check_paths": ["foo", "bar"]}
).only_check_paths
),
["foo", "bar"],
)
self.assertEqual(
PartialConfiguration.from_dict(
{"dot_pyre_directory": "foo"}
).dot_pyre_directory,
Path("foo"),
)
self.assertListEqual(
list(PartialConfiguration.from_dict({"exclude": "foo"}).excludes),
["foo"],
)
self.assertListEqual(
list(PartialConfiguration.from_dict({"exclude": ["foo", "bar"]}).excludes),
["foo", "bar"],
)
self.assertListEqual(
list(
PartialConfiguration.from_dict(
{"extensions": [".foo", ".bar"]}
).extensions
),
[ExtensionElement(".foo", False), ExtensionElement(".bar", False)],
)
self.assertListEqual(
list(
PartialConfiguration.from_dict(
{
"extensions": [
".foo",
{
"suffix": ".bar",
"include_suffix_in_module_qualifier": True,
},
{
"suffix": ".baz",
"include_suffix_in_module_qualifier": False,
},
]
}
).extensions
),
[
ExtensionElement(".foo", False),
ExtensionElement(".bar", True),
ExtensionElement(".baz", False),
],
)
self.assertListEqual(
list(
PartialConfiguration.from_dict(
{"ignore_all_errors": ["foo", "bar"]}
).ignore_all_errors
),
["foo", "bar"],
)
self.assertEqual(
PartialConfiguration.from_dict({"logger": "foo"}).logger,
"foo",
)
self.assertEqual(
PartialConfiguration.from_dict({"oncall": "foo"}).oncall,
"foo",
)
self.assertEqual(
PartialConfiguration.from_dict({"workers": 42}).number_of_workers,
42,
)
self.assertEqual(
PartialConfiguration.from_dict({"max_workers": 42}).max_number_of_workers,
42,
)
self.assertListEqual(
list(
PartialConfiguration.from_dict(
{"critical_files": ["foo", "bar"]}
).other_critical_files
),
["foo", "bar"],
)
self.assertListEqual(
list(PartialConfiguration.from_dict({"search_path": "foo"}).search_path),
[SimpleRawElement("foo")],
)
self.assertListEqual(
list(
PartialConfiguration.from_dict(
{"search_path": ["foo", {"root": "bar", "subdirectory": "baz"}]}
).search_path
),
[
SimpleRawElement("foo"),
SubdirectoryRawElement("bar", "baz"),
],
)
self.assertIsNone(
PartialConfiguration.from_dict({}).site_package_search_strategy
)
self.assertEqual(
PartialConfiguration.from_dict(
{"site_package_search_strategy": "pep561"}
).site_package_search_strategy,
SearchStrategy.PEP561,
)
self.assertEqual(PartialConfiguration.from_dict({"strict": True}).strict, True)
self.assertListEqual(
list(
PartialConfiguration.from_dict(
{"taint_models_path": "foo"}
).taint_models_path
),
["foo"],
)
self.assertListEqual(
list(
PartialConfiguration.from_dict(
{"taint_models_path": ["foo", "bar"]}
).taint_models_path
),
["foo", "bar"],
)
self.assertEqual(
PartialConfiguration.from_dict({"typeshed": "foo"}).typeshed,
"foo",
)
self.assertEqual(
PartialConfiguration.from_dict({"version": "abc"}).version_hash,
"abc",
)
self.assertEqual(
PartialConfiguration.from_dict({"pysa_version": "abc"}).pysa_version_hash,
"abc",
)
self.assertEqual(
PartialConfiguration.from_dict({"python_version": "3"}).python_version,
PythonVersion(major=3, minor=0, micro=0),
)
self.assertEqual(
PartialConfiguration.from_dict({"python_version": "3.6"}).python_version,
PythonVersion(major=3, minor=6, micro=0),
)
self.assertEqual(
PartialConfiguration.from_dict({"python_version": "3.6.7"}).python_version,
PythonVersion(major=3, minor=6, micro=7),
)
self.assertEqual(
PartialConfiguration.from_dict(
{"system_platform": "darwin"}
).system_platform,
"darwin",
)
self.assertEqual(
PartialConfiguration.from_dict(
{"shared_memory": {"heap_size": 1}}
).shared_memory,
SharedMemory(heap_size=1),
)
self.assertEqual(
PartialConfiguration.from_dict(
{"shared_memory": {"dependency_table_power": 2}}
).shared_memory,
SharedMemory(dependency_table_power=2),
)
self.assertEqual(
PartialConfiguration.from_dict(
{"shared_memory": {"hash_table_power": 3}}
).shared_memory,
SharedMemory(hash_table_power=3),
)
self.assertIsNone(PartialConfiguration.from_dict({}).source_directories)
source_directories = PartialConfiguration.from_dict(
{"source_directories": ["foo", "bar"]}
).source_directories
self.assertIsNotNone(source_directories)
self.assertListEqual(
list(source_directories),
[SimpleRawElement("foo"), SimpleRawElement("bar")],
)
self.assertIsNone(PartialConfiguration.from_dict({}).site_roots)
site_roots = PartialConfiguration.from_dict(
{"site_roots": ["foo", "bar"]}
).site_roots
self.assertIsNotNone(site_roots)
self.assertListEqual(
list(site_roots),
["foo", "bar"],
)
source_directories = PartialConfiguration.from_dict(
{
"source_directories": [
"foo",
{"root": "bar", "subdirectory": "baz"},
]
}
).source_directories
self.assertIsNotNone(source_directories)
self.assertListEqual(
list(source_directories),
[
SimpleRawElement("foo"),
SubdirectoryRawElement("bar", "baz"),
],
)
source_directories = PartialConfiguration.from_dict(
{
"source_directories": [
"foo",
{"import_root": "bar", "source": "baz"},
]
}
).source_directories
self.assertIsNotNone(source_directories)
self.assertListEqual(
list(source_directories),
[
SimpleRawElement("foo"),
SubdirectoryRawElement("bar", "baz"),
],
)
self.assertIsNone(PartialConfiguration.from_dict({}).targets)
targets = PartialConfiguration.from_dict(
{"targets": ["//foo", "//bar"]}
).targets
self.assertIsNotNone(targets)
self.assertListEqual(list(targets), ["//foo", "//bar"])
unwatched_dependency = PartialConfiguration.from_dict(
{
"unwatched_dependency": {
"change_indicator": "foo",
"files": {"root": "bar", "checksum_path": "baz"},
}
}
).unwatched_dependency
self.assertIsNotNone(unwatched_dependency)
self.assertEqual(
unwatched_dependency,
UnwatchedDependency(
change_indicator="foo",
files=UnwatchedFiles(root="bar", checksum_path="baz"),
),
)
self.assertEqual(
PartialConfiguration.from_dict(
{"enable_readonly_analysis": True}
).enable_readonly_analysis,
True,
)
self.assertEqual(
PartialConfiguration.from_dict({}).enable_readonly_analysis,
None,
)
self.assertEqual(
PartialConfiguration.from_dict(
{"enable_strict_override_check": True}
).enable_strict_override_check,
True,
)
self.assertEqual(
PartialConfiguration.from_dict({}).enable_strict_override_check,
None,
)
self.assertEqual(
PartialConfiguration.from_dict(
{"enable_strict_any_check": True}
).enable_strict_any_check,
True,
)
self.assertEqual(
PartialConfiguration.from_dict({}).enable_strict_any_check, None
)
self.assertEqual(
PartialConfiguration.from_dict(
{"enable_unawaited_awaitable_analysis": True}
).enable_unawaited_awaitable_analysis,
True,
)
self.assertEqual(
PartialConfiguration.from_dict({}).enable_unawaited_awaitable_analysis,
None,
)
self.assertEqual(
PartialConfiguration.from_dict(
{"include_suppressed_errors": True}
).include_suppressed_errors,
True,
)
self.assertEqual(
PartialConfiguration.from_dict(
{"only_privacy_errors": True}
).only_privacy_errors,
True,
)
self.assertEqual(
PartialConfiguration.from_dict({}).only_privacy_errors,
None,
)
def test_create_from_string_failure(self) -> None:
def assert_raises(content: str) -> None:
with self.assertRaises(InvalidConfiguration):
PartialConfiguration.from_dict(json.loads(content))
assert_raises(json.dumps({"binary": True}))
assert_raises(json.dumps({"buck_mode": {"default": 5}}))
assert_raises(json.dumps({"buck_mode": {"bad-platform": "mode"}}))
assert_raises(
json.dumps(
{
"buck_mode": {
"win": "valid",
"bad": "valid-also",
}
}
)
)
assert_raises(json.dumps({"bxl_builder": []}))
assert_raises(json.dumps({"only_check_paths": "abc"}))
assert_raises(json.dumps({"dot_pyre_directory": {}}))
assert_raises(json.dumps({"enable_readonly_analysis": 42}))
assert_raises(json.dumps({"enable_strict_override_check": 42}))
assert_raises(json.dumps({"enable_strict_any_check": 42}))
assert_raises(json.dumps({"enable_unawaited_awaitable_analysis": 42}))
assert_raises(json.dumps({"exclude": 42}))
assert_raises(json.dumps({"extensions": 42}))
assert_raises(json.dumps({"ignore_all_errors": [1, 2, 3]}))
assert_raises(json.dumps({"include_suppressed_errors": 42}))
assert_raises(json.dumps({"logger": []}))
assert_raises(json.dumps({"oncall": []}))
assert_raises(json.dumps({"workers": "abc"}))
assert_raises(json.dumps({"critical_files": "abc"}))
assert_raises(json.dumps({"source_directories": "abc"}))
assert_raises(json.dumps({"strict": 42}))
assert_raises(json.dumps({"taint_models_path": True}))
assert_raises(json.dumps({"taint_models_path": ["foo", 42]}))
assert_raises(json.dumps({"targets": "abc"}))
assert_raises(json.dumps({"typeshed": ["abc"]}))
assert_raises(json.dumps({"version": 123}))
assert_raises(json.dumps({"pysa_version": 123}))
assert_raises(json.dumps({"python_version": "abc"}))
assert_raises(json.dumps({"python_version": 42}))
assert_raises(json.dumps({"system_platform": 42}))
assert_raises(json.dumps({"shared_memory": "abc"}))
assert_raises(json.dumps({"shared_memory": {"heap_size": "abc"}}))
assert_raises(json.dumps({"site_package_search_strategy": False}))
assert_raises(json.dumps({"site_roots": 42}))
assert_raises(json.dumps({"unwatched_dependency": {"change_indicator": "abc"}}))
def test_expand_relative_paths(self) -> None:
self.assertEqual(
PartialConfiguration(binary="foo").expand_relative_paths("bar").binary,
"bar/foo",
)
self.assertEqual(
PartialConfiguration(binary="~/foo").expand_relative_paths("bar").binary,
str(Path.home() / "foo"),
)
self.assertEqual(
PartialConfiguration(only_check_paths=["foo", "bar"])
.expand_relative_paths("baz")
.only_check_paths,
["baz/foo", "baz/bar"],
)
self.assertEqual(
PartialConfiguration(ignore_all_errors=["foo", "bar"])
.expand_relative_paths("baz")
.ignore_all_errors,
["baz/foo", "baz/bar"],
)
self.assertEqual(
PartialConfiguration(logger="foo").expand_relative_paths("bar").logger,
"bar/foo",
)
self.assertEqual(
PartialConfiguration(other_critical_files=["foo", "bar"])
.expand_relative_paths("baz")
.other_critical_files,
["baz/foo", "baz/bar"],
)
configuration = PartialConfiguration(
search_path=[
SimpleRawElement("foo"),
SubdirectoryRawElement("bar", "baz"),
SitePackageRawElement("package"),
],
optional_search_path=[
SimpleRawElement("optional"),
],
).expand_relative_paths("root")
self.assertEqual(
configuration.search_path,
[
SimpleRawElement("root/foo"),
SubdirectoryRawElement("root/bar", "baz"),
SitePackageRawElement("package"),
],
)
self.assertEqual(
configuration.optional_search_path, [SimpleRawElement("root/optional")]
)
self.assertEqual(
PartialConfiguration(
source_directories=[
SimpleRawElement("foo"),
SimpleRawElement("bar"),
]
)
.expand_relative_paths("baz")
.source_directories,
[
SimpleRawElement("baz/foo"),
SimpleRawElement("baz/bar"),
],
)
self.assertEqual(
PartialConfiguration(taint_models_path=["foo", "bar"])
.expand_relative_paths("baz")
.taint_models_path,
["baz/foo", "baz/bar"],
)
self.assertEqual(
PartialConfiguration(typeshed="foo").expand_relative_paths("bar").typeshed,
"bar/foo",
)
self.assertEqual(
PartialConfiguration(enable_readonly_analysis=True)
.expand_relative_paths("bar")
.enable_readonly_analysis,
True,
)
self.assertEqual(
PartialConfiguration(enable_strict_override_check=True)
.expand_relative_paths("bar")
.enable_strict_override_check,
True,
)
self.assertEqual(
PartialConfiguration(enable_strict_any_check=True)
.expand_relative_paths("bar")
.enable_strict_any_check,
True,
)
self.assertEqual(
PartialConfiguration(enable_unawaited_awaitable_analysis=True)
.expand_relative_paths("bar")
.enable_unawaited_awaitable_analysis,
True,
)
def assert_expanded_unwatched_root(
original: str, root: str, expected: str
) -> None:
actual = (
PartialConfiguration(
unwatched_dependency=UnwatchedDependency(
change_indicator="indicator",
files=UnwatchedFiles(root=original, checksum_path="checksum"),
)
)
.expand_relative_paths(root)
.unwatched_dependency
)
self.assertIsNotNone(actual)
self.assertEqual(actual.files.root, expected)
assert_expanded_unwatched_root(
original="foo",
root="bar",
expected="bar/foo",
)
|
PartialConfigurationTest
|
python
|
PyCQA__pylint
|
tests/functional/m/method_hidden.py
|
{
"start": 545,
"end": 764
}
|
class ____:
"""dummy"""
def __init__(self, _):
pass
def __get__(self, obj, __):
if not obj:
return self
return 5
def __set__(self, _, __):
pass
|
CustomProperty
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/dashboard.py
|
{
"start": 21527,
"end": 21664
}
|
class ____(TypedDict, total=False):
release: list[str]
releaseId: list[str]
globalFilter: list[dict[str, Any]]
|
DashboardFilters
|
python
|
miyuchina__mistletoe
|
mistletoe/block_token.py
|
{
"start": 1975,
"end": 4221
}
|
class ____(token.Token):
"""
Base class for block-level tokens. Recursively parse inner tokens.
Naming conventions:
* lines denotes a list of (possibly unparsed) input lines, and is
commonly used as the argument name for constructors.
* BlockToken.children is a list with all the inner tokens (thus if
a token has children attribute, it is not a leaf node; if a token
calls span_token.tokenize_inner, it is the boundary between
span-level tokens and block-level tokens);
* BlockToken.start takes a line from the document as argument, and
returns a boolean representing whether that line marks the start
of the current token. Every subclass of BlockToken must define a
start function (see block_tokenizer.tokenize).
* BlockToken.read takes the rest of the lines in the document as an
iterator (including the start line), and consumes all the lines
that should be read into this token.
Default to stop at an empty line.
Note that BlockToken.read does not have to return a list of lines.
Because the return value of this function will be directly
passed into the token constructor, we can return any relevant
parsing information, sometimes even ready-made tokens,
into the constructor. See block_tokenizer.tokenize.
If BlockToken.read returns None, the read result is ignored,
but the token class is responsible for resetting the iterator
to a previous state. See block_tokenizer.FileWrapper.get_pos,
block_tokenizer.FileWrapper.set_pos.
Attributes:
children (list): inner tokens.
line_number (int): starting line (1-based).
"""
repr_attributes = ("line_number",)
def __init__(self, lines, tokenize_func):
self.children = tokenize_func(lines)
def __contains__(self, text):
return any(text in child for child in self.children)
@staticmethod
def read(lines):
line_buffer = [next(lines)]
for line in lines:
if line == '\n':
break
line_buffer.append(line)
return line_buffer
|
BlockToken
|
python
|
great-expectations__great_expectations
|
great_expectations/exceptions/exceptions.py
|
{
"start": 1528,
"end": 1586
}
|
class ____(DataContextError):
pass
|
ExpectationSuiteError
|
python
|
walkccc__LeetCode
|
solutions/1725. Number Of Rectangles That Can Form The Largest Square/1725.py
|
{
"start": 0,
"end": 176
}
|
class ____:
def countGoodRectangles(self, rectangles: list[list[int]]) -> int:
minSides = [min(x, y) for x, y in rectangles]
return minSides.count(max(minSides))
|
Solution
|
python
|
tornadoweb__tornado
|
tornado/gen.py
|
{
"start": 25823,
"end": 31765
}
|
class ____:
"""Internal implementation of `tornado.gen.coroutine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.Future`)
"""
def __init__(
self,
ctx_run: Callable,
gen: "Generator[_Yieldable, Any, _T]",
result_future: "Future[_T]",
first_yielded: _Yieldable,
) -> None:
self.ctx_run = ctx_run
self.gen = gen
self.result_future = result_future
self.future = _null_future # type: Union[None, Future]
self.running = False
self.finished = False
self.io_loop = IOLoop.current()
if self.ctx_run(self.handle_yield, first_yielded):
gen = result_future = first_yielded = None # type: ignore
self.ctx_run(self.run)
def run(self) -> None:
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if future is None:
raise Exception("No pending future")
if not future.done():
return
self.future = None
try:
try:
value = future.result()
except Exception as e:
# Save the exception for later. It's important that
# gen.throw() not be called inside this try/except block
# because that makes sys.exc_info behave unexpectedly.
exc: Optional[Exception] = e
else:
exc = None
finally:
future = None
if exc is not None:
try:
yielded = self.gen.throw(exc)
finally:
# Break up a circular reference for faster GC on
# CPython.
del exc
else:
yielded = self.gen.send(value)
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
future_set_result_unless_cancelled(
self.result_future, _value_from_stopiteration(e)
)
self.result_future = None # type: ignore
return
except Exception:
self.finished = True
self.future = _null_future
future_set_exc_info(self.result_future, sys.exc_info())
self.result_future = None # type: ignore
return
if not self.handle_yield(yielded):
return
yielded = None
finally:
self.running = False
def handle_yield(self, yielded: _Yieldable) -> bool:
try:
self.future = convert_yielded(yielded)
except BadYieldError:
self.future = Future()
future_set_exc_info(self.future, sys.exc_info())
if self.future is moment:
self.io_loop.add_callback(self.ctx_run, self.run)
return False
elif self.future is None:
raise Exception("no pending future")
elif not self.future.done():
def inner(f: Any) -> None:
# Break a reference cycle to speed GC.
f = None # noqa: F841
self.ctx_run(self.run)
self.io_loop.add_future(self.future, inner)
return False
return True
def handle_exception(
self, typ: Type[Exception], value: Exception, tb: types.TracebackType
) -> bool:
if not self.running and not self.finished:
self.future = Future()
future_set_exc_info(self.future, (typ, value, tb))
self.ctx_run(self.run)
return True
else:
return False
def _wrap_awaitable(awaitable: Awaitable) -> Future:
# Convert Awaitables into Futures.
# Note that we use ensure_future, which handles both awaitables
# and coroutines, rather than create_task, which only accepts
# coroutines. (ensure_future calls create_task if given a coroutine)
fut = asyncio.ensure_future(awaitable)
# See comments on IOLoop._pending_tasks.
loop = IOLoop.current()
loop._register_task(fut)
fut.add_done_callback(lambda f: loop._unregister_task(f))
return fut
def convert_yielded(yielded: _Yieldable) -> Future:
"""Convert a yielded object into a `.Future`.
The default implementation accepts lists, dictionaries, and
Futures. This has the side effect of starting any coroutines that
did not start themselves, similar to `asyncio.ensure_future`.
If the `~functools.singledispatch` library is available, this function
may be extended to support additional types. For example::
@convert_yielded.register(asyncio.Future)
def _(asyncio_future):
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
.. versionadded:: 4.1
"""
if yielded is None or yielded is moment:
return moment
elif yielded is _null_future:
return _null_future
elif isinstance(yielded, (list, dict)):
return multi(yielded) # type: ignore
elif is_future(yielded):
return typing.cast(Future, yielded)
elif isawaitable(yielded):
return _wrap_awaitable(yielded) # type: ignore
else:
raise BadYieldError(f"yielded unknown object {yielded!r}")
convert_yielded = singledispatch(convert_yielded)
|
Runner
|
python
|
astropy__astropy
|
astropy/table/bst.py
|
{
"start": 810,
"end": 1194
}
|
class ____:
"""
The opposite of MaxValue, i.e. a representation of
negative infinity.
"""
def __lt__(self, other):
return True
def __le__(self, other):
return True
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __repr__(self):
return "MIN"
__str__ = __repr__
|
MinValue
|
python
|
fluentpython__example-code-2e
|
21-async/mojifinder/charindex.py
|
{
"start": 1219,
"end": 2443
}
|
class ____:
entries: Index
def __init__(self, start: int = 32, stop: int = STOP_CODE):
entries: Index = defaultdict(set)
for char in (chr(i) for i in range(start, stop)):
name = unicodedata.name(char, '')
if name:
for word in tokenize(name):
entries[word].add(char)
self.entries = entries
def search(self, query: str) -> set[Char]:
if words := list(tokenize(query)):
found = self.entries[words[0]]
return found.intersection(*(self.entries[w] for w in words[1:]))
else:
return set()
def format_results(chars: set[Char]) -> Iterator[str]:
for char in sorted(chars):
name = unicodedata.name(char)
code = ord(char)
yield f'U+{code:04X}\t{char}\t{name}'
def main(words: list[str]) -> None:
if not words:
print('Please give one or more words to search.')
sys.exit(2) # command line usage error
index = InvertedIndex()
chars = index.search(' '.join(words))
for line in format_results(chars):
print(line)
print('─' * 66, f'{len(chars)} found')
if __name__ == '__main__':
main(sys.argv[1:])
|
InvertedIndex
|
python
|
pyqtgraph__pyqtgraph
|
tests/test_signalproxy.py
|
{
"start": 91,
"end": 239
}
|
class ____(QtCore.QObject):
signalSend = QtCore.Signal()
def __init__(self, parent=None):
super(Sender, self).__init__(parent)
|
Sender
|
python
|
cython__cython
|
Cython/Compiler/FusedNode.py
|
{
"start": 351,
"end": 41407
}
|
class ____(StatListNode):
"""
This node replaces a function with fused arguments. It deep-copies the
function for every permutation of fused types, and allocates a new local
scope for it. It keeps track of the original function in self.node, and
the entry of the original function in the symbol table is given the
'fused_cfunction' attribute which points back to us.
Then when a function lookup occurs (to e.g. call it), the call can be
dispatched to the right function.
node FuncDefNode the original function
nodes [FuncDefNode] list of copies of node with different specific types
py_func DefNode the fused python function subscriptable from
Python space
__signatures__ A DictNode mapping signature specialization strings
to PyCFunction nodes
resulting_fused_function PyCFunction for the fused DefNode that delegates
to specializations
fused_func_assignment Assignment of the fused function to the function name
defaults_tuple TupleNode of defaults (letting PyCFunctionNode build
defaults would result in many different tuples)
specialized_pycfuncs List of synthesized pycfunction nodes for the
specializations
fused_compound_types All fused (compound) types (e.g. floating[:])
"""
__signatures__ = None
resulting_fused_function = None
fused_func_assignment = None
py_func = None
defaults_tuple = None
decorators = None
child_attrs = StatListNode.child_attrs + [
'__signatures__', 'resulting_fused_function', 'fused_func_assignment']
def __init__(self, node, env):
super().__init__(node.pos)
self.nodes = []
self.node = node
is_def = isinstance(self.node, DefNode)
if is_def:
# self.node.decorators = []
self.copy_def(env)
else:
self.copy_cdef(env)
# Perform some sanity checks. If anything fails, it's a bug
for n in self.nodes:
assert not n.entry.type.is_fused
assert not n.local_scope.return_type.is_fused
if node.return_type.is_fused:
assert not n.return_type.is_fused
if not is_def and n.cfunc_declarator.optional_arg_count:
assert n.type.op_arg_struct
node.entry.fused_cfunction = self
# Copy the nodes as AnalyseDeclarationsTransform will prepend
# self.py_func to self.stats, as we only want specialized
# CFuncDefNodes in self.nodes
self.stats = self.nodes[:]
def copy_def(self, env):
"""
Create a copy of the original def or lambda function for specialized
versions.
"""
fused_compound_types = PyrexTypes.unique(
[arg.type for arg in self.node.args if arg.type.is_fused])
fused_types = self._get_fused_base_types(fused_compound_types)
permutations = PyrexTypes.get_all_specialized_permutations(fused_types)
self.fused_compound_types = fused_compound_types
if self.node.entry in env.pyfunc_entries:
env.pyfunc_entries.remove(self.node.entry)
for cname, fused_to_specific in permutations:
copied_node = copy.deepcopy(self.node)
# keep signature object identity for special casing in DefNode.analyse_declarations()
copied_node.entry.signature = self.node.entry.signature
self._specialize_function_args(copied_node.args, fused_to_specific)
copied_node.return_type = self.node.return_type.specialize(
fused_to_specific)
copied_node.code_object = CodeObjectNode(copied_node)
copied_node.analyse_declarations(env)
# copied_node.is_staticmethod = self.node.is_staticmethod
# copied_node.is_classmethod = self.node.is_classmethod
self.create_new_local_scope(copied_node, env, fused_to_specific)
self.specialize_copied_def(copied_node, cname, self.node.entry,
fused_to_specific, fused_compound_types)
PyrexTypes.specialize_entry(copied_node.entry, cname)
copied_node.entry.used = True
env.entries[copied_node.entry.name] = copied_node.entry
specialised_type_names = [
sarg.type.declaration_code('', for_display=True)
for (farg, sarg) in zip(self.node.args, copied_node.args)
if farg.type.is_fused
]
copied_node.name = StringEncoding.EncodedString(f"{copied_node.name}[{','.join(specialised_type_names)}]")
if not self.replace_fused_typechecks(copied_node):
break
self.orig_py_func = self.node
self.py_func = self.make_fused_cpdef(self.node, env, is_def=True)
def copy_cdef(self, env):
"""
Create a copy of the original c(p)def function for all specialized
versions.
"""
permutations = self.node.type.get_all_specialized_permutations()
# print 'Node %s has %d specializations:' % (self.node.entry.name,
# len(permutations))
# import pprint; pprint.pprint([d for cname, d in permutations])
# Prevent copying of the python function
self.orig_py_func = orig_py_func = self.node.py_func
self.node.py_func = None
if orig_py_func:
env.pyfunc_entries.remove(orig_py_func.entry)
fused_types = self.node.type.get_fused_types()
self.fused_compound_types = fused_types
new_cfunc_entries = []
for cname, fused_to_specific in permutations:
copied_node = copy.deepcopy(self.node)
# Make the types in our CFuncType specific.
try:
type = copied_node.type.specialize(fused_to_specific)
except CannotSpecialize:
# unlike for the argument types, specializing the return type can fail
error(copied_node.pos, "Return type is a fused type that cannot "
"be determined from the function arguments")
self.py_func = None # this is just to let the compiler exit gracefully
return
entry = copied_node.entry
type.specialize_entry(entry, cname)
# Reuse existing Entries (e.g. from .pxd files).
for orig_entry in env.cfunc_entries:
if entry.cname == orig_entry.cname and type.same_as_resolved_type(orig_entry.type):
copied_node.entry = orig_entry
if not copied_node.entry.func_cname:
copied_node.entry.func_cname = entry.func_cname
entry = orig_entry
type = orig_entry.type
break
else:
new_cfunc_entries.append(entry)
copied_node.type = type
entry.type, type.entry = type, entry
entry.used = (entry.used or
self.node.entry.defined_in_pxd or
env.is_c_class_scope or
entry.is_cmethod)
if self.node.cfunc_declarator.optional_arg_count:
self.node.cfunc_declarator.declare_optional_arg_struct(
type, env, fused_cname=cname)
copied_node.return_type = type.return_type
self.create_new_local_scope(copied_node, env, fused_to_specific)
# Make the argument types in the CFuncDeclarator specific
self._specialize_function_args(copied_node.cfunc_declarator.args,
fused_to_specific)
# If a cpdef, declare all specialized cpdefs (this
# also calls analyse_declarations)
copied_node.declare_cpdef_wrapper(env)
if copied_node.py_func:
env.pyfunc_entries.remove(copied_node.py_func.entry)
self.specialize_copied_def(
copied_node.py_func, cname, self.node.entry.as_variable,
fused_to_specific, fused_types)
if not self.replace_fused_typechecks(copied_node):
break
# replace old entry with new entries
if self.node.entry in env.cfunc_entries:
cindex = env.cfunc_entries.index(self.node.entry)
env.cfunc_entries[cindex:cindex+1] = new_cfunc_entries
else:
env.cfunc_entries.extend(new_cfunc_entries)
if orig_py_func:
self.py_func = self.make_fused_cpdef(orig_py_func, env,
is_def=False)
else:
self.py_func = orig_py_func
def _get_fused_base_types(self, fused_compound_types):
"""
Get a list of unique basic fused types, from a list of
(possibly) compound fused types.
"""
base_types = []
seen = set()
for fused_type in fused_compound_types:
fused_type.get_fused_types(result=base_types, seen=seen)
return base_types
def _specialize_function_args(self, args, fused_to_specific):
for arg in args:
if arg.type.is_fused:
arg.type = arg.type.specialize(fused_to_specific)
if arg.type.is_memoryviewslice:
arg.type.validate_memslice_dtype(arg.pos)
if arg.annotation:
# TODO might be nice if annotations were specialized instead?
# (Or might be hard to do reliably)
arg.annotation.untyped = True
def create_new_local_scope(self, node, env, f2s):
"""
Create a new local scope for the copied node and append it to
self.nodes. A new local scope is needed because the arguments with the
fused types are already in the local scope, and we need the specialized
entries created after analyse_declarations on each specialized version
of the (CFunc)DefNode.
f2s is a dict mapping each fused type to its specialized version
"""
node.create_local_scope(env)
node.local_scope.fused_to_specific = f2s
# This is copied from the original function, set it to false to
# stop recursion
node.has_fused_arguments = False
self.nodes.append(node)
def specialize_copied_def(self, node, cname, py_entry, f2s, fused_compound_types):
"""Specialize the copy of a DefNode given the copied node,
the specialization cname and the original DefNode entry"""
fused_types = self._get_fused_base_types(fused_compound_types)
type_strings = [
PyrexTypes.specialization_signature_string(fused_type, f2s)
for fused_type in fused_types
]
node.specialized_signature_string = '|'.join(type_strings)
node.entry.pymethdef_cname = PyrexTypes.get_fused_cname(
cname, node.entry.pymethdef_cname)
node.entry.doc = py_entry.doc
node.entry.doc_cname = py_entry.doc_cname
def replace_fused_typechecks(self, copied_node):
"""
Branch-prune fused type checks like
if fused_t is int:
...
Returns whether an error was issued and whether we should stop in
in order to prevent a flood of errors.
"""
num_errors = Errors.get_errors_count()
transform = ParseTreeTransforms.ReplaceFusedTypeChecks(
copied_node.local_scope)
transform(copied_node)
if Errors.get_errors_count() > num_errors:
return False
return True
def _fused_instance_checks(self, normal_types, pyx_code, env):
"""
Generate Cython code for instance checks, matching an object to
specialized types.
"""
for specialized_type in normal_types:
# all_numeric = all_numeric and specialized_type.is_numeric
py_type_name = specialized_type.py_type_name()
pyx_code.put_chunk(
f"""
if isinstance(arg, {py_type_name}):
return '{specialized_type.specialization_string}'
"""
)
def _dtype_name(self, dtype):
name = str(dtype).replace('_', '__').replace(' ', '_')
if dtype.is_typedef:
name = Naming.fused_dtype_prefix + name
return name
def _dtype_type(self, dtype):
if dtype.is_typedef:
return self._dtype_name(dtype)
return str(dtype)
def _sizeof_dtype(self, dtype):
if dtype.is_pyobject:
return 'sizeof(void *)'
else:
return f"sizeof({self._dtype_type(dtype)})"
def _buffer_check_numpy_dtype_setup_cases(self, pyx_code):
"Setup some common cases to match dtypes against specializations"
with pyx_code.indenter("if kind in u'iu':"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_int")
with pyx_code.indenter("elif kind == u'f':"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_float")
with pyx_code.indenter("elif kind == u'c':"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_complex")
def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types, pythran_types):
"""
Match a numpy dtype object to the individual specializations.
"""
self._buffer_check_numpy_dtype_setup_cases(pyx_code)
for specialized_type in pythran_types+specialized_buffer_types:
final_type = specialized_type
if specialized_type.is_pythran_expr:
specialized_type = specialized_type.org_buffer
dtype = specialized_type.dtype
itemsize_match = self._sizeof_dtype(dtype) + " == itemsize"
signed_match = f" and not ({self._dtype_name(dtype)}_is_signed ^ dtype_signed)"
dtypes = [
(dtype.is_int, pyx_code['dtype_int']),
(dtype.is_float, pyx_code['dtype_float']),
(dtype.is_complex, pyx_code['dtype_complex'])
]
for dtype_category, codewriter in dtypes:
if not dtype_category:
continue
cond = f'{itemsize_match} and (<Py_ssize_t>arg.ndim) == {specialized_type.ndim}'
if dtype.is_int:
cond += signed_match
if final_type.is_pythran_expr:
cond += ' and arg_is_pythran_compatible'
with codewriter.indenter(f"if {cond}:"):
#codewriter.putln("print 'buffer match found based on numpy dtype'")
codewriter.putln(f"return '{final_type.specialization_string}'")
def _buffer_parse_format_string_check(self, pyx_code, decl_code, specialized_type, env):
"""
For each specialized type, try to coerce the object to a memoryview
slice of that type. This means obtaining a buffer and parsing the
format string.
TODO: separate buffer acquisition from format parsing
"""
dtype = specialized_type.dtype
if specialized_type.is_buffer:
axes = [('direct', 'strided')] * specialized_type.ndim
else:
axes = specialized_type.axes
memslice_type = PyrexTypes.MemoryViewSliceType(dtype, axes)
memslice_type.create_from_py_utility_code(env)
coerce_from_py_func = memslice_type.from_py_function
decl_code.putln(
f"{Naming.memviewslice_cname} {coerce_from_py_func}(object, int)")
match = specialized_type.specialization_string
sizeof_dtype = self._sizeof_dtype(dtype)
ndim_dtype = specialized_type.ndim
# Use the memoryview object to check itemsize and ndim.
# In principle it could check more, but these are the easiest to do quickly.
pyx_code.put_chunk(
f"""
# try {dtype}
if (((itemsize == -1 and arg_as_memoryview.itemsize == {sizeof_dtype})
or itemsize == {sizeof_dtype})
and arg_as_memoryview.ndim == {ndim_dtype}):
memslice = {coerce_from_py_func}(arg_as_memoryview, 0)
if memslice.memview:
__PYX_XCLEAR_MEMVIEW(&memslice, 1)
# print 'found a match for the buffer through format parsing'
return '{match}'
else:
__pyx_PyErr_Clear()
"""
)
def _buffer_checks(self, buffer_types, pythran_types, pyx_code, decl_code, accept_none, env):
"""
Generate Cython code to match objects to buffer specializations.
First try to get a numpy dtype object and match it against the individual
specializations. If that fails, try naively to coerce the object
to each specialization, which obtains the buffer each time and tries
to match the format string.
"""
# The first thing to find a match in this loop breaks out of the loop
pyx_code.put_chunk(
"""
""" + ("arg_is_pythran_compatible = False" if pythran_types else "") + """
if ndarray is not None:
if isinstance(arg, ndarray):
dtype = arg.dtype
""" + ("arg_is_pythran_compatible = True" if pythran_types else "") + """
elif __pyx_memoryview_check(arg):
arg_base = arg.base
if isinstance(arg_base, ndarray):
dtype = arg_base.dtype
else:
dtype = None
else:
dtype = None
itemsize = -1
if dtype is not None:
itemsize = dtype.itemsize
kind = ord(dtype.kind)
dtype_signed = kind == u'i'
""")
pyx_code.indent(2)
if pythran_types:
pyx_code.put_chunk(
"""
# Pythran only supports the endianness of the current compiler
byteorder = dtype.byteorder
if byteorder == "<" and not __Pyx_Is_Little_Endian():
arg_is_pythran_compatible = False
elif byteorder == ">" and __Pyx_Is_Little_Endian():
arg_is_pythran_compatible = False
if arg_is_pythran_compatible:
cur_stride = itemsize
shape = arg.shape
strides = arg.strides
for i in range(arg.ndim-1, -1, -1):
if (<Py_ssize_t>strides[i]) != cur_stride:
arg_is_pythran_compatible = False
break
cur_stride *= <Py_ssize_t> shape[i]
else:
arg_is_pythran_compatible = not (arg.flags.f_contiguous and (<Py_ssize_t>arg.ndim) > 1)
""")
self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types)
pyx_code.dedent(2)
if accept_none:
# If None is acceptable, then Cython <3.0 matched None with the
# first type. This behaviour isn't ideal, but keep it for backwards
# compatibility. Better behaviour would be to see if subsequent
# arguments give a stronger match.
pyx_code.put_chunk(
f"""
if arg is None:
return '{buffer_types[0].specialization_string}'
"""
)
# creating a Cython memoryview from a Python memoryview avoids the
# need to get the buffer multiple times, and we can
# also use it to check itemsizes etc
pyx_code.put_chunk(
"""
try:
arg_as_memoryview = memoryview(arg)
except (ValueError, TypeError):
pass
""")
with pyx_code.indenter("else:"):
for specialized_type in buffer_types:
self._buffer_parse_format_string_check(
pyx_code, decl_code, specialized_type, env)
def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types, pythran_types):
"""
If we have any buffer specializations, write out some variable and type declarations.
"""
decl_code.put_chunk(
f"""
ctypedef struct {Naming.memviewslice_cname}:
void *memview
void __PYX_XCLEAR_MEMVIEW({Naming.memviewslice_cname} *, int have_gil)
bint __pyx_memoryview_check(object)
""")
pyx_code['local_variable_declarations'].put_chunk(
f"""
cdef {Naming.memviewslice_cname} memslice
cdef Py_ssize_t itemsize
cdef bint dtype_signed
cdef Py_UCS4 kind
itemsize = -1
""")
if pythran_types:
pyx_code['local_variable_declarations'].put_chunk("""
cdef bint arg_is_pythran_compatible
cdef Py_ssize_t cur_stride
cdef Py_ssize_t i
""")
pyx_code['local_variable_declarations'].put_chunk(
"""
cdef memoryview arg_as_memoryview
"""
)
seen_typedefs = set()
seen_int_dtypes = set()
seen_structs = set()
for buffer_type in all_buffer_types:
dtype = buffer_type.dtype
dtype_name = self._dtype_name(dtype)
if dtype.is_struct_or_union:
if dtype_name not in seen_structs:
seen_structs.add(dtype_name)
decl_code.putln(
f'ctypedef {dtype.kind} {dtype_name} "{dtype.empty_declaration_code()}": pass')
elif dtype.is_typedef:
if dtype_name not in seen_typedefs:
seen_typedefs.add(dtype_name)
decl_code.putln(
f'ctypedef {dtype.resolve()} {dtype_name} "{dtype.empty_declaration_code()}"')
# 'is_signed' is also needed for typedefs.
if dtype.is_int:
if str(dtype) not in seen_int_dtypes:
seen_int_dtypes.add(str(dtype))
dtype_type = self._dtype_type(dtype)
pyx_code['local_variable_declarations'].put_chunk(
f"""
cdef bint {dtype_name}_is_signed
{dtype_name}_is_signed = not (<{dtype_type}> -1 > 0)
""")
def _split_fused_types(self, arg):
"""
Specialize fused types and split into normal types and buffer types.
"""
specialized_types = PyrexTypes.get_specialized_types(arg.type)
# Prefer long over int, etc by sorting (see type classes in PyrexTypes.py)
specialized_types.sort()
seen_py_type_names = set()
normal_types, buffer_types, pythran_types = [], [], []
has_object_fallback = False
for specialized_type in specialized_types:
py_type_name = specialized_type.py_type_name()
if py_type_name:
if py_type_name in seen_py_type_names:
continue
seen_py_type_names.add(py_type_name)
if py_type_name == 'object':
has_object_fallback = True
else:
normal_types.append(specialized_type)
elif specialized_type.is_pythran_expr:
pythran_types.append(specialized_type)
elif specialized_type.is_buffer or specialized_type.is_memoryviewslice:
buffer_types.append(specialized_type)
return normal_types, buffer_types, pythran_types, has_object_fallback
def _unpack_argument(self, pyx_code, arg, arg_tuple_idx, min_positional_args, default_idx):
pyx_code.put_chunk(
f"""
# PROCESSING ARGUMENT {arg_tuple_idx}
if {arg_tuple_idx} < len(<tuple>args):
arg = (<tuple>args)[{arg_tuple_idx}]
elif kwargs is not None and '{arg.name}' in <dict>kwargs:
arg = (<dict>kwargs)['{arg.name}']
else:
"""
)
pyx_code.indent()
if arg.default:
pyx_code.putln(
f"arg = (<tuple>defaults)[{default_idx}]")
elif arg_tuple_idx < min_positional_args:
pyx_code.putln(
'raise TypeError("Expected at least %d argument%s, got %d" % ('
f'''{min_positional_args}, {'"s"' if min_positional_args != 1 else '""'}, len(<tuple>args)))'''
)
else:
pyx_code.putln(f"""raise TypeError("Missing keyword-only argument: '%s'" % "{arg.name}")""")
pyx_code.dedent()
def make_fused_cpdef(self, orig_py_func, env, is_def):
"""
This creates the function that is indexable from Python and does
runtime dispatch based on the argument types. The function gets the
arg tuple and kwargs dict (or None) and the defaults tuple
as arguments from the Binding Fused Function's tp_call.
"""
from . import TreeFragment, Code, UtilityCode
min_positional_args = (
self.node.num_required_args - self.node.num_required_kw_args
if is_def else
sum(1 for arg in self.node.args if arg.default is None)
)
pyx_code = Code.PyxCodeWriter()
decl_code = Code.PyxCodeWriter()
type_mapper = Code.PyxCodeWriter()
decl_code.put_chunk(
"""
cdef extern from *:
type __Pyx_ImportNumPyArrayTypeIfAvailable()
# from FusedFunction utility code
object __pyx_ff_match_signatures_single(dict signatures, dest_type)
object __pyx_ff_match_signatures(dict signatures, tuple dest_sig, dict sigindex)
""")
decl_code.indent()
pyx_code.put_chunk(
"""
def __pyx_fused_cpdef(signatures, args, kwargs, defaults, _fused_sigindex={}):
# FIXME: use a typed signature - currently fails badly because
# default arguments inherit the types we specify here!
if kwargs is not None and not kwargs:
kwargs = None
# instance check body
""")
pyx_code.indent() # indent following code to function body
pyx_code.named_insertion_point("imports")
fused_index = 0
default_idx = 0
all_buffer_types = OrderedSet()
seen_fused_types = set()
for i, arg in enumerate(self.node.args):
if arg.type.is_fused:
arg_fused_types = arg.type.get_fused_types()
if len(arg_fused_types) > 1:
raise NotImplementedError("Determination of more than one fused base "
"type per argument is not implemented.")
fused_type = arg_fused_types[0]
if arg.type.is_fused and fused_type not in seen_fused_types:
seen_fused_types.add(fused_type)
normal_types, buffer_types, pythran_types, has_object_fallback = self._split_fused_types(arg)
self._unpack_argument(pyx_code, arg, i, min_positional_args, default_idx)
mapper_arg_types = ['object', 'type']
mapper_arg_names = ['arg']
if buffer_types or pythran_types:
mapper_arg_names.append('ndarray')
mapper_sig = ', '.join(f"{atype} {aname}" for atype, aname in zip(mapper_arg_types, mapper_arg_names))
mapper_args = ', '.join(mapper_arg_names)
mapper_decl_code = type_mapper.insertion_point()
mapper_decl_code.put_chunk(
"""
cdef extern from *:
void __pyx_PyErr_Clear "PyErr_Clear" ()
int __Pyx_Is_Little_Endian()
"""
)
mapper_decl_code.indent()
type_mapper.putln('')
type_mapper.putln("@TYPE_MAPPER_CNAME_PLACEHOLDER")
with type_mapper.indenter(f"cdef str map_fused_type({mapper_sig}):"):
type_mapper.named_insertion_point("local_variable_declarations")
if normal_types:
self._fused_instance_checks(normal_types, type_mapper, env)
if buffer_types or pythran_types:
mapper_buffer_types = OrderedSet()
mapper_buffer_types.update(buffer_types)
mapper_buffer_types.update(ty.org_buffer for ty in pythran_types)
self._buffer_declarations(type_mapper, mapper_decl_code, mapper_buffer_types, pythran_types)
self._buffer_checks(
buffer_types, pythran_types, type_mapper, mapper_decl_code,
arg.accept_none, env)
type_mapper.putln("return 'object'" if has_object_fallback else "return None")
type_mapper_impl = type_mapper.getvalue()
type_mapper.reset()
# Generate a unique name for the mapper function based on type declarations and implementation.
impl_hash = hashlib.sha256(type_mapper_impl.encode('utf-8')).hexdigest()
type_mapper_cname = (
f"__pyx_ff_map_fused_{impl_hash[:6]}"
f"_{len(mapper_arg_names)}_{len(fused_type.types)}"
f"_{PyrexTypes.type_list_identifier(fused_type.types)}"
)
type_mapper_impl = type_mapper_impl.replace(
"\n@TYPE_MAPPER_CNAME_PLACEHOLDER\n", f"\n@cname('{type_mapper_cname}')\n")
# print(''.join(f"{i:3d} {line}" for i, line in enumerate(type_mapper_impl.splitlines(keepends=True))))
env.use_utility_code(
UtilityCode.CythonUtilityCode(type_mapper_impl, name=type_mapper_cname))
decl_code.putln(f"str {type_mapper_cname}({mapper_sig})")
pyx_code.putln(f"dest_sig{fused_index} = {type_mapper_cname}({mapper_args})")
fused_index += 1
all_buffer_types.update(buffer_types)
all_buffer_types.update(ty.org_buffer for ty in pythran_types)
if arg.default:
default_idx += 1
if all_buffer_types:
env.use_utility_code(
Code.UtilityCode.load_cached("IsLittleEndian", "ModuleSetupCode.c"))
env.use_utility_code(
Code.UtilityCode.load_cached("Import", "ImportExport.c"))
env.use_utility_code(
Code.UtilityCode.load_cached("ImportNumPyArray", "ImportExport.c"))
pyx_code['imports'].put_chunk(
"""
cdef type ndarray
ndarray = __Pyx_ImportNumPyArrayTypeIfAvailable()
""")
if len(seen_fused_types) == 1:
# Fast and common case: a single fused type across all arguments.
env.use_utility_code(
UtilityCode.CythonUtilityCode.load("match_signatures_single", "FusedFunction.pyx"))
pyx_code.put_chunk(
"""
return __pyx_ff_match_signatures_single(<dict> signatures, dest_sig0)
"""
)
else:
env.use_utility_code(
UtilityCode.CythonUtilityCode.load("match_signatures", "FusedFunction.pyx"))
dest_sig_tuple = ', '.join(f'dest_sig{i}' for i in range(len(seen_fused_types)))
pyx_code.put_chunk(
f"""
return __pyx_ff_match_signatures(<dict> signatures, ({dest_sig_tuple}), <dict> _fused_sigindex)
"""
)
fragment_code = pyx_code.getvalue()
# print(decl_code.getvalue())
# print(fragment_code)
# print(''.join(f"{i:3d} {line}" for i, line in enumerate(fragment_code.splitlines(keepends=True))))
from .Optimize import ConstantFolding
fragment = TreeFragment.TreeFragment(
fragment_code, level='module', pipeline=[ConstantFolding()])
ast = TreeFragment.SetPosTransform(self.node.pos)(fragment.root)
UtilityCode.declare_declarations_in_scope(
decl_code.getvalue(), env.global_scope())
ast.scope = env
# FIXME: for static methods of cdef classes, we build the wrong signature here: first arg becomes 'self'
ast.analyse_declarations(env)
py_func = ast.stats[-1] # the DefNode
self.fragment_scope = ast.scope
if isinstance(self.node, DefNode):
py_func.specialized_cpdefs = self.nodes[:]
else:
py_func.specialized_cpdefs = [n.py_func for n in self.nodes]
return py_func
def update_fused_defnode_entry(self, env):
copy_attributes = (
'name', 'pos', 'cname', 'func_cname', 'pyfunc_cname',
'pymethdef_cname', 'doc', 'doc_cname', 'is_member',
'scope'
)
entry = self.py_func.entry
for attr in copy_attributes:
setattr(entry, attr,
getattr(self.orig_py_func.entry, attr))
self.py_func.name = self.orig_py_func.name
self.py_func.doc = self.orig_py_func.doc
env.entries.pop('__pyx_fused_cpdef', None)
if isinstance(self.node, DefNode):
env.entries[entry.name] = entry
else:
env.entries[entry.name].as_variable = entry
env.pyfunc_entries.append(entry)
self.py_func.entry.fused_cfunction = self
def_nodes = []
for node in self.nodes:
if isinstance(self.node, DefNode):
def_nodes.append(node)
node.fused_py_func = self.py_func
else:
def_nodes.append(node.py_func)
node.py_func.fused_py_func = self.py_func
node.entry.as_variable = entry
self.synthesize_defnodes(def_nodes)
    def analyse_expressions(self, env):
        """
        Analyse the expressions. Take care to only evaluate default arguments
        once and clone the result for all specializations
        """
        # Ensure complex-number support code exists for any complex specialization.
        for fused_compound_type in self.fused_compound_types:
            for fused_type in fused_compound_type.get_fused_types():
                for specialization_type in fused_type.types:
                    if specialization_type.is_complex:
                        specialization_type.create_declaration_utility_code(env)
        if self.py_func:
            self.__signatures__ = self.__signatures__.analyse_expressions(env)
            self.py_func = self.py_func.analyse_expressions(env)
            self.resulting_fused_function = self.resulting_fused_function.analyse_expressions(env)
            self.fused_func_assignment = self.fused_func_assignment.analyse_expressions(env)
        self.defaults = defaults = []
        for arg in self.node.args:
            if arg.default:
                arg.default = arg.default.analyse_expressions(env)
                if arg.default.is_literal:
                    # Literals are cheap; a shallow copy per specialization suffices.
                    defaults.append(copy.copy(arg.default))
                else:
                    # coerce the argument to temp since CloneNode really requires a temp
                    defaults.append(ProxyNode(arg.default.coerce_to_temp(env)))
            else:
                defaults.append(None)
        # Re-apply the shared defaults to every specialization (not the dispatcher).
        for i, stat in enumerate(self.stats):
            stat = self.stats[i] = stat.analyse_expressions(env)
            if isinstance(stat, FuncDefNode) and stat is not self.py_func:
                # the dispatcher specifically doesn't want its defaults overriding
                for arg, default in zip(stat.args, defaults):
                    if default is not None:
                        if default.is_literal:
                            arg.default = default.coerce_to(arg.type, env)
                        else:
                            arg.default = CloneNode(default).analyse_expressions(env).coerce_to(arg.type, env)
        if self.py_func:
            # Build one shared defaults tuple and hand clones of it to the
            # fused function object and each specialized PyCFunction.
            args = [CloneNode(default) for default in defaults if default]
            self.defaults_tuple = TupleNode(self.pos, args=args)
            self.defaults_tuple = self.defaults_tuple.analyse_types(env, skip_children=True).coerce_to_pyobject(env)
            self.defaults_tuple = ProxyNode(self.defaults_tuple)
            fused_func = self.resulting_fused_function.arg
            fused_func.defaults_tuple = CloneNode(self.defaults_tuple)
            for i, pycfunc in enumerate(self.specialized_pycfuncs):
                pycfunc = self.specialized_pycfuncs[i] = pycfunc.analyse_types(env)
                pycfunc.defaults_tuple = CloneNode(self.defaults_tuple)
        return self
    def synthesize_defnodes(self, nodes):
        """
        Create the __signatures__ dict of PyCFunctionNode specializations.
        """
        # For the moment, fused functions do not support METH_FASTCALL
        for node in nodes:
            node.entry.signature.use_fastcall = False
        # Key each specialization by its specialized signature string.
        signatures = [StringEncoding.EncodedString(node.specialized_signature_string)
                      for node in nodes]
        keys = [ExprNodes.UnicodeNode(node.pos, value=sig)
                for node, sig in zip(nodes, signatures)]
        values = [ExprNodes.PyCFunctionNode.from_defnode(node, binding=True)
                  for node in nodes]
        self.__signatures__ = ExprNodes.DictNode.from_pairs(self.pos, zip(keys, values))
        self.specialized_pycfuncs = values
        for pycfuncnode in values:
            pycfuncnode.is_specialization = True
        # use code object from first defnode to get as close to a correct signature as possible
        self.py_func.code_object = CodeObjectNode(nodes[0])
    def generate_function_definitions(self, env, code):
        """Emit C definitions for the dispatcher and each used specialization."""
        if self.py_func:
            self.py_func.pymethdef_required = True
            self.fused_func_assignment.generate_function_definitions(env, code)
        from . import Options
        for stat in self.stats:
            # Skip unused specializations unless cimport_from_pyx forces
            # non-extern functions to be emitted anyway.
            if isinstance(stat, FuncDefNode) and (
                    stat.entry.used or
                    (Options.cimport_from_pyx and not stat.entry.visibility == 'extern')):
                code.mark_pos(stat.pos)
                stat.generate_function_definitions(env, code)
    def generate_execution_code(self, code):
        # Note: all def function specialization are wrapped in PyCFunction
        # nodes in the self.__signatures__ dictnode.
        for default in self.defaults:
            if default is not None:
                default.generate_evaluation_code(code)
        if self.py_func:
            self.defaults_tuple.generate_evaluation_code(code)
        super().generate_execution_code(code)
        if self.__signatures__:
            signatures = self.__signatures__
            signatures.generate_evaluation_code(code)
            fused_func = self.resulting_fused_function
            fused_func.generate_evaluation_code(code)
            # Attach the signatures dict directly onto the fused function object.
            code.putln(
                f"((__pyx_FusedFunctionObject *) {fused_func.result()})->__signatures__ = {signatures.result()};")
            signatures.generate_giveref(code)
            signatures.generate_post_assignment_code(code)
            signatures.free_temps(code)
            self.fused_func_assignment.generate_execution_code(code)
            # Dispose of results
            fused_func.generate_disposal_code(code)
            fused_func.free_temps(code)
            self.defaults_tuple.generate_disposal_code(code)
            self.defaults_tuple.free_temps(code)
        for default in self.defaults:
            if default is not None:
                default.generate_disposal_code(code)
                default.free_temps(code)
def annotate(self, code):
for stat in self.stats:
stat.annotate(code)
|
FusedCFuncDefNode
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-gcp/dagster_gcp/gcs/io_manager.py
|
{
"start": 2907,
"end": 6027
}
|
class ____(ConfigurableIOManager):
    """Persistent IO manager using GCS for storage.
    Serializes objects via pickling. Suitable for objects storage for distributed executors, so long
    as each execution node has network connectivity and credentials for GCS and the backing bucket.
    Assigns each op output to a unique filepath containing run ID, step key, and output name.
    Assigns each asset to a single filesystem path, at ``<base_dir>/<asset_key>``. If the asset key
    has multiple components, the final component is used as the name of the file, and the preceding
    components as parent directories under the base_dir.
    Subsequent materializations of an asset will overwrite previous materializations of that asset.
    With a base directory of ``/my/base/path``, an asset with key
    ``AssetKey(["one", "two", "three"])`` would be stored in a file called ``three`` in a directory
    with path ``/my/base/path/one/two/``.
    Example usage:
    1. Attach this IO manager to a set of assets.
    .. code-block:: python
        from dagster import asset, Definitions
        from dagster_gcp.gcs import GCSPickleIOManager, GCSResource
        @asset
        def asset1():
            # create df ...
            return df
        @asset
        def asset2(asset1):
            return asset1[:5]
        Definitions(
            assets=[asset1, asset2],
            resources={
                "io_manager": GCSPickleIOManager(
                    gcs_bucket="my-cool-bucket",
                    gcs_prefix="my-cool-prefix",
                    gcs=GCSResource(project="my-cool-project")
                ),
            }
        )
    2. Attach this IO manager to your job to make it available to your ops.
    .. code-block:: python
        from dagster import job
        from dagster_gcp.gcs import GCSPickleIOManager, GCSResource
        @job(
            resource_defs={
                "io_manager": GCSPickleIOManager(
                    gcs=GCSResource(project="my-cool-project"),
                    gcs_bucket="my-cool-bucket",
                    gcs_prefix="my-cool-prefix"
                ),
            }
        )
        def my_job():
            ...
    """
    gcs: ResourceDependency[GCSResource]
    gcs_bucket: str = Field(description="GCS bucket to store files")
    gcs_prefix: str = Field(default="dagster", description="Prefix to add to all file paths")
    @classmethod
    def _is_dagster_maintained(cls) -> bool:
        return True
    @property
    @cached_method
    def _internal_io_manager(self) -> PickledObjectGCSIOManager:
        # Built lazily and only once (cached_method), so the GCS client is
        # created on first use rather than at resource construction time.
        return PickledObjectGCSIOManager(
            bucket=self.gcs_bucket, client=self.gcs.get_client(), prefix=self.gcs_prefix
        )
    def load_input(self, context: InputContext) -> Any:
        # Delegate all pickling/unpickling and path logic to the internal manager.
        return self._internal_io_manager.load_input(context)
    def handle_output(self, context: OutputContext, obj: Any) -> None:
        self._internal_io_manager.handle_output(context, obj)
@deprecated(
breaking_version="2.0",
additional_warn_text="Please use GCSPickleIOManager instead.",
)
|
GCSPickleIOManager
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/model_query_annotated.py
|
{
"start": 911,
"end": 1445
}
|
class ____:
    a: Annotated[Optional[float], Color.RED] = None  # RED metadata: expected flagged (see test2_alarm1)
    b: Annotated[Optional[float], Color.BLUE] = None  # BLUE metadata: expected not flagged (test2_noalarm1)
    x: Annotated[Optional[float], Color.RED, "foo"] = None  # extra metadata args, still RED (test2_alarm2)
def test2_alarm1(c: Test2_C) -> None:
    c.a = 1.01
    _test_sink(c.a)  # expect issue: Test2_C.a carries RED annotation
def test2_alarm2(c: Test2_C) -> None:
    c.x = 1.01
    _test_sink(c.x)  # expect issue: Test2_C.x carries RED annotation
def test2_noalarm1(c: Test2_C) -> None:
    c.b = 1.01
    _test_sink(c.b)  # no issue expected: Test2_C.b carries BLUE annotation
def test3_noalarm1(a: str, b: int) -> None:
    _test_sink(a)  # no issue expected for plain str parameter
def test3_noalarm2(a: str, b: int) -> None:
    _test_sink(b)  # no issue expected for plain int parameter
|
Test2_C
|
python
|
huggingface__transformers
|
src/transformers/models/mamba2/modeling_mamba2.py
|
{
"start": 41910,
"end": 47769
}
|
class ____(Mamba2PreTrainedModel, GenerationMixin):
    # Mamba2 backbone with a language-modeling head; no weight tying by default.
    _tied_weights_keys = {}
    def __init__(self, config):
        super().__init__(config)
        self.backbone = Mamba2Model(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        return self.backbone.get_input_embeddings()
    def set_input_embeddings(self, new_embeddings):
        return self.backbone.set_input_embeddings(new_embeddings)
    def prepare_inputs_for_generation(
        self,
        input_ids,
        inputs_embeds=None,
        use_cache=None,
        cache_params: Optional[Mamba2Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs,
    ):
        # Overwritten -- uses `cache_params` as opposed to `past_key_values`
        model_inputs = {"input_ids": input_ids.contiguous()}
        # Prefill: no cache yet, so allocate one and a full-width cache_position.
        if use_cache and cache_params is None:
            # we initialize the `cache_position` to full size of `conv_states` at prefill stage
            # considering padding will be applied when input length is shorter, and truncation
            # will be applied when it is longer, so it will be equivalent to always have it match
            # the length of `cache_params.conv_states`, which is `config.conv_kernel`
            cache_position = torch.arange(0, self.backbone.config.conv_kernel, device=input_ids.device)
            if inputs_embeds is not None:
                model_inputs = {"inputs_embeds": inputs_embeds}
                max_batch_size = inputs_embeds.size(0)
            else:
                max_batch_size = input_ids.size(0)
            cache_params = Mamba2Cache(self.backbone.config, max_batch_size, device=self.device, dtype=self.dtype)
        # Decoding step: only the newest token is fed; the mask is dropped.
        if use_cache and cache_position[0] > 0:
            model_inputs["input_ids"] = input_ids[:, -1].unsqueeze(-1).contiguous()
            attention_mask = None
        if not use_cache and inputs_embeds is not None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        model_inputs.update(
            {
                "cache_params": cache_params,
                "use_cache": use_cache,
                "cache_position": cache_position,
                "attention_mask": attention_mask,
            }
        )
        # Forward ALL kwargs that are uninitialized (e.g. `use_cache`).
        for key, value in kwargs.items():
            if key not in model_inputs:
                model_inputs[key] = value
        return model_inputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        cache_params: Optional[Mamba2Cache] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs,  # for now we need this for generation and loss_function
    ) -> Union[tuple, Mamba2CausalLMOutput]:
        r"""
        cache_params (`Mamba2Cache`, *optional*):
            If passed along, the model uses the previous state in all the blocks (which will give the output for the
            `input_ids` provided as if the model add `state_input_ids + input_ids` as context).
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        use_cache (`bool`, *optional*):
            If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
        cache_position (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            The position of the current input in the cache. This is used to ensure that the cache is correctly updated.
            If `cache_params` is passed, `cache_position` should also be passed.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        mamba2_outputs = self.backbone(
            input_ids,
            cache_params=cache_params,
            inputs_embeds=inputs_embeds,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            use_cache=use_cache,
            cache_position=cache_position,
            attention_mask=attention_mask,
        )
        hidden_states = mamba2_outputs[0]
        # Only compute necessary logits
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        # Cast to the head's dtype before projection; logits are returned in float32.
        logits = self.lm_head(hidden_states[:, slice_indices, :].to(self.lm_head.weight.dtype)).float()
        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
        if not return_dict:
            output = (logits,) + mamba2_outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return Mamba2CausalLMOutput(
            loss=loss,
            logits=logits,
            cache_params=mamba2_outputs.cache_params,
            hidden_states=mamba2_outputs.hidden_states,
        )
__all__ = ["Mamba2ForCausalLM", "Mamba2Model", "Mamba2PreTrainedModel"]
|
Mamba2ForCausalLM
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/hooks/test_batch_client.py
|
{
"start": 17914,
"end": 21383
}
|
class ____:
    """Unit tests for BatchClientHook's retry/delay helpers (no real AWS calls)."""
    @mock.patch.dict("os.environ", AWS_DEFAULT_REGION=AWS_REGION)
    @mock.patch.dict("os.environ", AWS_ACCESS_KEY_ID=AWS_ACCESS_KEY_ID)
    @mock.patch.dict("os.environ", AWS_SECRET_ACCESS_KEY=AWS_SECRET_ACCESS_KEY)
    def setup_method(self, method):
        # Fake credentials/region come from the patched environment above.
        self.batch_client = BatchClientHook(aws_conn_id="airflow_test", region_name=AWS_REGION)
        # We're mocking all actual AWS calls and don't need a connection. This
        # avoids an Airflow warning about connection cannot be found.
        self.batch_client.get_connection = lambda _: None
    def test_init(self):
        assert self.batch_client.max_retries == self.batch_client.MAX_RETRIES
        assert self.batch_client.status_retries == self.batch_client.STATUS_RETRIES
        assert self.batch_client.region_name == AWS_REGION
        assert self.batch_client.aws_conn_id == "airflow_test"
    def test_add_jitter(self):
        # Jitter must stay within [minima, width].
        minima = 0
        width = 5
        result = self.batch_client.add_jitter(0, width=width, minima=minima)
        assert result >= minima
        assert result <= width
    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.random.uniform")
    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.time.sleep")
    def test_delay_defaults(self, mock_sleep, mock_uniform):
        assert BatchClientHook.DEFAULT_DELAY_MIN == 1
        assert BatchClientHook.DEFAULT_DELAY_MAX == 10
        mock_uniform.return_value = 0
        self.batch_client.delay()
        mock_uniform.assert_called_once_with(
            BatchClientHook.DEFAULT_DELAY_MIN, BatchClientHook.DEFAULT_DELAY_MAX
        )
        mock_sleep.assert_called_once_with(0)
    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.random.uniform")
    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.time.sleep")
    def test_delay_with_zero(self, mock_sleep, mock_uniform):
        self.batch_client.delay(0)
        mock_uniform.assert_called_once_with(0, 1)  # in add_jitter
        mock_sleep.assert_called_once_with(mock_uniform.return_value)
    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.random.uniform")
    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.time.sleep")
    def test_delay_with_int(self, mock_sleep, mock_uniform):
        self.batch_client.delay(5)
        mock_uniform.assert_called_once_with(4, 6)  # in add_jitter
        mock_sleep.assert_called_once_with(mock_uniform.return_value)
    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.random.uniform")
    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.time.sleep")
    def test_delay_with_float(self, mock_sleep, mock_uniform):
        self.batch_client.delay(5.0)
        mock_uniform.assert_called_once_with(4.0, 6.0)  # in add_jitter
        mock_sleep.assert_called_once_with(mock_uniform.return_value)
    @pytest.mark.parametrize(
        ("tries", "lower", "upper"),
        [
            (0, 0, 1),
            (1, 0, 2),
            (2, 0, 3),
            (3, 1, 5),
            (4, 2, 7),
            (5, 3, 11),
            (6, 4, 14),
            (7, 6, 19),
            (8, 8, 25),
            (9, 10, 31),
            (45, 200, 600),  # > 40 tries invokes maximum delay allowed
        ],
    )
    def test_exponential_delay(self, tries, lower, upper):
        # Bounds are ranges because exponential_delay includes randomness.
        result = self.batch_client.exponential_delay(tries)
        assert result >= lower
        assert result <= upper
|
TestBatchClientDelays
|
python
|
PyCQA__pylint
|
tests/functional/n/non/non_str_assignment_to_dunder_name.py
|
{
"start": 214,
"end": 1457
}
|
class ____():  # dummy class used as a __name__ assignment target below
    pass
def example_function():  # dummy function target for __name__ assignments
    pass
def returns_str():  # inferred return type: str
    return "abcd"
def returns_int():  # inferred return type: int
    return 0
def returns_tuple():  # inferred return type: tuple
    return 0, "abc"
# Might not be thorough if same hash seed is used in testing...
def returns_random_type():  # return type cannot be inferred statically
    if random.randint(0, 1) > 0:
        return 0
    return "abc"
ExampleClass.__name__ = 1  # [non-str-assignment-to-dunder-name]
ExampleClass.__name__ = True  # [non-str-assignment-to-dunder-name]
ExampleClass.__name__ = returns_tuple()  # [non-str-assignment-to-dunder-name]
ExampleClass.__name__ = returns_int()  # [non-str-assignment-to-dunder-name]
ExampleClass.__name__ = "foo"  # str literal: no message expected
ExampleClass.__name__ = returns_str()  # inferred str: no message expected
ExampleClass.__name__ = returns_random_type()  # ambiguous inference: no message expected
ExampleClass.__name__ = Unknown  # uninferable name: no message expected
example_function.__name__ = 1  # [non-str-assignment-to-dunder-name]
example_function.__name__ = True  # [non-str-assignment-to-dunder-name]
example_function.__name__ = returns_tuple()  # [non-str-assignment-to-dunder-name]
example_function.__name__ = returns_int()  # [non-str-assignment-to-dunder-name]
example_function.__name__ = "foo"  # str literal: no message expected
example_function.__name__ = returns_str()  # inferred str: no message expected
example_function.__name__ = returns_random_type()  # ambiguous inference: no message expected
example_function.__name__ = Unknown  # uninferable name: no message expected
|
ExampleClass
|
python
|
pydantic__pydantic
|
pydantic/types.py
|
{
"start": 17708,
"end": 33703
}
|
class ____(annotated_types.GroupedMetadata):
    """!!! abstract "Usage Documentation"
    [String types](./standard_library_types.md#strings)
    A field metadata class to apply constraints to `str` types.
    Use this class as an annotation via [`Annotated`](https://docs.python.org/3/library/typing.html#typing.Annotated), as seen below.
    Attributes:
        strip_whitespace: Whether to remove leading and trailing whitespace.
        to_upper: Whether to convert the string to uppercase.
        to_lower: Whether to convert the string to lowercase.
        strict: Whether to validate the string in strict mode.
        min_length: The minimum length of the string.
        max_length: The maximum length of the string.
        pattern: A regex pattern that the string must match.
    Example:
        ```python
        from typing import Annotated
        from pydantic.types import StringConstraints
        ConstrainedStr = Annotated[str, StringConstraints(min_length=1, max_length=10)]
        ```
    """
    strip_whitespace: bool | None = None
    to_upper: bool | None = None
    to_lower: bool | None = None
    strict: bool | None = None
    min_length: int | None = None
    max_length: int | None = None
    pattern: str | Pattern[str] | None = None
    def __iter__(self) -> Iterator[BaseMetadata]:
        # GroupedMetadata protocol: expand this object into individual
        # metadata items. Length/strict map to standard annotated-types
        # constraints; the pydantic-specific options are bundled into one
        # general-metadata object below.
        if self.min_length is not None:
            yield MinLen(self.min_length)
        if self.max_length is not None:
            yield MaxLen(self.max_length)
        if self.strict is not None:
            yield Strict(self.strict)
        if (
            self.strip_whitespace is not None
            or self.pattern is not None
            or self.to_lower is not None
            or self.to_upper is not None
        ):
            yield _fields.pydantic_general_metadata(
                strip_whitespace=self.strip_whitespace,
                to_upper=self.to_upper,
                to_lower=self.to_lower,
                pattern=self.pattern,
            )
def constr(
    *,
    strip_whitespace: bool | None = None,
    to_upper: bool | None = None,
    to_lower: bool | None = None,
    strict: bool | None = None,
    min_length: int | None = None,
    max_length: int | None = None,
    pattern: str | Pattern[str] | None = None,
) -> type[str]:
    """
    !!! warning "Discouraged"
        This function is **discouraged** in favor of using
        [`Annotated`](https://docs.python.org/3/library/typing.html#typing.Annotated) with
        [`StringConstraints`][pydantic.types.StringConstraints] instead.
        This function will be **deprecated** in Pydantic 3.0.
        The reason is that `constr` returns a type, which doesn't play well with static analysis tools.
        === ":x: Don't do this"
            ```python
            from pydantic import BaseModel, constr
            class Foo(BaseModel):
                bar: constr(strip_whitespace=True, to_upper=True, pattern=r'^[A-Z]+$')
            ```
        === ":white_check_mark: Do this"
            ```python
            from typing import Annotated
            from pydantic import BaseModel, StringConstraints
            class Foo(BaseModel):
                bar: Annotated[
                    str,
                    StringConstraints(
                        strip_whitespace=True, to_upper=True, pattern=r'^[A-Z]+$'
                    ),
                ]
            ```
    A wrapper around `str` that allows for additional constraints.
    ```python
    from pydantic import BaseModel, constr
    class Foo(BaseModel):
        bar: constr(strip_whitespace=True, to_upper=True)
    foo = Foo(bar=' hello ')
    print(foo)
    #> bar='HELLO'
    ```
    Args:
        strip_whitespace: Whether to remove leading and trailing whitespace.
        to_upper: Whether to turn all characters to uppercase.
        to_lower: Whether to turn all characters to lowercase.
        strict: Whether to validate the string in strict mode.
        min_length: The minimum length of the string.
        max_length: The maximum length of the string.
        pattern: A regex pattern to validate the string against.
    Returns:
        The wrapped string type.
    """  # noqa: D212
    # Simply forwards every keyword to StringConstraints inside an Annotated alias.
    return Annotated[  # pyright: ignore[reportReturnType]
        str,
        StringConstraints(
            strip_whitespace=strip_whitespace,
            to_upper=to_upper,
            to_lower=to_lower,
            strict=strict,
            min_length=min_length,
            max_length=max_length,
            pattern=pattern,
        ),
    ]
StrictStr = Annotated[str, Strict()]
"""A string that must be validated in strict mode."""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~ COLLECTION TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Element type for set/frozenset constraints; set members must be hashable.
HashableItemType = TypeVar('HashableItemType', bound=Hashable)
def conset(
    item_type: type[HashableItemType], *, min_length: int | None = None, max_length: int | None = None
) -> type[set[HashableItemType]]:
    """Build an annotated ``set`` type constrained to the given size bounds.

    Args:
        item_type: Element type of the set (must be hashable).
        min_length: Smallest allowed number of elements, if any.
        max_length: Largest allowed number of elements, if any.

    Returns:
        The constrained set type, expressed via ``Annotated``.
    """
    size_bounds = annotated_types.Len(min_length or 0, max_length)
    return Annotated[set[item_type], size_bounds]  # pyright: ignore[reportReturnType]
def confrozenset(
    item_type: type[HashableItemType], *, min_length: int | None = None, max_length: int | None = None
) -> type[frozenset[HashableItemType]]:
    """Build an annotated ``frozenset`` type constrained to the given size bounds.

    Args:
        item_type: Element type of the frozenset (must be hashable).
        min_length: Smallest allowed number of elements, if any.
        max_length: Largest allowed number of elements, if any.

    Returns:
        The constrained frozenset type, expressed via ``Annotated``.
    """
    size_bounds = annotated_types.Len(min_length or 0, max_length)
    return Annotated[frozenset[item_type], size_bounds]  # pyright: ignore[reportReturnType]
AnyItemType = TypeVar('AnyItemType')
def conlist(
    item_type: type[AnyItemType],
    *,
    min_length: int | None = None,
    max_length: int | None = None,
    unique_items: bool | None = None,
) -> type[list[AnyItemType]]:
    """Build an annotated [`list`][] type with length validation.

    Args:
        item_type: Element type of the list.
        min_length: Smallest allowed number of items, if any.
        max_length: Largest allowed number of items, if any.
        unique_items: Removed; passing any value raises `PydanticUserError`.
            Use `Set` instead (see
            https://github.com/pydantic/pydantic-core/issues/296).

    Returns:
        The constrained list type, expressed via ``Annotated``.
    """
    # `unique_items` was removed from pydantic-core; reject it explicitly.
    if unique_items is not None:
        raise PydanticUserError(
            (
                '`unique_items` is removed, use `Set` instead'
                '(this feature is discussed in https://github.com/pydantic/pydantic-core/issues/296)'
            ),
            code='removed-kwargs',
        )
    length_bounds = annotated_types.Len(min_length or 0, max_length)
    return Annotated[list[item_type], length_bounds]  # pyright: ignore[reportReturnType]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~ IMPORT STRING TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
AnyType = TypeVar('AnyType')
if TYPE_CHECKING:
    # For type checkers, ImportString[T] is just T (the annotation is erased).
    ImportString = Annotated[AnyType, ...]
else:
    class ImportString:
        """A type that can be used to import a Python object from a string.
        `ImportString` expects a string and loads the Python object importable at that dotted path.
        Attributes of modules may be separated from the module by `:` or `.`, e.g. if `'math:cos'` is provided,
        the resulting field value would be the function `cos`. If a `.` is used and both an attribute and submodule
        are present at the same path, the module will be preferred.
        On model instantiation, pointers will be evaluated and imported. There is
        some nuance to this behavior, demonstrated in the examples below.
        ```python
        import math
        from pydantic import BaseModel, Field, ImportString, ValidationError
        class ImportThings(BaseModel):
            obj: ImportString
        # A string value will cause an automatic import
        my_cos = ImportThings(obj='math.cos')
        # You can use the imported function as you would expect
        cos_of_0 = my_cos.obj(0)
        assert cos_of_0 == 1
        # A string whose value cannot be imported will raise an error
        try:
            ImportThings(obj='foo.bar')
        except ValidationError as e:
            print(e)
            '''
            1 validation error for ImportThings
            obj
            Invalid python path: No module named 'foo.bar' [type=import_error, input_value='foo.bar', input_type=str]
            '''
        # Actual python objects can be assigned as well
        my_cos = ImportThings(obj=math.cos)
        my_cos_2 = ImportThings(obj='math.cos')
        my_cos_3 = ImportThings(obj='math:cos')
        assert my_cos == my_cos_2 == my_cos_3
        # You can set default field value either as Python object:
        class ImportThingsDefaultPyObj(BaseModel):
            obj: ImportString = math.cos
        # or as a string value (but only if used with `validate_default=True`)
        class ImportThingsDefaultString(BaseModel):
            obj: ImportString = Field(default='math.cos', validate_default=True)
        my_cos_default1 = ImportThingsDefaultPyObj()
        my_cos_default2 = ImportThingsDefaultString()
        assert my_cos_default1.obj == my_cos_default2.obj == math.cos
        # note: this will not work!
        class ImportThingsMissingValidateDefault(BaseModel):
            obj: ImportString = 'math.cos'
        my_cos_default3 = ImportThingsMissingValidateDefault()
        assert my_cos_default3.obj == 'math.cos'  # just string, not evaluated
        ```
        Serializing an `ImportString` type to json is also possible.
        ```python
        from pydantic import BaseModel, ImportString
        class ImportThings(BaseModel):
            obj: ImportString
        # Create an instance
        m = ImportThings(obj='math.cos')
        print(m)
        #> obj=<built-in function cos>
        print(m.model_dump_json())
        #> {"obj":"math.cos"}
        ```
        """
        @classmethod
        def __class_getitem__(cls, item: AnyType) -> AnyType:
            # `ImportString[T]` attaches this marker as annotation metadata on T.
            return Annotated[item, cls()]
        @classmethod
        def __get_pydantic_core_schema__(
            cls, source: type[Any], handler: GetCoreSchemaHandler
        ) -> core_schema.CoreSchema:
            # JSON serialization emits the dotted path rather than the object itself.
            serializer = core_schema.plain_serializer_function_ser_schema(cls._serialize, when_used='json')
            if cls is source:
                # Treat bare usage of ImportString (`schema is None`) as the same as ImportString[Any]
                return core_schema.no_info_plain_validator_function(
                    function=_validators.import_string, serialization=serializer
                )
            else:
                # Parameterized form: import the string first, then validate
                # against the wrapped type's schema.
                return core_schema.no_info_before_validator_function(
                    function=_validators.import_string, schema=handler(source), serialization=serializer
                )
        @classmethod
        def __get_pydantic_json_schema__(cls, cs: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue:
            # In JSON schema terms an ImportString is just a string.
            return handler(core_schema.str_schema())
        @staticmethod
        def _serialize(v: Any) -> str:
            if isinstance(v, ModuleType):
                return v.__name__
            elif hasattr(v, '__module__') and hasattr(v, '__name__'):
                return f'{v.__module__}.{v.__name__}'
            # Handle special cases for sys.XXX streams
            # if we see more of these, we should consider a more general solution
            elif hasattr(v, 'name'):
                if v.name == '<stdout>':
                    return 'sys.stdout'
                elif v.name == '<stdin>':
                    return 'sys.stdin'
                elif v.name == '<stderr>':
                    return 'sys.stderr'
            # Fallback: return the value unchanged and let the caller serialize it.
            return v
        def __repr__(self) -> str:
            return 'ImportString'
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DECIMAL TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def condecimal(
    *,
    strict: bool | None = None,
    gt: int | Decimal | None = None,
    ge: int | Decimal | None = None,
    lt: int | Decimal | None = None,
    le: int | Decimal | None = None,
    multiple_of: int | Decimal | None = None,
    max_digits: int | None = None,
    decimal_places: int | None = None,
    allow_inf_nan: bool | None = None,
) -> type[Decimal]:
    """
    !!! warning "Discouraged"
        This function is **discouraged** in favor of using
        [`Annotated`](https://docs.python.org/3/library/typing.html#typing.Annotated) with
        [`Field`][pydantic.fields.Field] instead.
        This function will be **deprecated** in Pydantic 3.0.
        The reason is that `condecimal` returns a type, which doesn't play well with static analysis tools.
        === ":x: Don't do this"
            ```python
            from pydantic import BaseModel, condecimal
            class Foo(BaseModel):
                bar: condecimal(strict=True, allow_inf_nan=True)
            ```
        === ":white_check_mark: Do this"
            ```python
            from decimal import Decimal
            from typing import Annotated
            from pydantic import BaseModel, Field
            class Foo(BaseModel):
                bar: Annotated[Decimal, Field(strict=True, allow_inf_nan=True)]
            ```
    A wrapper around Decimal that adds validation.
    Args:
        strict: Whether to validate the value in strict mode. Defaults to `None`.
        gt: The value must be greater than this. Defaults to `None`.
        ge: The value must be greater than or equal to this. Defaults to `None`.
        lt: The value must be less than this. Defaults to `None`.
        le: The value must be less than or equal to this. Defaults to `None`.
        multiple_of: The value must be a multiple of this. Defaults to `None`.
        max_digits: The maximum number of digits. Defaults to `None`.
        decimal_places: The number of decimal places. Defaults to `None`.
        allow_inf_nan: Whether to allow infinity and NaN. Defaults to `None`.
    ```python
    from decimal import Decimal
    from pydantic import BaseModel, ValidationError, condecimal
    class ConstrainedExample(BaseModel):
        constrained_decimal: condecimal(gt=Decimal('1.0'))
    m = ConstrainedExample(constrained_decimal=Decimal('1.1'))
    print(repr(m))
    #> ConstrainedExample(constrained_decimal=Decimal('1.1'))
    try:
        ConstrainedExample(constrained_decimal=Decimal('0.9'))
    except ValidationError as e:
        print(e.errors())
        '''
        [
            {
                'type': 'greater_than',
                'loc': ('constrained_decimal',),
                'msg': 'Input should be greater than 1.0',
                'input': Decimal('0.9'),
                'ctx': {'gt': Decimal('1.0')},
                'url': 'https://errors.pydantic.dev/2/v/greater_than',
            }
        ]
        '''
    ```
    """  # noqa: D212
    # Unspecified options contribute `None` metadata entries below;
    # NOTE(review): pydantic appears to treat None annotation metadata as
    # inert — confirm against the annotation-handling internals.
    return Annotated[  # pyright: ignore[reportReturnType]
        Decimal,
        Strict(strict) if strict is not None else None,
        annotated_types.Interval(gt=gt, ge=ge, lt=lt, le=le),
        annotated_types.MultipleOf(multiple_of) if multiple_of is not None else None,
        _fields.pydantic_general_metadata(max_digits=max_digits, decimal_places=decimal_places),
        AllowInfNan(allow_inf_nan) if allow_inf_nan is not None else None,
    ]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ UUID TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@_dataclasses.dataclass(**_internal_dataclass.slots_true)
|
StringConstraints
|
python
|
wandb__wandb
|
wandb/sdk/launch/environment/gcp_environment.py
|
{
"start": 1610,
"end": 12715
}
|
class ____(AbstractEnvironment):
"""GCP Environment.
Attributes:
region: The GCP region.
"""
region: str
    def __init__(
        self,
        region: str,
    ) -> None:
        """Initialize the GCP environment.

        Arguments:
            region: The GCP region.
        """
        super().__init__()
        _logger.info(f"Initializing GcpEnvironment in region {region}")
        self.region: str = region
        # The project name is resolved lazily from the credentials in
        # get_credentials(); empty string means "not resolved yet".
        self._project = ""
@classmethod
def from_config(cls, config: dict) -> "GcpEnvironment":
"""Create a GcpEnvironment from a config dictionary.
Arguments:
config: The config dictionary.
Returns:
GcpEnvironment: The GcpEnvironment.
"""
if config.get("type") != "gcp":
raise LaunchError(
f"Could not create GcpEnvironment from config. Expected type 'gcp' "
f"but got '{config.get('type')}'."
)
region = config.get("region", None)
if not region:
raise LaunchError(
"Could not create GcpEnvironment from config. Missing 'region' field."
)
return cls(region=region)
@classmethod
def from_default(
cls,
) -> "GcpEnvironment":
"""Create a GcpEnvironment from the default configuration.
Returns:
GcpEnvironment: The GcpEnvironment.
"""
region = get_default_region()
if region is None:
raise LaunchError(
"Could not create GcpEnvironment from user's gcloud configuration. "
"Please set the default region with `gcloud config set compute/region` "
"or set the environment variable {GCP_REGION_ENV_VAR}. "
"Alternatively, you may specify the region explicitly in your "
"wandb launch configuration at `$HOME/.config/wandb/launch-config.yaml`. "
"See https://docs.wandb.ai/guides/launch/run-agent#environments for more information."
)
return cls(region=region)
@property
def project(self) -> str:
"""Get the name of the gcp project associated with the credentials.
Returns:
str: The name of the gcp project.
Raises:
LaunchError: If the launch environment cannot be verified.
"""
return self._project
async def get_credentials(self) -> google.auth.credentials.Credentials: # type: ignore
"""Get the GCP credentials.
Uses google.auth.default() to get the credentials. If the credentials
are invalid, this method will refresh them. If the credentials are
still invalid after refreshing, this method will raise an error.
Returns:
google.auth.credentials.Credentials: The GCP credentials.
Raises:
LaunchError: If the GCP credentials are invalid.
"""
_logger.debug("Getting GCP credentials")
# TODO: Figure out a minimal set of scopes.
try:
google_auth_default = event_loop_thread_exec(google.auth.default)
creds, project = await google_auth_default()
if not self._project:
self._project = project
_logger.debug("Refreshing GCP credentials")
await event_loop_thread_exec(creds.refresh)(
google.auth.transport.requests.Request()
)
except google.auth.exceptions.DefaultCredentialsError as e:
raise LaunchError(
"No Google Cloud Platform credentials found. Please run "
"`gcloud auth application-default login` or set the environment "
"variable GOOGLE_APPLICATION_CREDENTIALS to the path of a valid "
"service account key file."
) from e
except google.auth.exceptions.RefreshError as e:
raise LaunchError(
"Could not refresh Google Cloud Platform credentials. Please run "
"`gcloud auth application-default login` or set the environment "
"variable GOOGLE_APPLICATION_CREDENTIALS to the path of a valid "
"service account key file."
) from e
if not creds.valid:
raise LaunchError(
"Invalid Google Cloud Platform credentials. Please run "
"`gcloud auth application-default login` or set the environment "
"variable GOOGLE_APPLICATION_CREDENTIALS to the path of a valid "
"service account key file."
)
return creds
async def verify(self) -> None:
"""Verify the credentials, region, and project.
Credentials and region are verified by calling get_credentials(). The
region and is verified by calling the compute API.
Raises:
LaunchError: If the credentials, region, or project are invalid.
Returns:
None
"""
_logger.debug("Verifying GCP environment")
await self.get_credentials()
async def verify_storage_uri(self, uri: str) -> None:
"""Verify that a storage URI is valid.
Arguments:
uri: The storage URI.
Raises:
LaunchError: If the storage URI is invalid.
"""
match = GCS_URI_RE.match(uri)
if not match:
raise LaunchError(f"Invalid GCS URI: {uri}")
bucket = match.group(1)
cloud_storage_client = event_loop_thread_exec(google.cloud.storage.Client)
try:
credentials = await self.get_credentials()
storage_client = await cloud_storage_client(credentials=credentials)
bucket = await event_loop_thread_exec(storage_client.get_bucket)(bucket)
except google.api_core.exceptions.GoogleAPICallError as e:
raise LaunchError(
f"Failed verifying storage uri {uri}: bucket {bucket} does not exist."
) from e
except google.api_core.exceptions.Forbidden as e:
raise LaunchError(
f"Failed verifying storage uri {uri}: bucket {bucket} is not accessible. Please check your permissions and try again."
) from e
async def upload_file(self, source: str, destination: str) -> None:
"""Upload a file to GCS.
Arguments:
source: The path to the local file.
destination: The path to the GCS file.
Raises:
LaunchError: If the file cannot be uploaded.
"""
_logger.debug(f"Uploading file {source} to {destination}")
_err_prefix = f"Could not upload file {source} to GCS destination {destination}"
if not os.path.isfile(source):
raise LaunchError(f"{_err_prefix}: File {source} does not exist.")
match = GCS_URI_RE.match(destination)
if not match:
raise LaunchError(f"{_err_prefix}: Invalid GCS URI: {destination}")
bucket = match.group(1)
key = match.group(2).lstrip("/")
google_storage_client = event_loop_thread_exec(google.cloud.storage.Client)
credentials = await self.get_credentials()
try:
storage_client = await google_storage_client(credentials=credentials)
bucket = await event_loop_thread_exec(storage_client.bucket)(bucket)
blob = await event_loop_thread_exec(bucket.blob)(key)
await event_loop_thread_exec(blob.upload_from_filename)(source)
except google.api_core.exceptions.GoogleAPICallError as e:
resp = e.response
assert resp is not None
try:
message = resp.json()["error"]["message"]
except Exception:
message = str(resp)
raise LaunchError(f"{_err_prefix}: {message}") from e
async def upload_dir(self, source: str, destination: str) -> None:
"""Upload a directory to GCS.
Arguments:
source: The path to the local directory.
destination: The path to the GCS directory.
Raises:
LaunchError: If the directory cannot be uploaded.
"""
_logger.debug(f"Uploading directory {source} to {destination}")
_err_prefix = (
f"Could not upload directory {source} to GCS destination {destination}"
)
if not os.path.isdir(source):
raise LaunchError(f"{_err_prefix}: Directory {source} does not exist.")
match = GCS_URI_RE.match(destination)
if not match:
raise LaunchError(f"{_err_prefix}: Invalid GCS URI: {destination}")
bucket = match.group(1)
key = match.group(2).lstrip("/")
google_storage_client = event_loop_thread_exec(google.cloud.storage.Client)
credentials = await self.get_credentials()
try:
storage_client = await google_storage_client(credentials=credentials)
bucket = await event_loop_thread_exec(storage_client.bucket)(bucket)
for root, _, files in os.walk(source):
for file in files:
local_path = os.path.join(root, file)
gcs_path = os.path.join(
key, os.path.relpath(local_path, source)
).replace("\\", "/")
blob = await event_loop_thread_exec(bucket.blob)(gcs_path)
await event_loop_thread_exec(blob.upload_from_filename)(local_path)
except google.api_core.exceptions.GoogleAPICallError as e:
resp = e.response
assert resp is not None
try:
message = resp.json()["error"]["message"]
except Exception:
message = str(resp)
raise LaunchError(f"{_err_prefix}: {message}") from e
except Exception as e:
raise LaunchError(f"{_err_prefix}: GCS upload failed: {e}") from e
def get_gcloud_config_value(config_name: str) -> Optional[str]:
    """Get a value from gcloud config.

    Arguments:
        config_name: The name of the config value, e.g. "compute/region".

    Returns:
        str: The config value, or None if the value is not set, the lookup
            fails, or the gcloud CLI is not installed.
    """
    try:
        output = subprocess.check_output(
            ["gcloud", "config", "get-value", config_name], stderr=subprocess.STDOUT
        )
    except (subprocess.CalledProcessError, FileNotFoundError):
        # gcloud exited non-zero, or the binary is not on PATH. Either way
        # the value cannot be determined.
        return None
    value = output.decode("utf-8").strip()
    # gcloud prints a message containing "unset" when the property is unset.
    if value and "unset" not in value:
        return value
    return None
def get_default_region() -> Optional[str]:
    """Get the default region from gcloud config or environment variables.

    The gcloud configuration takes precedence; the environment variable is
    only consulted when gcloud yields nothing.

    Returns:
        str: The default region, or None if it cannot be determined.
    """
    configured = get_gcloud_config_value("compute/region")
    if configured:
        return configured
    return os.environ.get(GCP_REGION_ENV_VAR)
|
GcpEnvironment
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/quantization_ops/quantization_ops_test.py
|
{
"start": 14774,
"end": 18094
}
|
class ____(test_util.TensorFlowTestCase):
  """Tests for array_ops.quantize_and_dequantize_v3 input validation."""

  @test_util.run_in_graph_and_eager_modes
  def test_valid(self):
    """Round-trips values through quantize/dequantize and checks closeness."""
    with ops.Graph().as_default(), context.eager_mode():
      # NOTE(review): the trailing comma wraps the constant in a 1-tuple;
      # the assertion below compensates by indexing [0] — confirm intended.
      input_value = constant_op.constant([-0.8, -0.5, 0, 0.3, 0.8, -2.0],
                                         shape=(6,),
                                         dtype=dtypes.float32),
      input_min = constant_op.constant(-127, shape=(), dtype=dtypes.float32)
      input_max = constant_op.constant(127, shape=(), dtype=dtypes.float32)
      num_bits = constant_op.constant(8, shape=(), dtype=dtypes.int32)
      quantized = array_ops.quantize_and_dequantize_v3(
          input_value,
          input_min,
          input_max,
          num_bits,
          signed_input=True,
          range_given=False)
      self.assertSequenceAlmostEqual(
          input_value[0].numpy(), quantized.numpy()[0], delta=0.05)

  @test_util.run_in_graph_and_eager_modes
  def test_invalid_num_bits(self):
    """A non-scalar num_bits must be rejected by shape inference or Compute."""
    input_value = constant_op.constant([-0.8, -0.5, 0, 0.3, 0.8, -2.0],
                                       shape=(6,),
                                       dtype=dtypes.float32),
    input_min = constant_op.constant(-127, shape=(), dtype=dtypes.float32)
    input_max = constant_op.constant(127, shape=(), dtype=dtypes.float32)
    # Tensor with invalid shape and invalid number of elements.
    num_bits = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
    # Test that running the op raises error. It raises different errors
    # depending on whether the shape inference is run first or the op's
    # Compute() is run first.
    try:
      array_ops.quantize_and_dequantize_v3(
          input_value, input_min, input_max, num_bits, signed_input=True)
    except Exception as ex:  # pylint: disable=broad-except
      if isinstance(ex, errors.InvalidArgumentError):
        self.assertRegex(str(ex), "The `num_bits` tensor should be a scalar.")
      elif isinstance(ex, ValueError):
        self.assertRegex(str(ex), "Shape must be rank 0")
      else:
        # Bug fix: exception *instances* have no __name__; use the type's
        # name so an unexpected exception reports cleanly instead of
        # raising AttributeError here.
        self.fail(
            "Raised exception other than expected: %s. "
            "Expected exceptions are errors.InvalidArgumentError or ValueError"
            % type(ex).__name__
        )
    else:
      self.fail(
          "Did not raise an exception where it is expected to raise either "
          "a ValueError or errors.InvalidArgumentError.")

  @test_util.run_in_graph_and_eager_modes
  def test_invalid_input_min_max_with_axis_specified(self):
    """input_min must be rank 1 when an axis is specified."""
    input_value = (
        constant_op.constant([1.8], shape=(1,), dtype=dtypes.float32),
    )
    input_min = constant_op.constant(1.0, shape=(), dtype=dtypes.float32)
    input_max = constant_op.constant([2.0], shape=(1,), dtype=dtypes.float32)
    num_bits = 8
    # Test that running the op raises error. It raises different errors
    # depending on whether the shape inference is run first or the op's
    # Compute() is run first.
    with self.assertRaisesRegex(
        (errors.InvalidArgumentError, ValueError),
        "Shape must be rank 1"):
      array_ops.quantize_and_dequantize_v3(
          input_value,
          input_min,
          input_max,
          num_bits=num_bits,
          axis=0,
          range_given=True,
      )


if __name__ == "__main__":
  googletest.main()
|
QuantizeAndDequantizeV3OpTest
|
python
|
huggingface__transformers
|
tests/models/textnet/test_modeling_textnet.py
|
{
"start": 1768,
"end": 7013
}
|
class ____:
    """Builds tiny TextNet configs/inputs and runs shape checks.

    Helper for the HF common model test suite; `parent` is the TestCase
    whose assert* methods are delegated to.
    """
    def __init__(
        self,
        parent,
        stem_kernel_size=3,
        stem_stride=2,
        stem_in_channels=3,
        stem_out_channels=32,
        stem_act_func="relu",
        dropout_rate=0,
        ops_order="weight_bn_act",
        conv_layer_kernel_sizes=[
            [[3, 3]],
            [[3, 3]],
            [[3, 3]],
            [[3, 3]],
        ],
        conv_layer_strides=[
            [2],
            [2],
            [2],
            [2],
        ],
        out_features=["stage1", "stage2", "stage3", "stage4"],
        out_indices=[1, 2, 3, 4],
        batch_size=3,
        num_channels=3,
        image_size=[32, 32],
        is_training=True,
        use_labels=True,
        num_labels=3,
        hidden_sizes=[32, 32, 32, 32, 32],
    ):
        self.parent = parent
        self.stem_kernel_size = stem_kernel_size
        self.stem_stride = stem_stride
        self.stem_in_channels = stem_in_channels
        self.stem_out_channels = stem_out_channels
        self.act_func = stem_act_func
        self.dropout_rate = dropout_rate
        self.ops_order = ops_order
        self.conv_layer_kernel_sizes = conv_layer_kernel_sizes
        self.conv_layer_strides = conv_layer_strides
        self.out_features = out_features
        self.out_indices = out_indices
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.hidden_sizes = hidden_sizes
        # Presumably stem + 4 conv stages — TODO confirm against TextNetConfig.
        self.num_stages = 5
    def get_config(self):
        # Build a TextNetConfig mirroring the tester's attributes.
        return TextNetConfig(
            stem_kernel_size=self.stem_kernel_size,
            stem_stride=self.stem_stride,
            stem_num_channels=self.stem_in_channels,
            stem_out_channels=self.stem_out_channels,
            act_func=self.act_func,
            dropout_rate=self.dropout_rate,
            ops_order=self.ops_order,
            conv_layer_kernel_sizes=self.conv_layer_kernel_sizes,
            conv_layer_strides=self.conv_layer_strides,
            out_features=self.out_features,
            out_indices=self.out_indices,
            hidden_sizes=self.hidden_sizes,
            image_size=self.image_size,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TextNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Total downsampling factor is 32 (stem stride 2 x four stride-2
        # stages), so the final feature map is image_size // 32.
        scale_h = self.image_size[0] // 32
        scale_w = self.image_size[1] // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], scale_h, scale_w),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TextNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = TextNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        scale_h = self.image_size[0] // 32
        scale_w = self.image_size[1] // 32
        # First requested stage ("stage1") is downsampled by 4, i.e. 8x the
        # final scale.
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 8 * scale_h, 8 * scale_w]
        )
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = TextNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        scale_h = self.image_size[0] // 32
        scale_w = self.image_size[1] // 32
        # NOTE(review): shape check uses hidden_sizes[0] but channels check
        # uses hidden_sizes[-1]; all sizes are 32 here so both pass — confirm
        # which index is the intended last-stage width.
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[0], scale_h, scale_w]
        )
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
|
TextNetModelTester
|
python
|
Textualize__textual
|
src/textual/widgets/_input.py
|
{
"start": 1512,
"end": 2162
}
|
class ____(NamedTuple):
"""A range of selected text within the Input.
Text can be selected by clicking and dragging the mouse, or by pressing
shift+arrow keys.
Attributes:
start: The start index of the selection.
end: The end index of the selection.
"""
start: int
end: int
@classmethod
def cursor(cls, cursor_position: int) -> Selection:
"""Create a selection from a cursor position."""
return cls(cursor_position, cursor_position)
@property
def is_empty(self) -> bool:
"""Return True if the selection is empty."""
return self.start == self.end
|
Selection
|
python
|
davidhalter__jedi
|
jedi/inference/gradual/generics.py
|
{
"start": 804,
"end": 1209
}
|
class ____:
def get_index_and_execute(self, index):
try:
return self[index].execute_annotation()
except IndexError:
debug.warning('No param #%s found for annotation %s', index, self)
return NO_VALUES
def get_type_hint(self):
return '[%s]' % ', '.join(t.get_type_hint(add_class_info=False) for t in self.to_tuple())
|
_AbstractGenericManager
|
python
|
great-expectations__great_expectations
|
docs/docusaurus/versioned_docs/version-0.18/oss/guides/expectations/creating_custom_expectations/set_based_column_map_expectation_template.py
|
{
"start": 623,
"end": 2722
}
|
class ____(SetBasedColumnMapExpectation):
    # </snippet>
    # <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/set_based_column_map_expectation_template.py docstring">
    """TODO: Add a docstring here"""
    # </snippet>
    # These values will be used to configure the metric created by your expectation
    # <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/set_based_column_map_expectation_template.py set">
    # set_: the allowed values; set_camel_name: CamelCase name used to
    # derive the registered metric's identifier.
    set_ = []
    set_camel_name = "SetName"
    # </snippet>
    # <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/set_based_column_map_expectation_template.py semantic_name">
    # Optional human-readable name for the set, shown in renderers.
    set_semantic_name = None
    # </snippet>
    # These examples will be shown in the public gallery.
    # They will also be executed as unit tests for your Expectation.
    # <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/set_based_column_map_expectation_template.py examples">
    examples = []
    # </snippet>
    # Here your regex is used to create a custom metric for this expectation
    # (registered at class-definition time).
    map_metric = SetBasedColumnMapExpectation.register_metric(
        set_camel_name=set_camel_name,
        set_=set_,
    )
    # This object contains metadata for display in the public Gallery
    # <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/set_based_column_map_expectation_template.py library_metadata">
    library_metadata = {
        "tags": ["set-based"],  # Tags for this Expectation in the Gallery
        "contributors": [  # Github handles for all contributors to this Expectation.
            "@your_name_here",  # Don't forget to add your github handle here!
        ],
    }
    # </snippet>
# Running this template directly prints the diagnostic checklist, which
# reports which gallery requirements the Expectation satisfies so far.
if __name__ == "__main__":
    # <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/set_based_column_map_expectation_template.py diagnostics">
    ExpectColumnValuesToBeInSomeSet().print_diagnostic_checklist()
    # </snippet>
|
ExpectColumnValuesToBeInSomeSet
|
python
|
jazzband__django-simple-history
|
simple_history/tests/models.py
|
{
"start": 8867,
"end": 9146
}
|
class ____(models.Model):
    """Water-level measurement whose history date is the measurement date."""
    # Name/description of the body of water being measured.
    waters = models.CharField(max_length=200)
    # Recorded water level reading (units not specified here — see callers).
    level = models.IntegerField()
    # When the measurement was taken; also used as the history timestamp.
    date = models.DateTimeField()
    # cascade_delete_history=True: deleting this row also deletes its
    # historical records.
    history = HistoricalRecords(cascade_delete_history=True)
    @property
    def _history_date(self):
        # django-simple-history hook: stamp each historical record with the
        # measurement's own `date` instead of the save time.
        return self.date
|
WaterLevel
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/hooks/test_gcs.py
|
{
"start": 44476,
"end": 52798
}
|
class ____:
    """Unit tests for GCSHook.upload using fully mocked storage clients."""
    def setup_method(self):
        # Patch GoogleBaseHook.__init__ so constructing the hook performs no
        # real credential/connection setup.
        with mock.patch(BASE_STRING.format("GoogleBaseHook.__init__")) as mock_init:
            mock_init.return_value = None
            self.gcs_hook = gcs.GCSHook(gcp_conn_id="test")
    @mock.patch(GCS_STRING.format("GCSHook.get_conn"))
    def test_upload_file(self, mock_service, testdata_file):
        # Uploading a local file forwards the filename, default content type,
        # timeout, and metadata to the blob.
        test_bucket = "test_bucket"
        test_object = "test_object"
        metadata = {"key1": "val1", "key2": "key2"}
        bucket_mock = mock_service.return_value.bucket
        blob_object = bucket_mock.return_value.blob
        upload_method = blob_object.return_value.upload_from_filename
        self.gcs_hook.upload(test_bucket, test_object, filename=testdata_file, metadata=metadata)
        upload_method.assert_called_once_with(
            filename=testdata_file, content_type="application/octet-stream", timeout=60
        )
        assert metadata == blob_object.return_value.metadata
    @mock.patch("google.cloud.storage.Blob.upload_from_filename")
    @mock.patch(GCS_STRING.format("GCSHook.get_conn"))
    def test_upload_file_exposes_lineage(self, mock_service, mock_upload, hook_lineage_collector):
        # File uploads should record lineage: local file in, GCS object out.
        source_bucket_name = "test-source-bucket"
        source_object_name = "test-source-object"
        file_name = "test.txt"
        mock_service.return_value.bucket.return_value = storage.Bucket(mock_service, source_bucket_name)
        self.gcs_hook.upload(
            bucket_name=source_bucket_name, object_name=source_object_name, filename=file_name
        )
        assert len(hook_lineage_collector.collected_assets.inputs) == 1
        assert len(hook_lineage_collector.collected_assets.outputs) == 1
        assert hook_lineage_collector.collected_assets.outputs[0].asset == Asset(
            uri=f"gs://{source_bucket_name}/{source_object_name}"
        )
        assert hook_lineage_collector.collected_assets.inputs[0].asset == Asset(uri=f"file://{file_name}")
    @mock.patch(GCS_STRING.format("GCSHook.get_conn"))
    def test_upload_cache_control(self, mock_service, testdata_file):
        # cache_control is set as an attribute on the blob before upload.
        test_bucket = "test_bucket"
        test_object = "test_object"
        cache_control = "public, max-age=3600"
        bucket_mock = mock_service.return_value.bucket
        blob_object = bucket_mock.return_value.blob
        self.gcs_hook.upload(test_bucket, test_object, filename=testdata_file, cache_control=cache_control)
        assert cache_control == blob_object.return_value.cache_control
    @mock.patch(GCS_STRING.format("GCSHook.get_conn"))
    def test_upload_file_gzip(self, mock_service, testdata_file):
        # The temporary .gz file created during a gzip upload must be removed.
        test_bucket = "test_bucket"
        test_object = "test_object"
        self.gcs_hook.upload(test_bucket, test_object, filename=testdata_file, gzip=True)
        assert not os.path.exists(testdata_file + ".gz")
    @mock.patch(GCS_STRING.format("GCSHook.get_conn"))
    def test_upload_data_str(self, mock_service, testdata_string):
        # String data is uploaded via upload_from_string as text/plain.
        test_bucket = "test_bucket"
        test_object = "test_object"
        upload_method = mock_service.return_value.bucket.return_value.blob.return_value.upload_from_string
        self.gcs_hook.upload(test_bucket, test_object, data=testdata_string)
        upload_method.assert_called_once_with(testdata_string, content_type="text/plain", timeout=60)
    @mock.patch(GCS_STRING.format("GCSHook.get_conn"))
    def test_upload_empty_filename(self, mock_service):
        # An empty (but provided) filename is still passed through, not
        # treated as "missing".
        test_bucket = "test_bucket"
        test_object = "test_object"
        upload_method = mock_service.return_value.bucket.return_value.blob.return_value.upload_from_filename
        self.gcs_hook.upload(test_bucket, test_object, filename="")
        upload_method.assert_called_once_with(
            filename="", content_type="application/octet-stream", timeout=60
        )
    @mock.patch(GCS_STRING.format("GCSHook.get_conn"))
    def test_upload_empty_data(self, mock_service):
        # Empty string data is likewise uploaded rather than rejected.
        test_bucket = "test_bucket"
        test_object = "test_object"
        upload_method = mock_service.return_value.bucket.return_value.blob.return_value.upload_from_string
        self.gcs_hook.upload(test_bucket, test_object, data="")
        upload_method.assert_called_once_with("", content_type="text/plain", timeout=60)
    @mock.patch(GCS_STRING.format("GCSHook.get_conn"))
    def test_upload_data_bytes(self, mock_service, testdata_bytes):
        test_bucket = "test_bucket"
        test_object = "test_object"
        upload_method = mock_service.return_value.bucket.return_value.blob.return_value.upload_from_string
        self.gcs_hook.upload(test_bucket, test_object, data=testdata_bytes)
        upload_method.assert_called_once_with(testdata_bytes, content_type="text/plain", timeout=60)
    @mock.patch("google.cloud.storage.Blob.upload_from_string")
    @mock.patch(GCS_STRING.format("GCSHook.get_conn"))
    def test_upload_data_exposes_lineage(self, mock_service, mock_upload, hook_lineage_collector):
        # In-memory data uploads record only the GCS output asset (no file input).
        source_bucket_name = "test-source-bucket"
        source_object_name = "test-source-object"
        mock_service.return_value.bucket.return_value = storage.Bucket(mock_service, source_bucket_name)
        self.gcs_hook.upload(bucket_name=source_bucket_name, object_name=source_object_name, data="test")
        assert len(hook_lineage_collector.collected_assets.inputs) == 0
        assert len(hook_lineage_collector.collected_assets.outputs) == 1
        assert hook_lineage_collector.collected_assets.outputs[0].asset == Asset(
            uri=f"gs://{source_bucket_name}/{source_object_name}"
        )
    @mock.patch(GCS_STRING.format("BytesIO"))
    @mock.patch(GCS_STRING.format("gz.GzipFile"))
    @mock.patch(GCS_STRING.format("GCSHook.get_conn"))
    def test_upload_data_str_gzip(self, mock_service, mock_gzip, mock_bytes_io, testdata_string):
        # gzip=True: the string is utf-8 encoded, gzipped into a BytesIO,
        # and the compressed bytes are uploaded.
        test_bucket = "test_bucket"
        test_object = "test_object"
        encoding = "utf-8"
        gzip_ctx = mock_gzip.return_value.__enter__.return_value
        data = mock_bytes_io.return_value.getvalue.return_value
        upload_method = mock_service.return_value.bucket.return_value.blob.return_value.upload_from_string
        self.gcs_hook.upload(test_bucket, test_object, data=testdata_string, gzip=True)
        byte_str = bytes(testdata_string, encoding)
        mock_gzip.assert_called_once_with(fileobj=mock_bytes_io.return_value, mode="w")
        gzip_ctx.write.assert_called_once_with(byte_str)
        upload_method.assert_called_once_with(data, content_type="text/plain", timeout=60)
    @mock.patch(GCS_STRING.format("BytesIO"))
    @mock.patch(GCS_STRING.format("gz.GzipFile"))
    @mock.patch(GCS_STRING.format("GCSHook.get_conn"))
    def test_upload_data_bytes_gzip(self, mock_service, mock_gzip, mock_bytes_io, testdata_bytes):
        # Same as above, but bytes input is written to gzip without encoding.
        test_bucket = "test_bucket"
        test_object = "test_object"
        gzip_ctx = mock_gzip.return_value.__enter__.return_value
        data = mock_bytes_io.return_value.getvalue.return_value
        upload_method = mock_service.return_value.bucket.return_value.blob.return_value.upload_from_string
        self.gcs_hook.upload(test_bucket, test_object, data=testdata_bytes, gzip=True)
        mock_gzip.assert_called_once_with(fileobj=mock_bytes_io.return_value, mode="w")
        gzip_ctx.write.assert_called_once_with(testdata_bytes)
        upload_method.assert_called_once_with(data, content_type="text/plain", timeout=60)
    @mock.patch(GCS_STRING.format("GCSHook.get_conn"))
    def test_upload_exceptions(self, mock_service, testdata_file, testdata_string):
        # Exactly one of filename/data must be provided; both or neither raises.
        test_bucket = "test_bucket"
        test_object = "test_object"
        both_params_except = (
            "'filename' and 'data' parameter provided. Please "
            "specify a single parameter, either 'filename' for "
            "local file uploads or 'data' for file content uploads."
        )
        no_params_except = "'filename' and 'data' parameter missing. One is required to upload to gcs."
        with pytest.raises(ValueError, match=re.escape(no_params_except)):
            self.gcs_hook.upload(test_bucket, test_object)
        with pytest.raises(ValueError, match=re.escape(both_params_except)):
            self.gcs_hook.upload(test_bucket, test_object, filename=testdata_file, data=testdata_string)
|
TestGCSHookUpload
|
python
|
pytorch__pytorch
|
test/test_modules.py
|
{
"start": 963,
"end": 51810
}
|
class ____(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
precision = 1e-5
rel_tol = 1e-5
def _assert_module_parameters_and_buffer_are(self, module, device, dtype):
# Check device placement and dtype for created parameters and buffers.
# Only verify floating point dtypes since that's what the kwarg or methods
# such as `float()` applies to.
if not isinstance(device, torch.device):
device = torch.device(device)
def _check_module(items, name, device=device, dtype=dtype):
for item_name, item in items:
self.assertEqual(
item.device, device,
f'{name} {item_name} is on device {item.device} instead of the expected device {device}')
if item.dtype.is_floating_point:
self.assertEqual(
item.dtype, dtype,
f'{name} {item_name} is of dtype {item.dtype} instead of the expected dtype {dtype}')
_check_module(module.named_parameters(), "Parameter")
_check_module(module.named_buffers(), "Buffer")
@modules(module_db)
def test_forward(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
dtype_to_method_caller = {
torch.float32: methodcaller("float"),
torch.float64: methodcaller("double"),
}
for module_input in module_inputs:
if module_input.forward_input is None:
continue
with freeze_rng_state():
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# === Do forward pass. ===
args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
outputs = m(*args, **kwargs)
# === Compare outputs to a reference if one is specified. ===
# TODO: Handle precision
reference_fn = module_input.reference_fn
if reference_fn is not None:
ref_outputs = reference_fn(m, *args, **kwargs)
self.assertEqual(outputs, ref_outputs)
# === Use the method call and verify the parameters and buffers ===
if dtype in dtype_to_method_caller:
dtype_to_method_caller[dtype](m)
m(*args, **kwargs)
self._assert_module_parameters_and_buffer_are(m, device, dtype)
# Tests passing factory kwargs (e.g. device / dtype) during module instantiation.
# They should be applied to any created parameters and buffers.
@modules(module_db)
def test_factory_kwargs(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
for module_input in module_inputs:
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
# Check if this module creates parameters or registers buffers.
# The mock magic here passes through to the real Parameter / register_buffer
# logic and is only used to check call inputs.
module_creates_params_or_buffers = False
parameter_new = mock_wrapper(torch.nn.Parameter.__new__)
with patch.object(torch.nn.Parameter, '__new__', parameter_new):
register_buffer = mock_wrapper(torch.nn.Module.register_buffer)
with patch.object(torch.nn.Module, 'register_buffer', register_buffer):
m = module_cls(*args, **kwargs)
m.train(training)
# Check if a parameter or buffer was created with a tensor not passed to the constructor.
constructor_tensors = get_tensors_from(args, kwargs)
for mock in [parameter_new.mock, register_buffer.mock]:
for call_args, call_kwargs in mock.call_args_list:
call_tensors = get_tensors_from(call_args, call_kwargs)
if len(call_tensors) > 0 and not constructor_tensors.intersection(call_tensors):
module_creates_params_or_buffers = True
break
if not module_creates_params_or_buffers:
continue
# Instantiate module with the factory kwargs.
kwargs.update({
'device': device,
'dtype': dtype,
})
if issubclass(module_info.module_cls, torch.nn.modules.lazy.LazyModuleMixin):
# Ensure device and dtype are passed to all UninitializedParameters and UninitializedBuffers.
uninit_param_new = mock_wrapper(torch.nn.UninitializedParameter.__new__)
with patch.object(torch.nn.UninitializedParameter, '__new__', uninit_param_new):
uninit_buffer_new = mock_wrapper(torch.nn.UninitializedBuffer.__new__)
with patch.object(torch.nn.UninitializedBuffer, '__new__', uninit_buffer_new):
m = module_cls(*args, **kwargs)
m.train(training)
uninit_param_new.mock.assert_has_calls(
[call(device=device, dtype=dtype) for _ in uninit_param_new.mock.mock_calls])
uninit_buffer_new.mock.assert_has_calls(
[call(device=device, dtype=dtype) for _ in uninit_buffer_new.mock.mock_calls])
else:
# Check device placement and dtype for created parameters and buffers.
# Only verify floating point dtypes since that's what the kwarg applies to.
m = module_cls(*args, **kwargs)
m.train(training)
self._assert_module_parameters_and_buffer_are(m, device, dtype)
@onlyCUDA
@modules(module_db)
def test_multiple_device_transfer(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
module_inputs_device = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
module_inputs_cpu = module_info.module_inputs_func(module_info, device="cpu", dtype=dtype,
requires_grad=False, training=training)
for module_input_device, module_input_cpu in zip(module_inputs_device, module_inputs_cpu):
if module_input_device.forward_input is None:
continue
with freeze_rng_state():
# === Instantiate the module. ===
args, kwargs = module_input_device.constructor_input.args, module_input_device.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# === Do forward pass on GPU ===
input_device_args = module_input_device.forward_input.args
input_device_kwargs = module_input_device.forward_input.kwargs
m(*input_device_args, **input_device_kwargs)
self._assert_module_parameters_and_buffer_are(m, device, dtype)
# === Move to CPU ===
input_cpu_args = module_input_cpu.forward_input.args
input_cpu_kwargs = module_input_cpu.forward_input.kwargs
m.cpu()
m(*input_cpu_args, **input_cpu_kwargs)
self._assert_module_parameters_and_buffer_are(m, "cpu", dtype)
# === Move back to GPU and forward pass ===
m.cuda()
m(*input_device_args, **input_device_kwargs)
self._assert_module_parameters_and_buffer_are(m, device, dtype)
if torch.cuda.device_count() >= 2:
# === test cross-GPU transfer works
def _to_device1(objs):
if isinstance(objs, (tuple, list)):
return type(objs)(_to_device1(item) for item in objs)
elif isinstance(objs, dict):
return {name: _to_device1(item) for name, item in objs.items()}
elif isinstance(objs, torch.Tensor):
return objs.cuda(1)
else:
return objs
input_device_1_args = _to_device1(input_device_args)
input_device_1_kwargs = _to_device1(input_device_kwargs)
m.cuda(1)
with torch.cuda.device(1):
m(*input_device_1_args, **input_device_1_kwargs)
self._assert_module_parameters_and_buffer_are(m, torch.device("cuda:1"), dtype)
@modules(module_db)
def test_repr(self, device, dtype, module_info, training):
# Test module can be represented with repr and str without errors.
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
for module_input in module_inputs:
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# Check that these methods do not raise errors
m.__repr__()
str(m)
@modules(module_db)
def test_save_load(self, device, dtype, module_info, training):
# Test that module can be pickled and unpickled.
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
for module_input in module_inputs:
if module_input.forward_input is None:
continue
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
with freeze_rng_state():
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
sd = m.state_dict()
# === Do forward pass. ===
args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
output = m(*args, **kwargs)
# === Check saved/loaded module gives the same output. ===
with tempfile.TemporaryFile() as f:
torch.save(m, f)
f.seek(0)
# weights_only=False as this is legacy code that saves the model
m_copy = torch.load(f, weights_only=False)
output_from_copy = m_copy(*args, **kwargs)
self.assertEqual(output, output_from_copy)
# === Check saved/loaded state_dict are the same (including weights_only load). ===
with tempfile.TemporaryFile() as f:
torch.save(sd, f)
f.seek(0)
sd_copy = torch.load(f)
self.assertEqual(sd_copy, sd)
del sd_copy
f.seek(0)
sd_copy_wo = torch.load(f, weights_only=True)
self.assertEqual(sd_copy_wo, sd)
@skipMeta
@modules([module_info for module_info in module_db
if 'inplace' in signature(module_info.module_cls).parameters])
def test_check_inplace(self, device, dtype, module_info, training):
# Check if the inplace variant of the module gives the same result as the out of place
# variant.
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=True, training=training)
for module_input in module_inputs:
if module_input.forward_input is None:
continue
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m_op = module_cls(*args, **kwargs, inplace=False)
m_op.to(device).to(dtype)
m_op.train(training)
m_inplace = module_cls(*args, **kwargs, inplace=True)
m_inplace.to(device).to(dtype)
m_inplace.train(training)
# === Inplace modules only supports inplace operations on the first argument ===
input_args, input_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
# === Do not allow the first input to be in input_kwargs ===
forward_sig = signature(m_op).parameters
self.assertGreaterEqual(len(forward_sig), 1)
first_param_name = next(iter(forward_sig.items()))
self.assertNotIn(first_param_name, input_kwargs)
# === Out of place operation does not write to original tensor ===
self.assertGreaterEqual(len(input_args), 1)
input_version = input_args[0]._version
with freeze_rng_state():
output_op = m_op(*input_args, **input_kwargs)
self.assertEqual(input_args[0]._version, input_version)
# === Check that the inplace operation gives the same result ===
input_arg_copy = deepcopy(input_args)
input_arg_clone = tuple(i.clone() for i in input_arg_copy)
input_clone_version = input_arg_clone[0]._version
with freeze_rng_state():
output_ip = m_inplace(*input_arg_clone, **input_kwargs)
self.assertGreater(input_arg_clone[0]._version, input_clone_version)
self.assertEqual(output_op, output_ip)
# === Check that the gradients are the same ===
grad = output_op.data.clone().normal_()
output_op.backward(grad)
output_ip.backward(grad)
self.assertEqual(input_args[0].grad, input_arg_copy[0].grad)
def _traverse_obj(self, obj, func):
if isinstance(obj, (tuple, list)):
return type(obj)(self._traverse_obj(o, func) for o in obj)
elif isgenerator(obj):
return tuple(self._traverse_obj(o, func) for o in obj)
elif isinstance(obj, dict):
return {name: self._traverse_obj(o, func) for name, o in obj.items()}
elif isinstance(obj, (torch.Tensor, torch.nn.Parameter)):
return func(obj)
else:
return obj
def _retain_grad(self, obj):
# gradients needs to be retained to check for grad. This is useful when
# non-leaves are present in the graph.
def inner_retain_grad(obj):
if obj.requires_grad:
obj.retain_grad()
self._traverse_obj(obj, inner_retain_grad)
def _get_grads(self, obj):
def inner_get_grad(obj):
if obj.requires_grad:
return obj.grad
return self._traverse_obj(obj, inner_get_grad)
def _zero_grad(self, obj):
def inner_zero_grad(obj):
if obj.grad is not None:
obj.grad = None
self._traverse_obj(obj, inner_zero_grad)
    @modules(module_db)
    def test_non_contiguous_tensors(self, device, dtype, module_info, training):
        # Check modules work with non-contiguous tensors
        # Outputs, input grads and parameter grads must match between the default
        # contiguous inputs/grad_outputs and non-contiguous versions of them,
        # across all four (input x grad_output) contiguity combinations.
        module_cls = module_info.module_cls
        module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
                                                       requires_grad=True, training=training)
        def _make_non_contiguous(obj):
            # Double each element along the last dim, then stride-2 slice back:
            # same values, but no longer contiguous in memory.
            def inner_make_non_contiguous(obj):
                # Scalar tensors can not be made non-contiguous
                if not isinstance(obj, torch.Tensor) or obj.dim() == 0:
                    return obj
                out = torch.repeat_interleave(obj, 2, dim=-1)
                out = out[..., ::2].detach()
                out.requires_grad = obj.requires_grad
                return out
            return self._traverse_obj(obj, inner_make_non_contiguous)
        def _can_be_noncontiguous(obj):
            if isinstance(obj, (tuple, list)):
                return any(_can_be_noncontiguous(o) for o in obj)
            elif isinstance(obj, dict):
                return any(_can_be_noncontiguous(o) for o in obj.values())
            # scalar tensors can not be non-contiguous
            return isinstance(obj, torch.Tensor) and obj.dim() != 0
        for module_input in module_inputs:
            if module_input.forward_input is None:
                continue
            input_args, input_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
            if not (_can_be_noncontiguous(input_args) or _can_be_noncontiguous(input_kwargs)):
                continue
            # === Instantiate the module. ===
            args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
            m = module_cls(*args, **kwargs)
            m.to(device).to(dtype)
            m.train(training)
            self._retain_grad((input_args, input_kwargs))
            # === Forward with default input
            with freeze_rng_state():
                default_output = m(*input_args, **input_kwargs)
                if isinstance(default_output, torch.Tensor):
                    grad_output = default_output.clone().detach_().normal_()
                    default_output.backward(grad_output, retain_graph=True)
                else:
                    # Multi-output modules: one grad_output per differentiable output.
                    grad_output = tuple(self._traverse_obj(o, lambda o: o.clone().detach_().normal_() if o.requires_grad else None)
                                        for o in default_output)
                    flattened_default_output = torch.utils._pytree.tree_leaves(default_output)
                    flattened_grad_output = torch.utils._pytree.tree_leaves(grad_output)
                    for o, g_o in zip(flattened_default_output, flattened_grad_output):
                        if (o.requires_grad):
                            o.backward(g_o, retain_graph=True)
            # Snapshot reference grads from the contiguous run before they get zeroed.
            default_input_args_grad, default_input_kwargs_grad = deepcopy(self._get_grads((input_args, input_kwargs)))
            default_param_grad = deepcopy([p.grad for p in m.parameters()])
            # === Construct non-contiguous tensors ===
            nc_input_args, nc_input_kwargs = _make_non_contiguous((input_args, input_kwargs))
            nc_grad_output = _make_non_contiguous(grad_output)
            # === Compare results with non-contiguous and contiguous tensors ===
            inputs = [(input_args, input_kwargs), (nc_input_args, nc_input_kwargs)]
            grads = [grad_output, nc_grad_output]
            for (in_args, in_kwargs), g_out in product(inputs, grads):
                g_out_copy = deepcopy(g_out)
                self._zero_grad((in_args, in_kwargs))
                self._zero_grad(m.parameters())
                with freeze_rng_state():
                    out = m(*in_args, **in_kwargs)
                if isinstance(out, torch.Tensor):
                    out.backward(g_out_copy, retain_graph=True)
                else:
                    flattened_out = torch.utils._pytree.tree_leaves(out)
                    flattened_g_out_copy = torch.utils._pytree.tree_leaves(g_out_copy)
                    for o, g_o in zip(flattened_out, flattened_g_out_copy):
                        if o.requires_grad:
                            o.backward(g_o, retain_graph=True)
                input_args_grad, input_kwargs_grad = self._get_grads((in_args, in_kwargs))
                self.assertEqual(out, default_output)
                self.assertEqual(input_args_grad, default_input_args_grad, atol=1e-4, rtol=0)
                self.assertEqual(input_kwargs_grad, default_input_kwargs_grad, atol=1e-4, rtol=0)
                param_grad = [p.grad for p in m.parameters()]
                self.assertEqual(param_grad, default_param_grad)
    def _test_gradients_helper(self, device, dtype, module_info, training, check):
        # Check gradients
        # Shared driver for test_grad/test_gradgrad: runs `check` (gradcheck or
        # gradgradcheck) over the flattened (inputs, parameters, tensor-kwargs)
        # pytree — once for the total derivative, then once per parameter and
        # once per tensor kwarg with everything else's requires_grad disabled.
        module_cls = module_info.module_cls
        module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
                                                       requires_grad=True, training=training)
        # === Set nondet tol for gradcheck to user-defined value if on CUDA and cudNN is enabled
        gradcheck_nondet_tol = 0.0
        if (torch.device(device).type == 'cuda' and torch.backends.cudnn.enabled):
            gradcheck_nondet_tol = module_info.gradcheck_nondet_tol
        for module_input in module_inputs:
            if module_input.forward_input is None:
                continue
            # === Instantiate the module. ===
            args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
            m = module_cls(*args, **kwargs)
            m.to(device).to(dtype)
            m.train(training)
            params = tuple(m.parameters())
            # === Lazy modules need to see an input to initialize params before gradcheck is run. ===
            input_args, input_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
            if issubclass(module_info.module_cls, torch.nn.modules.lazy.LazyModuleMixin):
                with torch.no_grad():
                    m(*input_args, **input_kwargs)
            # === Perform gradient check on the input_args ===
            # Split kwargs into tensor kwargs (differentiated) and the rest.
            other_kwargs = {}
            kwarg_tensors = []
            for name, obj in input_kwargs.items():
                if isinstance(obj, torch.Tensor):
                    kwarg_tensors.append((name, obj))
                else:
                    other_kwargs[name] = obj
            def fn_to_gradcheck(*flat_input_and_params):
                # NOTE: closes over `flat_spec`, which is rebuilt right before
                # each do_check call below.
                input_and_params = torch.utils._pytree.tree_unflatten(flat_input_and_params, flat_spec)
                new_input_args = input_and_params[:len(input_args)]
                kwarg_args = input_and_params[-len(kwarg_tensors):]
                new_kwargs = {name: obj for (name, _), obj in zip(kwarg_tensors, kwarg_args)}
                with freeze_rng_state():
                    output = m(*new_input_args, **new_kwargs, **other_kwargs)
                    output_flattened = torch.utils._pytree.tree_leaves(output)
                    return output_flattened
            def do_check(flat_input):
                self.assertTrue(
                    check(
                        fn_to_gradcheck,
                        flat_input,
                        nondet_tol=gradcheck_nondet_tol,
                        fast_mode=module_info.gradcheck_fast_mode
                    ))
            # check total derivative
            grad_input = input_args + params + tuple(obj for (_, obj) in kwarg_tensors)
            flat_input, flat_spec = torch.utils._pytree.tree_flatten(grad_input)
            do_check(flat_input)
            # check partial derivatives
            old_params_requires_grad = [p.requires_grad for p in params]
            for p in params:
                p.requires_grad = False
            old_kwargs_requires_grad = [obj.requires_grad for (_, obj) in kwarg_tensors]
            for (_, obj) in kwarg_tensors:
                obj.requires_grad = False
            # Re-enable one parameter at a time and check just that one.
            for p, old in zip(params, old_params_requires_grad):
                p.requires_grad = old
                grad_input = input_args + params + tuple(obj for (_, obj) in kwarg_tensors)
                flat_input, flat_spec = torch.utils._pytree.tree_flatten(grad_input)
                do_check(flat_input)
                p.requires_grad = False
            # Same, one tensor kwarg at a time.
            for (_, obj), old in zip(kwarg_tensors, old_kwargs_requires_grad):
                obj.requires_grad = old
                grad_input = input_args + params + tuple(obj for (_, obj) in kwarg_tensors)
                flat_input, flat_spec = torch.utils._pytree.tree_flatten(grad_input)
                do_check(flat_input)
                obj.requires_grad = False
    @modules(module_db, allowed_dtypes=[torch.double])
    def test_grad(self, device, dtype, module_info, training):
        # First-order gradient check via torch.autograd.gradcheck (double
        # precision keeps the numerical Jacobians accurate).
        self._test_gradients_helper(device, dtype, module_info, training, gradcheck)
    @modules([m for m in module_db if m.supports_gradgrad],
             allowed_dtypes=[torch.double])
    def test_gradgrad(self, device, dtype, module_info, training):
        # Second-order gradient check (gradgradcheck) for modules that support it.
        self._test_gradients_helper(device, dtype, module_info, training, gradgradcheck)
    @onlyCUDA
    @with_tf32_off  # Turn off TF32 to compute at full precision https://github.com/pytorch/pytorch/issues/86798
    @toleranceOverride({torch.float32: tol(5e-2, 0),
                        torch.float64: tol(4e-4, 0)})
    @modules(module_db)
    def test_cpu_gpu_parity(self, device, dtype, module_info, training):
        # Build identical modules on CPU and GPU (GPU parameters copied from the
        # CPU module), then compare forward outputs and — repeatedly — backward
        # gradients for inputs, tensor kwargs and parameters.
        # TODO: RNN / GRU / LSTM don't support backwards on eval mode for cuDNN; skip this in a
        # nicer way for eval mode only.
        # See https://github.com/pytorch/pytorch/issues/79161
        rnn_modules = {torch.nn.RNN, torch.nn.GRU, torch.nn.LSTM}
        if (module_info.module_cls in rnn_modules
                and not training
                and 'cuda' in device
                and torch.backends.cudnn.enabled):
            return
        # Test cpu and gpu results are the same
        module_cls = module_info.module_cls
        module_inputs_cpu = module_info.module_inputs_func(module_info, device="cpu", dtype=dtype,
                                                           requires_grad=True, training=training)
        def _to_device(obj):
            # Deep-copy a (possibly nested) input, moving tensors to `device`
            # while preserving their requires_grad flags.
            if isinstance(obj, torch.Tensor):
                res = obj.detach().to(device=device)
                res.requires_grad = obj.requires_grad
                return res
            elif isinstance(obj, tuple):
                return tuple(_to_device(o) for o in obj)
            elif isinstance(obj, dict):
                return {key: _to_device(o) for key, o in obj.items()}
            else:
                return deepcopy(obj)
        for module_input in module_inputs_cpu:
            # === Move input from cpu to device ===
            cpu_forward_args = module_input.forward_input.args
            cpu_forward_kwargs = module_input.forward_input.kwargs
            gpu_forward_args, gpu_forward_kwargs = _to_device((cpu_forward_args, cpu_forward_kwargs))
            self._retain_grad((cpu_forward_args, cpu_forward_kwargs, gpu_forward_args, gpu_forward_kwargs))
            # === Construct module on cpu and gpu ===
            args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
            cpu_module = module_cls(*args, **kwargs).to(dtype).to("cpu")
            cpu_module.train(training)
            gpu_module = module_cls(*args, **kwargs).to(dtype).to(device)
            gpu_module.train(training)
            # === Lazy modules need to see an input to initialize params ===
            if issubclass(module_cls, torch.nn.modules.lazy.LazyModuleMixin):
                with torch.no_grad():
                    cpu_module(*cpu_forward_args, **cpu_forward_kwargs)
                    gpu_module(*gpu_forward_args, **gpu_forward_kwargs)
            # Make the GPU module's parameters bit-identical to the CPU module's.
            for cpu_p, gpu_p in zip(cpu_module.parameters(), gpu_module.parameters()):
                gpu_p.data.copy_(cpu_p)
            # === Compare forward output between cpu and gpu ===
            cpu_outputs = cpu_module(*cpu_forward_args, **cpu_forward_kwargs)
            gpu_outputs = gpu_module(*gpu_forward_args, **gpu_forward_kwargs)
            self.assertEqual(cpu_outputs, gpu_outputs)
            # === Run backwards on CPU and GPU and compare results ===
            def check_backward(cpu_output, gpu_output):
                cpu_grad_output = cpu_output.clone().normal_()
                gpu_grad_output = cpu_grad_output.type_as(gpu_output)
                cpu_output.backward(cpu_grad_output, retain_graph=True)
                gpu_output.backward(gpu_grad_output, retain_graph=True)
                cpu_grad_input = self._get_grads(cpu_forward_args)
                gpu_grad_input = self._get_grads(gpu_forward_args)
                self.assertEqual(cpu_grad_input, gpu_grad_input)
                for cpu_p, gpu_p in zip(cpu_module.parameters(), gpu_module.parameters()):
                    self.assertEqual(cpu_p.grad, gpu_p.grad)
                cpu_grad_kwarg_input = self._get_grads(cpu_forward_kwargs)
                gpu_grad_kwarg_input = self._get_grads(gpu_forward_kwargs)
                self.assertEqual(cpu_grad_kwarg_input, gpu_grad_kwarg_input)
            # Backward several times; grads accumulate identically on both devices.
            for _ in range(5):
                if isinstance(cpu_outputs, torch.Tensor):
                    check_backward(cpu_outputs, gpu_outputs)
                else:
                    flatten_cpu_outputs = torch.utils._pytree.tree_leaves(cpu_outputs)
                    flatten_gpu_outputs = torch.utils._pytree.tree_leaves(gpu_outputs)
                    for cpu_output, gpu_output in zip(flatten_cpu_outputs, flatten_gpu_outputs):
                        if cpu_output.requires_grad:
                            check_backward(cpu_output, gpu_output)
    @with_tf32_off
    @modules(module_db)
    def test_memory_format(self, device, dtype, module_info, training):
        # Run each module over all (input memory_format x module memory_format)
        # combinations, checking that outputs (and grads, in training mode)
        # match the (contiguous, contiguous) reference and that output/grad
        # memory formats follow the expected channels_last propagation rules.
        is_sm86or80 = device.startswith("cuda") and (torch.cuda.get_device_capability(0) == (8, 6)
                                                     or torch.cuda.get_device_capability(0) == (8, 0))
        # TODO tighten it to a specific module
        atol, rtol = (3e-3, 7e-3) if is_sm86or80 else (None, None)
        module_cls = module_info.module_cls
        module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
                                                       requires_grad=True, training=training)
        module_memformat_affects_out = module_info.module_memformat_affects_out
        def _get_mem_formats(channels_last=False, channels_last_3d=False):
            # (input formats, module formats) to try, given input dimensionality.
            if channels_last:
                return ([torch.contiguous_format, torch.channels_last],
                        [torch.preserve_format, torch.contiguous_format, torch.channels_last])
            elif channels_last_3d:
                return ([torch.contiguous_format, torch.channels_last_3d],
                        [torch.preserve_format, torch.contiguous_format, torch.channels_last_3d])
            else:
                return ([torch.contiguous_format],
                        [torch.preserve_format, torch.contiguous_format])
        # Check that at least one Tensor input has dim == n
        def _check_dims(obj, n):
            if isinstance(obj, torch.Tensor):
                return obj.dim() == n
            elif isinstance(obj, (tuple, list)):
                return any(_check_dims(o, n) for o in obj)
            else:
                return False
        # Called after _check_dims, when we know that >= 1 tensor can be converted to mem_format
        def _to_mem_format(mem_format, obj):
            def inner_to_mem_format(obj):
                d = obj.dim()
                # channels_last only applies to 4-D, channels_last_3d to 5-D tensors.
                if ((mem_format == torch.channels_last and d != 4)
                        or (mem_format == torch.channels_last_3d and d != 5)):
                    return obj.detach().clone().requires_grad_(obj.requires_grad)
                return obj.clone().to(memory_format=mem_format).detach().requires_grad_(obj.requires_grad)
            return self._traverse_obj(obj, inner_to_mem_format)
        def _check_out_mem_format(output, input_mem_format, module_mem_format):
            # Channels-last input (or channels-last module, when the module's
            # memformat affects output) implies channels-last output; otherwise
            # output must be contiguous.
            def inner_check_out_mem_format(output):
                d = output.dim()
                if (d == 4 and ((input_mem_format == torch.channels_last)
                                or (module_mem_format == torch.channels_last and module_memformat_affects_out))):
                    self.assertTrue(output.numel() == 0 or output.is_contiguous(memory_format=torch.channels_last))
                elif (d == 5 and ((input_mem_format == torch.channels_last_3d)
                                  or (module_mem_format == torch.channels_last_3d and module_memformat_affects_out))):
                    self.assertTrue(output.numel() == 0 or output.is_contiguous(memory_format=torch.channels_last_3d))
                else:
                    self.assertTrue(output.is_contiguous())
            return self._traverse_obj(output, inner_check_out_mem_format)
        def _req_grad(t):
            return isinstance(t, torch.Tensor) and t.requires_grad
        for module_input in module_inputs:
            if module_input.forward_input is None:
                continue
            supports_channels_last = _check_dims(module_input.forward_input.args, 4)
            supports_channels_last_3d = _check_dims(module_input.forward_input.args, 5)
            input_mem_formats, module_mem_formats = _get_mem_formats(supports_channels_last, supports_channels_last_3d)
            with freeze_rng_state():
                # === Instantiate the module. ===
                args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
                m = module_cls(*args, **kwargs)
                m.to(device).to(dtype)
                m.train(training)
                # === Get output in (contiguous, contiguous) configuration. ===
                args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
                desired_outputs = m(*args, **kwargs)
                # === Do backward pass. ===
                ref_diff_outputs = tuple(t for t in torch.utils._pytree.tree_leaves(desired_outputs) if _req_grad(t))
                if training and len(ref_diff_outputs) > 0:
                    params = tuple(p for p in m.parameters())
                    ref_diff_inputs = tuple(
                        t
                        for t in torch.utils._pytree.tree_leaves((args, kwargs, params))
                        if _req_grad(t)
                    )
                    # Reference grads; the same grad_outputs are reused (copied)
                    # for every memory-format combination below.
                    ref_grad_outputs = tuple(
                        torch.rand_like(t)
                        for t in ref_diff_outputs
                    )
                    ref_grad_inputs = torch.autograd.grad(
                        ref_diff_outputs,
                        ref_diff_inputs,
                        grad_outputs=ref_grad_outputs,
                    )
                for input_mem_format in input_mem_formats:
                    # === Change memformat of input. ===
                    d_args = _to_mem_format(input_mem_format, module_input.forward_input.args)
                    d_kwargs = _to_mem_format(input_mem_format, module_input.forward_input.kwargs)
                    # See https://github.com/pytorch/pytorch/issues/107861
                    # When inductor tests are turned on, the setting of requires_grad will be lost
                    for t1, t2 in zip(
                        torch.utils._pytree.tree_leaves(d_args),
                        torch.utils._pytree.tree_leaves(module_input.forward_input.args),
                    ):
                        t1.requires_grad_(t2.requires_grad)
                    for t1, t2 in zip(
                        torch.utils._pytree.tree_leaves(d_kwargs),
                        torch.utils._pytree.tree_leaves(module_input.forward_input.kwargs),
                    ):
                        t1.requires_grad_(t2.requires_grad)
                    module_input.forward_input.args = d_args
                    module_input.forward_input.kwargs = d_kwargs
                    for module_mem_format in module_mem_formats:
                        # === Change memformat of module ===
                        m.to(memory_format=module_mem_format)
                        # === Do forward pass. ===
                        args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
                        outputs = m(*args, **kwargs)
                        # === Compare outputs to (contiguous, contiguous) output. ===
                        if input_mem_format != torch.contiguous_format or module_mem_format != torch.contiguous_format:
                            self.assertEqual(outputs, desired_outputs, rtol=rtol, atol=atol)
                        # === Check mem format of output. ===
                        _check_out_mem_format(outputs, input_mem_format, module_mem_format)
                        # === Do backward pass. ===
                        diff_outputs = tuple(t for t in torch.utils._pytree.tree_leaves(outputs) if _req_grad(t))
                        if training and len(diff_outputs) > 0:
                            params = tuple(p for p in m.parameters())
                            diff_inputs = tuple(
                                t
                                for t in torch.utils._pytree.tree_leaves((args, kwargs, params))
                                if _req_grad(t)
                            )
                            grad_outputs = tuple(
                                torch.empty_like(t1).copy_(t2)
                                for (t1, t2) in zip(diff_outputs, ref_grad_outputs)
                            )
                            grad_inputs = torch.autograd.grad(
                                diff_outputs,
                                diff_inputs,
                                grad_outputs=grad_outputs,
                            )
                            if (
                                input_mem_format != torch.contiguous_format
                                or module_mem_format != torch.contiguous_format
                            ):
                                self.assertEqual(
                                    grad_inputs, ref_grad_inputs, rtol=rtol, atol=atol
                                )
                            # === Check mem format of grad_inputs. ===
                            _check_out_mem_format(grad_inputs, input_mem_format, module_mem_format)
    # Test whether train and eval modes differ for each module. Use to verify
    # that the ModuleInfo entry flag is correct.
    @modules(module_db, train_eval_mode=TrainEvalMode.train_only)
    def test_if_train_and_eval_modes_differ(self, device, dtype, module_info, training):
        # Deletes the `training` attribute so any forward that reads it raises
        # AttributeError; if that happens, the ModuleInfo entry must declare
        # train_and_eval_differ=True.
        module_cls = module_info.module_cls
        module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
                                                       requires_grad=False, training=training)
        # Run forward inputs through to see if the training flag is accessed during forward.
        for module_input in module_inputs:
            if module_input.forward_input is None:
                continue
            # === Instantiate the module. ===
            args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
            m = module_cls(*args, **kwargs)
            m.to(device).to(dtype)
            m.train(training)
            # Remove training attribute and see if forward still works.
            delattr(m, 'training')
            # === Do forward pass. ===
            try:
                args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
                m(*args, **kwargs)
            except AttributeError as e:
                if "'training'" in str(e):
                    self.assertTrue(module_info.train_and_eval_differ,
                                    f"The ModuleInfo entry for {module_info.name} has "
                                    "train_and_eval_differ=False, but the training mode was found to "
                                    "affect the forward pass. Consider setting train_and_eval_differ=True "
                                    "for this ModuleInfo entry.")
                else:
                    raise e
    @onlyCPU
    @modules(module_db)
    def test_device_ctx_init(self, device, dtype, module_info, training):
        # Constructing a module under `with torch.device('meta')` must produce
        # meta parameters/buffers whose metadata matches a CPU-constructed module.
        module_cls = module_info.module_cls
        module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
                                                       requires_grad=False, training=training)
        with torch.device('meta'):
            module_inputs_meta = module_info.module_inputs_func(module_info, device=None, dtype=dtype,
                                                                requires_grad=False, training=training)
        for module_input, module_input_meta in zip(module_inputs, module_inputs_meta):
            c_args, c_kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
            c_args_meta, c_kwargs_meta = module_input_meta.constructor_input.args, module_input_meta.constructor_input.kwargs
            m_cpu = module_cls(*c_args, **c_kwargs)
            with torch.device('meta'):
                m = module_cls(*c_args_meta, **c_kwargs_meta)
            for (p_meta, p_cpu) in chain(zip(m.parameters(), m_cpu.parameters()),
                                         zip(m.buffers(), m_cpu.buffers())):
                # Uninitialized lazy params have no metadata to compare yet.
                if torch.nn.parameter.is_lazy(p_meta):
                    continue
                self.assertTrue(p_meta.is_meta)
                assert_metadata_eq(self.assertEqual, p_meta, p_cpu)
@modules([module for module in module_db if module.module_error_inputs_func is not None])
def test_errors(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
error_inputs = module_info.module_error_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
for error_input in error_inputs:
module_input = error_input.module_error_input
c_args, c_kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
if error_input.error_on == ModuleErrorEnum.CONSTRUCTION_ERROR:
with self.assertRaisesRegex(error_input.error_type, error_input.error_regex):
m = module_cls(*c_args, **c_kwargs)
elif error_input.error_on == ModuleErrorEnum.FORWARD_ERROR:
m = module_cls(*c_args, **c_kwargs)
fw_args, fw_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
with self.assertRaisesRegex(error_input.error_type, error_input.error_regex):
m(*fw_args, **fw_kwargs)
else:
raise NotImplementedError(f"Unknown error type {error_input.error_on}")
    # Only run this test for float32 because the test loops over all the dtypes
    @modules([module for module in module_db if not module.is_lazy], allowed_dtypes=[torch.float32])
    @parametrize('swap', [True, False])
    @parametrize('set_grad', [True, False])
    @wrapSwapTensorsTest()
    def test_to(self, device, dtype, module_info, training, swap, set_grad):
        # Exercise nn.Module.to() across device/dtype combinations under both
        # the swap_tensors path and the legacy .data-setting path, checking the
        # expected identity (id) and storage (_cdata) invariants for parameters
        # and, when set_grad, their grads.
        module_cls = module_info.module_cls
        devices = ['cpu']
        if torch.cuda.is_available():
            devices += ['cuda']
        dtypes = module_info.dtypes
        module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
                                                       requires_grad=False, training=training)
        torch.__future__.set_swap_module_params_on_conversion(swap)
        for module_input in module_inputs:
            c_args, c_kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
            args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
            m = module_cls(*c_args, **c_kwargs)
            # Avoid using `module.to()` when constructing module since that is the method we are testing
            def _to(m, set_grad=False):
                for c in m.children():
                    _to(c, set_grad=set_grad)
                for n, p in m.named_parameters(recurse=False):
                    new_p = torch.nn.Parameter(p.detach().clone().to(device, dtype))
                    setattr(m, n, new_p)
                    if set_grad:
                        new_p.grad = torch.randn_like(new_p)
                for n, b in m.named_buffers(recurse=False):
                    new_b = b.detach().clone().to(device, dtype)
                    setattr(m, n, new_b)
            _to(m, set_grad=set_grad)
            # Check .to() can be run after forward and backward with swap
            has_params = len(list(m.parameters())) > 0
            if swap and not set_grad and has_params:
                out = m(*args, **kwargs)
                if isinstance(out, tuple):
                    out = out[0]
                out.sum().backward()
                m.to(dtype=torch.half)
                # reset
                m.to(dtype=torch.float32)
            prev_device, prev_dtype = device, dtype
            for device_, dtype_ in product(devices, dtypes):
                # if device/dtype do not change, grad.to(device, dtype) is a no-op so
                # swapping will not change ._cdata
                # parameters will be wrapped in an nn.Parameter before swapping
                # which will cause the ._cdata to change
                g_no_swap = device_ == prev_device and dtype_ == prev_dtype
                prev_device, prev_dtype = device_, dtype_
                p_ids_before = [id(p) for p in m.parameters()]
                p_cdatas_before = [p._cdata for p in m.parameters()]
                if set_grad:
                    g_ids_before = [id(p.grad) for p in m.parameters()]
                    g_cdatas_before = [p.grad._cdata for p in m.parameters()]
                m.to(device=device_, dtype=dtype_)
                self.assertTrue(all(isinstance(p, torch.nn.Parameter) for p in m.parameters()))
                self.assertTrue(all(p.device.type == device_ for p in m.parameters()))
                self.assertTrue(all(p.dtype == dtype_ for p in m.parameters()))
                p_ids_after = [id(p) for p in m.parameters()]
                p_cdatas_after = [p._cdata for p in m.parameters()]
                if set_grad:
                    self.assertTrue(all(p.grad.device.type == device_ for p in m.parameters()))
                    self.assertTrue(all(p.grad.dtype == dtype_ for p in m.parameters()))
                    g_ids_after = [id(p.grad) for p in m.parameters()]
                    g_cdatas_after = [p.grad._cdata for p in m.parameters()]
                if swap:
                    # id same, ._cdata differs --> swapped cdata of THPVariable
                    self.assertTrue(all(a == b for a, b in zip(p_ids_before, p_ids_after)))
                    self.assertTrue(all(a != b for a, b in zip(p_cdatas_before, p_cdatas_after)))
                    if set_grad:
                        self.assertTrue(
                            all(a == b if g_no_swap else a != b for a, b in zip(g_cdatas_before, g_cdatas_after)))
                else:
                    # id and _cdata remain the same --> .data setting
                    self.assertTrue(all(a == b for a, b in zip(p_cdatas_before, p_cdatas_after)))
                    self.assertTrue(all(a == b for a, b in zip(p_ids_before, p_ids_after)))
                    if set_grad:
                        self.assertTrue(all(a == b for a, b in zip(g_cdatas_before, g_cdatas_after)))
                        self.assertTrue(all(a == b for a, b in zip(g_ids_before, g_ids_after)))
    @modules([module for module in module_db if not module.is_lazy], allowed_dtypes=[torch.float32])
    @parametrize('swap', [True, False])
    @wrapSwapTensorsTest()
    def test_to_empty(self, device, dtype, module_info, swap, training):
        # Materialize a meta-constructed module with to_empty(): with swapping
        # the Parameter objects are kept (ids equal, storages swapped); without
        # it new Parameters are created (both ids and storages differ).
        module_cls = module_info.module_cls
        with torch.device("meta"):
            module_inputs = module_info.module_inputs_func(module_info, device=None, dtype=dtype,
                                                           requires_grad=False, training=training)
        torch.__future__.set_swap_module_params_on_conversion(swap)
        device_ = torch.device(device)
        for module_input in module_inputs:
            c_args, c_kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
            with torch.device("meta"):
                m = module_cls(*c_args, **c_kwargs)
            p_ids_before = [id(p) for p in m.parameters()]
            p_cdatas_before = [p._cdata for p in m.parameters()]
            m.to_empty(device=device_)
            self.assertTrue(all(isinstance(p, torch.nn.Parameter) for p in m.parameters()))
            self.assertTrue(all(p.device == device_ for p in m.parameters()))
            self.assertTrue(all(p.dtype == dtype for p in m.parameters()))
            p_ids_after = [id(p) for p in m.parameters()]
            p_cdatas_after = [p._cdata for p in m.parameters()]
            if swap:
                # id same, ._cdata differs --> swapped cdata of THPVariable
                self.assertTrue(all(a == b for a, b in zip(p_ids_before, p_ids_after)))
                self.assertTrue(all(a != b for a, b in zip(p_cdatas_before, p_cdatas_after)))
            else:
                # id and ._cdata differ
                # meta and device have different shallow copy types, so this will create a new
                # parameter and assign it to the module
                self.assertTrue(all(a != b for a, b in zip(p_ids_before, p_ids_after)))
                self.assertTrue(all(a != b for a, b in zip(p_cdatas_before, p_cdatas_after)))
# Generate per-device-type (CPU/CUDA/MPS) concrete test classes from TestModule.
instantiate_device_type_tests(TestModule, globals(), allow_mps=True)
if __name__ == '__main__':
    run_tests()
|
TestModule
|
python
|
jina-ai__jina
|
tests/docker_compose/reload-executor/reload_executor.py
|
{
"start": 53,
"end": 353
}
|
class ____(Executor):
def __init__(self, argument, *args, **kwargs):
super().__init__(*args, **kwargs)
self.argument = argument
@requests()
def exec(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.tags['argument'] = self.argument
|
ReloadExecutor
|
python
|
doocs__leetcode
|
solution/0000-0099/0033.Search in Rotated Sorted Array/Solution.py
|
{
"start": 0,
"end": 584
}
|
class ____:
def search(self, nums: List[int], target: int) -> int:
n = len(nums)
left, right = 0, n - 1
while left < right:
mid = (left + right) >> 1
if nums[0] <= nums[mid]:
if nums[0] <= target <= nums[mid]:
right = mid
else:
left = mid + 1
else:
if nums[mid] < target <= nums[n - 1]:
left = mid + 1
else:
right = mid
return left if nums[left] == target else -1
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 250444,
"end": 251077
}
|
class ____(sgqlc.types.Input):
"""Autogenerated input type of MarkFileAsViewed"""
__schema__ = github_schema
__field_names__ = ("pull_request_id", "path", "client_mutation_id")
pull_request_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="pullRequestId")
"""The Node ID of the pull request."""
path = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="path")
"""The path of the file to mark as viewed"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
|
MarkFileAsViewedInput
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/property14.py
|
{
"start": 193,
"end": 502
}
|
class ____:
def __init__(self):
self._something = []
@property
def something(self) -> Sequence[Hashable]:
return self._something
@something.setter
def something(self, thing: list[HashableT]):
self._something = thing
f = ClassA()
f.something = ["a", "b", "c"]
|
ClassA
|
python
|
weaviate__weaviate-python-client
|
weaviate/config.py
|
{
"start": 2418,
"end": 3668
}
|
class ____(BaseModel):
"""Use this class to specify the connection and proxy settings for your client when connecting to Weaviate.
When specifying the timeout, you can either provide a tuple with the query and insert timeouts, or a `Timeout` object.
The `Timeout` object gives you additional option to configure the `init` timeout, which controls how long the client
initialisation checks will wait for before throwing. This is useful when you have a slow network connection.
When specifying the proxies, be aware that supplying a URL (`str`) will populate all of the `http`, `https`, and grpc proxies.
In order for this to be possible, you must have a proxy that is capable of handling simultaneous HTTP/1.1 and HTTP/2 traffic.
"""
connection: ConnectionConfig = Field(default_factory=ConnectionConfig)
proxies: Union[str, Proxies, None] = Field(default=None)
timeout_: Union[Tuple[int, int], Timeout] = Field(default_factory=Timeout, alias="timeout")
trust_env: bool = Field(default=False)
@property
def timeout(self) -> Timeout:
if isinstance(self.timeout_, tuple):
return Timeout(query=self.timeout_[0], insert=self.timeout_[1])
return self.timeout_
|
AdditionalConfig
|
python
|
walkccc__LeetCode
|
solutions/2790. Maximum Number of Groups With Increasing Length/2790.py
|
{
"start": 0,
"end": 351
}
|
class ____:
def maxIncreasingGroups(self, usageLimits: list[int]) -> int:
ans = 1 # the next target length
availableLimits = 0
for usageLimit in sorted(usageLimits):
availableLimits += usageLimit
# Can create groups 1, 2, ..., ans.
if availableLimits >= ans * (ans + 1) // 2:
ans += 1
return ans - 1
|
Solution
|
python
|
django__django
|
tests/admin_views/admin.py
|
{
"start": 6873,
"end": 7333
}
|
class ____(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
"""Only allow changing objects with even id number"""
return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)
def has_view_permission(self, request, obj=None):
"""Only allow viewing objects if id is a multiple of 3."""
return request.user.is_staff and obj is not None and obj.id % 3 == 0
|
RowLevelChangePermissionModelAdmin
|
python
|
kamyu104__LeetCode-Solutions
|
Python/sorting-the-sentence.py
|
{
"start": 48,
"end": 432
}
|
class ____(object):
def sortSentence(self, s):
"""
:type s: str
:rtype: str
"""
words = s.split()
for i in xrange(len(words)):
while int(words[i][-1])-1 != i:
words[int(words[i][-1])-1], words[i] = words[i], words[int(words[i][-1])-1]
return " ".join(itertools.imap(lambda x: x[:-1], words))
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/545. Boundary of Binary Tree/545.py
|
{
"start": 0,
"end": 991
}
|
class ____:
def boundaryOfBinaryTree(self, root: TreeNode | None) -> list[int]:
if not root:
return []
ans = [root.val]
def dfs(root: TreeNode | None, lb: bool, rb: bool):
"""
1. root.left is left boundary if root is left boundary.
root.right if left boundary if root.left is None.
2. Same applys for right boundary.
3. If root is left boundary, add it before 2 children - preorder.
If root is right boundary, add it after 2 children - postorder.
4. A leaf that is neighter left/right boundary belongs to the bottom.
"""
if not root:
return
if lb:
ans.append(root.val)
if not lb and not rb and not root.left and not root.right:
ans.append(root.val)
dfs(root.left, lb, rb and not root.right)
dfs(root.right, lb and not root.left, rb)
if rb:
ans.append(root.val)
dfs(root.left, True, False)
dfs(root.right, False, True)
return ans
|
Solution
|
python
|
lazyprogrammer__machine_learning_examples
|
rl/approx_prediction.py
|
{
"start": 1224,
"end": 3388
}
|
class ____:
def __init__(self, grid):
# fit the featurizer to data
samples = gather_samples(grid)
# self.featurizer = Nystroem()
self.featurizer = RBFSampler()
self.featurizer.fit(samples)
dims = self.featurizer.n_components
# initialize linear model weights
self.w = np.zeros(dims)
def predict(self, s):
x = self.featurizer.transform([s])[0]
return x @ self.w
def grad(self, s):
x = self.featurizer.transform([s])[0]
return x
if __name__ == '__main__':
# use the standard grid again (0 for every step) so that we can compare
# to iterative policy evaluation
grid = standard_grid()
# print rewards
print("rewards:")
print_values(grid.rewards, grid)
# state -> action
greedy_policy = {
(2, 0): 'U',
(1, 0): 'U',
(0, 0): 'R',
(0, 1): 'R',
(0, 2): 'R',
(1, 2): 'R',
(2, 1): 'R',
(2, 2): 'R',
(2, 3): 'U',
}
model = Model(grid)
mse_per_episode = []
# repeat until convergence
n_episodes = 10000
for it in range(n_episodes):
if (it + 1) % 100 == 0:
print(it + 1)
s = grid.reset()
Vs = model.predict(s)
n_steps = 0
episode_err = 0
while not grid.game_over():
a = epsilon_greedy(greedy_policy, s)
r = grid.move(a)
s2 = grid.current_state()
# get the target
if grid.is_terminal(s2):
target = r
else:
Vs2 = model.predict(s2)
target = r + GAMMA * Vs2
# update the model
g = model.grad(s)
err = target - Vs
model.w += ALPHA * err * g
# accumulate error
n_steps += 1
episode_err += err*err
# update state
s = s2
Vs = Vs2
mse = episode_err / n_steps
mse_per_episode.append(mse)
plt.plot(mse_per_episode)
plt.title("MSE per episode")
plt.show()
# obtain predicted values
V = {}
states = grid.all_states()
for s in states:
if s in grid.actions:
V[s] = model.predict(s)
else:
# terminal state or state we can't otherwise get to
V[s] = 0
print("values:")
print_values(V, grid)
print("policy:")
print_policy(greedy_policy, grid)
|
Model
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-number-of-removals-to-make-mountain-array.py
|
{
"start": 49,
"end": 863
}
|
class ____(object):
def minimumMountainRemovals(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
left_lis_len = [0]*len(nums)
lis = []
for i in xrange(len(nums)-1):
j = bisect.bisect_left(lis, nums[i])
if j == len(lis):
lis.append(nums[i])
else:
lis[j] = nums[i]
left_lis_len[i] = j
max_len = 0
lis = []
for i in reversed(xrange(1, len(nums))):
j = bisect.bisect_left(lis, nums[i])
if j == len(lis):
lis.append(nums[i])
else:
lis[j] = nums[i]
if i < len(nums)-1:
max_len = max(max_len, left_lis_len[i]+j)
return len(nums) - (1+max_len)
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/integrations/github/status_check.py
|
{
"start": 429,
"end": 854
}
|
class ____(enum.Enum):
"""
GitHub Check Run conclusion values (when status is completed).
https://docs.github.com/en/rest/checks/runs?apiVersion=2022-11-28#create-a-check-run
"""
ACTION_REQUIRED = "action_required"
CANCELLED = "cancelled"
FAILURE = "failure"
NEUTRAL = "neutral"
SKIPPED = "skipped"
STALE = "stale"
SUCCESS = "success"
TIMED_OUT = "timed_out"
|
GitHubCheckConclusion
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/parallel_for/control_flow_ops_test.py
|
{
"start": 73267,
"end": 74655
}
|
class ____(PForTestCase):
def test_loop_variant_cond(self):
x = [1, 2, 3, 4, 5.]
y = 2.5
@def_function.function
def loop_fn(i):
x_i = array_ops.gather(x, i)
# Note that the output has a combination of then and else branches being
# loop variant / invariant.
return cond_v2.cond_v2(x_i < y, lambda: (y - x_i, y, 1., 2.), lambda:
(x_i - y, 0., y, 3.))
self._test_loop_fn(loop_fn, iters=5)
def test_loop_invariant_cond(self):
x = [1, 2, 3, 4, 5.]
y = 0.5
z = random_ops.random_uniform([])
@def_function.function
def loop_fn(i):
x_i = array_ops.gather(x, i)
# Note that the output has a combination of then and else branches being
# loop variant / invariant.
return cond_v2.cond_v2(z < y, lambda: (y - x_i, y, 1., 2.), lambda:
(x_i - y, 0., y, 3.))
self._test_loop_fn(loop_fn, iters=5)
def test_empty_branch(self):
x = [1, 2, 3, 4, 5.]
y = 6.
@def_function.function
def loop_fn(i):
x_i = array_ops.gather(x, i)
return cond_v2.cond_v2(
x_i < y, # Note that else branch is empty.
lambda: (y - x_i, y, 1., 2.),
lambda: (x_i - y, 0., y, 3.))
self._test_loop_fn(loop_fn, iters=5)
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
|
StatelessIfTest
|
python
|
viewflow__viewflow
|
viewflow/forms/renderers.py
|
{
"start": 19527,
"end": 20504
}
|
class ____(LayoutNode):
"""Place elements vertically stacked, one under another.
Example:
layout = Layout(
Row(
Column('first_name', 'last_name', desktop=8, tablet=6)
'sex_options'
)
)
"""
def __init__(self, *elements, **kwargs):
self.id_ = kwargs.pop("id_", None)
self.children = _convert_to_children(elements)
super().__init__(**kwargs)
def append(self, layout: FormLayout, form: forms.Form, root: ElementTree.Element):
if self.children:
wrapper = ElementTree.SubElement(
root,
"div",
{
"class": "vf-form-column mdc-layout-grid__cell mdc-layout-grid__cell--span-12"
},
)
if self.id_:
wrapper.attrib["id"] = self.id_
for child in self.children:
child.append(layout, form, wrapper)
|
Column
|
python
|
huggingface__transformers
|
src/transformers/models/bart/modeling_bart.py
|
{
"start": 2403,
"end": 3491
}
|
class ____(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(
self, input_ids: torch.Tensor, past_key_values_length: int = 0, position_ids: Optional[torch.Tensor] = None
):
"""`input_ids' shape is expected to be [bsz x seqlen]."""
if position_ids is None:
bsz, seq_len = input_ids.shape[:2]
position_ids = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
).expand(bsz, -1)
else:
position_ids = position_ids.unsqueeze(0)
return super().forward(position_ids + self.offset)
|
BartLearnedPositionalEmbedding
|
python
|
Textualize__textual
|
tests/css/test_css_reloading.py
|
{
"start": 214,
"end": 332
}
|
class ____(Screen[None]):
def compose(self) -> ComposeResult:
yield Label("I am the base screen")
|
BaseScreen
|
python
|
Textualize__textual
|
tests/test_await_remove.py
|
{
"start": 64,
"end": 164
}
|
class ____(Label):
async def on_mount(self) -> None:
await self.remove()
|
SelfRemovingLabel
|
python
|
python-attrs__attrs
|
src/attr/_make.py
|
{
"start": 77170,
"end": 84046
}
|
class ____:
"""
*Read-only* representation of an attribute.
.. warning::
You should never instantiate this class yourself.
The class has *all* arguments of `attr.ib` (except for ``factory`` which is
only syntactic sugar for ``default=Factory(...)`` plus the following:
- ``name`` (`str`): The name of the attribute.
- ``alias`` (`str`): The __init__ parameter name of the attribute, after
any explicit overrides and default private-attribute-name handling.
- ``inherited`` (`bool`): Whether or not that attribute has been inherited
from a base class.
- ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The
callables that are used for comparing and ordering objects by this
attribute, respectively. These are set by passing a callable to
`attr.ib`'s ``eq``, ``order``, or ``cmp`` arguments. See also
:ref:`comparison customization <custom-comparison>`.
Instances of this class are frequently used for introspection purposes
like:
- `fields` returns a tuple of them.
- Validators get them passed as the first argument.
- The :ref:`field transformer <transform-fields>` hook receives a list of
them.
- The ``alias`` property exposes the __init__ parameter name of the field,
with any overrides and default private-attribute handling applied.
.. versionadded:: 20.1.0 *inherited*
.. versionadded:: 20.1.0 *on_setattr*
.. versionchanged:: 20.2.0 *inherited* is not taken into account for
equality checks and hashing anymore.
.. versionadded:: 21.1.0 *eq_key* and *order_key*
.. versionadded:: 22.2.0 *alias*
For the full version history of the fields, see `attr.ib`.
"""
# These slots must NOT be reordered because we use them later for
# instantiation.
__slots__ = ( # noqa: RUF023
"name",
"default",
"validator",
"repr",
"eq",
"eq_key",
"order",
"order_key",
"hash",
"init",
"metadata",
"type",
"converter",
"kw_only",
"inherited",
"on_setattr",
"alias",
)
def __init__(
self,
name,
default,
validator,
repr,
cmp, # XXX: unused, remove along with other cmp code.
hash,
init,
inherited,
metadata=None,
type=None,
converter=None,
kw_only=False,
eq=None,
eq_key=None,
order=None,
order_key=None,
on_setattr=None,
alias=None,
):
eq, eq_key, order, order_key = _determine_attrib_eq_order(
cmp, eq_key or eq, order_key or order, True
)
# Cache this descriptor here to speed things up later.
bound_setattr = _OBJ_SETATTR.__get__(self)
# Despite the big red warning, people *do* instantiate `Attribute`
# themselves.
bound_setattr("name", name)
bound_setattr("default", default)
bound_setattr("validator", validator)
bound_setattr("repr", repr)
bound_setattr("eq", eq)
bound_setattr("eq_key", eq_key)
bound_setattr("order", order)
bound_setattr("order_key", order_key)
bound_setattr("hash", hash)
bound_setattr("init", init)
bound_setattr("converter", converter)
bound_setattr(
"metadata",
(
types.MappingProxyType(dict(metadata)) # Shallow copy
if metadata
else _EMPTY_METADATA_SINGLETON
),
)
bound_setattr("type", type)
bound_setattr("kw_only", kw_only)
bound_setattr("inherited", inherited)
bound_setattr("on_setattr", on_setattr)
bound_setattr("alias", alias)
def __setattr__(self, name, value):
raise FrozenInstanceError
@classmethod
def from_counting_attr(
cls, name: str, ca: _CountingAttr, kw_only: bool, type=None
):
# The 'kw_only' argument is the class-level setting, and is used if the
# attribute itself does not explicitly set 'kw_only'.
# type holds the annotated value. deal with conflicts:
if type is None:
type = ca.type
elif ca.type is not None:
msg = f"Type annotation and type argument cannot both be present for '{name}'."
raise ValueError(msg)
return cls(
name,
ca._default,
ca._validator,
ca.repr,
None,
ca.hash,
ca.init,
False,
ca.metadata,
type,
ca.converter,
kw_only if ca.kw_only is None else ca.kw_only,
ca.eq,
ca.eq_key,
ca.order,
ca.order_key,
ca.on_setattr,
ca.alias,
)
# Don't use attrs.evolve since fields(Attribute) doesn't work
def evolve(self, **changes):
"""
Copy *self* and apply *changes*.
This works similarly to `attrs.evolve` but that function does not work
with :class:`attrs.Attribute`.
It is mainly meant to be used for `transform-fields`.
.. versionadded:: 20.3.0
"""
new = copy.copy(self)
new._setattrs(changes.items())
return new
# Don't use _add_pickle since fields(Attribute) doesn't work
def __getstate__(self):
"""
Play nice with pickle.
"""
return tuple(
getattr(self, name) if name != "metadata" else dict(self.metadata)
for name in self.__slots__
)
def __setstate__(self, state):
"""
Play nice with pickle.
"""
self._setattrs(zip(self.__slots__, state))
def _setattrs(self, name_values_pairs):
bound_setattr = _OBJ_SETATTR.__get__(self)
for name, value in name_values_pairs:
if name != "metadata":
bound_setattr(name, value)
else:
bound_setattr(
name,
(
types.MappingProxyType(dict(value))
if value
else _EMPTY_METADATA_SINGLETON
),
)
_a = [
Attribute(
name=name,
default=NOTHING,
validator=None,
repr=True,
cmp=None,
eq=True,
order=False,
hash=(name != "metadata"),
init=True,
inherited=False,
alias=_default_init_alias_for(name),
)
for name in Attribute.__slots__
]
Attribute = _add_hash(
_add_eq(
_add_repr(Attribute, attrs=_a),
attrs=[a for a in _a if a.name != "inherited"],
),
attrs=[a for a in _a if a.hash and a.name != "inherited"],
)
|
Attribute
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/sql.py
|
{
"start": 7962,
"end": 8049
}
|
class ____:
UniqueText = db.String(512)
LongText = LongText
|
MySQLCompatabilityTypes
|
python
|
keras-team__keras
|
keras/src/layers/normalization/layer_normalization_test.py
|
{
"start": 186,
"end": 4817
}
|
class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_ln_basics(self):
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={
"gamma_regularizer": regularizers.L2(0.01),
"beta_regularizer": regularizers.L2(0.01),
},
input_shape=(3, 4, 2),
expected_output_shape=(3, 4, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=2,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={
"gamma_initializer": "ones",
"beta_initializer": "ones",
},
input_shape=(3, 4, 2),
expected_output_shape=(3, 4, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={"scale": False, "center": False},
input_shape=(3, 3),
expected_output_shape=(3, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={"rms_scaling": True},
input_shape=(3, 3),
expected_output_shape=(3, 3),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={"axis": (-3, -2, -1)},
input_shape=(2, 8, 8, 3),
expected_output_shape=(2, 8, 8, 3),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.LayerNormalization,
init_kwargs={},
input_shape=(1, 0, 10),
expected_output_shape=(1, 0, 10),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
def test_invalid_axis(self):
with self.assertRaisesRegex(
TypeError,
("Expected an int or a list/tuple of ints for the argument 'axis'"),
):
layers.LayerNormalization(axis={"axis": -1})
def test_correctness(self):
layer = layers.LayerNormalization(dtype="float32")
layer.build(input_shape=(2, 2, 2))
inputs = np.random.normal(
loc=5.0, scale=10.0, size=(1000, 2, 2, 2)
).astype("float32")
out = layer(inputs)
out = ops.subtract(out, layer.beta)
out = ops.divide(out, layer.gamma)
self.assertAllClose(ops.mean(out), 0.0, atol=1e-1)
self.assertAllClose(ops.std(out), 1.0, atol=1e-1)
def test_output(self):
layer = layers.LayerNormalization(
dtype="float32",
beta_initializer="ones",
gamma_initializer="ones",
)
inputs = np.arange(5).astype("float32")[None, :]
out = layer(inputs)
self.assertAllClose(out, [[-0.41386, 0.29307, 1.0, 1.70693, 2.41386]])
def test_output_with_rms_scaling(self):
layer = layers.LayerNormalization(
dtype="float32",
rms_scaling=True,
gamma_initializer="ones",
)
inputs = np.arange(5).astype("float32")[None, :]
out = layer(inputs)
self.assertAllClose(out, [[0.0, 0.70693, 1.41386, 2.12079, 2.82772]])
def test_large_value_within_autocast_scope(self):
layer = layers.LayerNormalization()
layer.build((1, 4, 4, 3))
# Use 70000 to trigger overflow for float16
large_value = ops.full(layer.gamma.shape, 70000)
with backend.AutocastScope("float16"):
layer.gamma.assign(large_value)
self.assertAllClose(layer.gamma.value, large_value)
|
LayerNormalizationTest
|
python
|
pyca__cryptography
|
tests/hazmat/primitives/test_rsa.py
|
{
"start": 59636,
"end": 60446
}
|
class ____:
def test_invalid_algorithm(self):
mgf = padding.MGF1(hashes.SHA256())
with pytest.raises(TypeError):
padding.OAEP(
mgf=mgf,
algorithm=b"", # type:ignore[arg-type]
label=None,
)
def test_algorithm_property(self):
algorithm = hashes.SHA256()
mgf = padding.MGF1(algorithm)
oaep = padding.OAEP(mgf=mgf, algorithm=algorithm, label=None)
assert oaep.algorithm == algorithm
assert oaep.algorithm == oaep._algorithm
def test_mgf_property(self):
algorithm = hashes.SHA256()
mgf = padding.MGF1(algorithm)
oaep = padding.OAEP(mgf=mgf, algorithm=algorithm, label=None)
assert oaep.mgf == mgf
assert oaep.mgf == oaep._mgf
|
TestOAEP
|
python
|
scikit-learn__scikit-learn
|
sklearn/utils/_param_validation.py
|
{
"start": 19373,
"end": 19937
}
|
class ____(_Constraint):
"""Constraint representing boolean likes.
Convenience class for
[bool, np.bool_]
"""
def __init__(self):
super().__init__()
self._constraints = [
_InstancesOf(bool),
_InstancesOf(np.bool_),
]
def is_satisfied_by(self, val):
return any(c.is_satisfied_by(val) for c in self._constraints)
def __str__(self):
return (
f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
f" {self._constraints[-1]}"
)
|
_Booleans
|
python
|
lazyprogrammer__machine_learning_examples
|
cnn_class2/tf_resnet_identity_block.py
|
{
"start": 413,
"end": 3736
}
|
class ____:
def __init__(self, mi, fm_sizes, activation=tf.nn.relu):
# conv1, conv2, conv3
# note: # feature maps shortcut = # feauture maps conv 3
assert(len(fm_sizes) == 3)
# note: kernel size in 2nd conv is always 3
# so we won't bother including it as an arg
self.session = None
self.f = tf.nn.relu
# init main branch
# Conv -> BN -> F() ---> Conv -> BN -> F() ---> Conv -> BN
self.conv1 = ConvLayer(1, mi, fm_sizes[0], 1)
self.bn1 = BatchNormLayer(fm_sizes[0])
self.conv2 = ConvLayer(3, fm_sizes[0], fm_sizes[1], 1, 'SAME')
self.bn2 = BatchNormLayer(fm_sizes[1])
self.conv3 = ConvLayer(1, fm_sizes[1], fm_sizes[2], 1)
self.bn3 = BatchNormLayer(fm_sizes[2])
# in case needed later
self.layers = [
self.conv1, self.bn1,
self.conv2, self.bn2,
self.conv3, self.bn3,
]
# this will not be used when input passed in from
# a previous layer
self.input_ = tf.placeholder(tf.float32, shape=(1, 224, 224, mi))
self.output = self.forward(self.input_)
def forward(self, X):
# main branch
FX = self.conv1.forward(X)
FX = self.bn1.forward(FX)
FX = self.f(FX)
FX = self.conv2.forward(FX)
FX = self.bn2.forward(FX)
FX = self.f(FX)
FX = self.conv3.forward(FX)
FX = self.bn3.forward(FX)
return self.f(FX + X)
def predict(self, X):
assert(self.session is not None)
return self.session.run(
self.output,
feed_dict={self.input_: X}
)
def set_session(self, session):
# need to make this a session
# so assignment happens on sublayers too
self.session = session
self.conv1.session = session
self.bn1.session = session
self.conv2.session = session
self.bn2.session = session
self.conv3.session = session
self.bn3.session = session
def copyFromKerasLayers(self, layers):
assert(len(layers) == 10)
# <keras.layers.convolutional.Conv2D at 0x7fa44255ff28>,
# <keras.layers.normalization.BatchNormalization at 0x7fa44250e7b8>,
# <keras.layers.core.Activation at 0x7fa44252d9e8>,
# <keras.layers.convolutional.Conv2D at 0x7fa44253af60>,
# <keras.layers.normalization.BatchNormalization at 0x7fa4424e4f60>,
# <keras.layers.core.Activation at 0x7fa442494828>,
# <keras.layers.convolutional.Conv2D at 0x7fa4424a2da0>,
# <keras.layers.normalization.BatchNormalization at 0x7fa44244eda0>,
# <keras.layers.merge.Add at 0x7fa44245d5c0>,
# <keras.layers.core.Activation at 0x7fa44240aba8>
self.conv1.copyFromKerasLayers(layers[0])
self.bn1.copyFromKerasLayers(layers[1])
self.conv2.copyFromKerasLayers(layers[3])
self.bn2.copyFromKerasLayers(layers[4])
self.conv3.copyFromKerasLayers(layers[6])
self.bn3.copyFromKerasLayers(layers[7])
def get_params(self):
params = []
for layer in self.layers:
params += layer.get_params()
return params
if __name__ == '__main__':
identity_block = IdentityBlock(mi=256, fm_sizes=[64, 64, 256])
# make a fake image
X = np.random.random((1, 224, 224, 256))
init = tf.global_variables_initializer()
with tf.Session() as session:
identity_block.set_session(session)
session.run(init)
output = identity_block.predict(X)
print("output.shape:", output.shape)
|
IdentityBlock
|
python
|
gevent__gevent
|
src/gevent/tests/test__selectors.py
|
{
"start": 1604,
"end": 3845
}
|
class ____(SelectorTestMixin,
greentest.TestCase):
def test_select_using_socketpair(self):
# Basic test.
with selectors.GeventSelector() as sel:
self._check_selector(sel)
def test_select_many_sockets(self):
try:
AF_UNIX = socket.AF_UNIX
except AttributeError:
AF_UNIX = None
pairs = [socket.socketpair() for _ in range(10)]
try:
server_sel = selectors.GeventSelector()
client_sel = selectors.GeventSelector()
for i, pair in enumerate(pairs):
server, client = pair
server_sel.register(server, selectors.EVENT_READ,
self.read_from_ready_socket_and_reply)
client_sel.register(client, selectors.EVENT_READ, i)
# Prime them all to be ready at once.
data = str(i).encode('ascii')
client.send(data)
# Read and reply to all the clients..
# Everyone should be ready, so we ask not to block.
# The call to gevent.idle() is there to make sure that
# all event loop implementations (looking at you, libuv)
# get a chance to poll for IO. Without it, libuv
# doesn't find any results here.
# Not blocking only works for AF_UNIX sockets, though.
# If we got AF_INET (Windows) the data may need some time to
# traverse through the layers.
gevent.idle()
self.run_selector_once(
server_sel,
timeout=-1 if pairs[0][0].family == AF_UNIX else 3)
found = 0
for key, _ in client_sel.select(timeout=3):
expected = str(key.data).encode('ascii')
data = key.fileobj.recv(50)
self.assertEqual(data, expected)
found += 1
self.assertEqual(found, len(pairs))
finally:
server_sel.close()
client_sel.close()
for pair in pairs:
for s in pair:
s.close()
@greentest.skipOnWindows("Things like os.close don't work on Windows")
|
GeventSelectorTest
|
python
|
python-pillow__Pillow
|
Tests/test_image_access.py
|
{
"start": 304,
"end": 3199
}
|
class ____:
def test_sanity(self) -> None:
im1 = hopper()
im2 = Image.new(im1.mode, im1.size, 0)
for y in range(im1.size[1]):
for x in range(im1.size[0]):
pos = x, y
value = im1.getpixel(pos)
assert value is not None
im2.putpixel(pos, value)
assert_image_equal(im1, im2)
im2 = Image.new(im1.mode, im1.size, 0)
im2.readonly = 1
for y in range(im1.size[1]):
for x in range(im1.size[0]):
pos = x, y
value = im1.getpixel(pos)
assert value is not None
im2.putpixel(pos, value)
assert not im2.readonly
assert_image_equal(im1, im2)
im2 = Image.new(im1.mode, im1.size, 0)
pix1 = im1.load()
pix2 = im2.load()
assert pix1 is not None
assert pix2 is not None
with pytest.raises(TypeError):
pix1[0, "0"] # type: ignore[index]
with pytest.raises(TypeError):
pix1["0", 0] # type: ignore[index]
for y in range(im1.size[1]):
for x in range(im1.size[0]):
pix2[x, y] = pix1[x, y]
assert_image_equal(im1, im2)
def test_sanity_negative_index(self) -> None:
im1 = hopper()
im2 = Image.new(im1.mode, im1.size, 0)
width, height = im1.size
assert im1.getpixel((0, 0)) == im1.getpixel((-width, -height))
assert im1.getpixel((-1, -1)) == im1.getpixel((width - 1, height - 1))
for y in range(-1, -im1.size[1] - 1, -1):
for x in range(-1, -im1.size[0] - 1, -1):
pos = x, y
value = im1.getpixel(pos)
assert value is not None
im2.putpixel(pos, value)
assert_image_equal(im1, im2)
im2 = Image.new(im1.mode, im1.size, 0)
im2.readonly = 1
for y in range(-1, -im1.size[1] - 1, -1):
for x in range(-1, -im1.size[0] - 1, -1):
pos = x, y
value = im1.getpixel(pos)
assert value is not None
im2.putpixel(pos, value)
assert not im2.readonly
assert_image_equal(im1, im2)
im2 = Image.new(im1.mode, im1.size, 0)
pix1 = im1.load()
pix2 = im2.load()
assert pix1 is not None
assert pix2 is not None
for y in range(-1, -im1.size[1] - 1, -1):
for x in range(-1, -im1.size[0] - 1, -1):
pix2[x, y] = pix1[x, y]
assert_image_equal(im1, im2)
@pytest.mark.skipif(numpy is None, reason="NumPy not installed")
def test_numpy(self) -> None:
im = hopper()
px = im.load()
assert px is not None
assert numpy is not None
assert px[numpy.int32(1), numpy.int32(2)] == (18, 20, 59)
|
TestImagePutPixel
|
python
|
encode__django-rest-framework
|
tests/utils.py
|
{
"start": 496,
"end": 923
}
|
class ____:
def __init__(self, iterable):
self.items = iterable
def __getitem__(self, val):
return self.items[val]
def get(self, **lookup):
for item in self.items:
if all([
attrgetter(key.replace('__', '.'))(item) == value
for key, value in lookup.items()
]):
return item
raise ObjectDoesNotExist()
|
MockQueryset
|
python
|
walkccc__LeetCode
|
solutions/2418. Sort the People/2418.py
|
{
"start": 0,
"end": 219
}
|
class ____:
def sortPeople(self, names: list[str], heights: list[int]) -> list[str]:
return [height for _, height in
sorted([(height, name) for name, height in zip(names, heights)], reverse=True)]
|
Solution
|
python
|
pypa__warehouse
|
tests/common/db/organizations.py
|
{
"start": 5791,
"end": 5998
}
|
class ____(WarehouseFactory):
class Meta:
model = TeamRole
role_name = TeamRoleType.Member
user = factory.SubFactory(UserFactory)
team = factory.SubFactory(TeamFactory)
|
TeamRoleFactory
|
python
|
ray-project__ray
|
doc/source/custom_directives.py
|
{
"start": 20257,
"end": 114285
}
|
class ____:
    """Object which holds configuration data for a set of library examples."""

    def __init__(
        self, path: Union[pathlib.Path, str], src_dir: Union[pathlib.Path, str]
    ):
        """Parse a config file containing examples to display in the example gallery.

        Parameters
        ----------
        path : Union[pathlib.Path, str]
            Path to the `examples.yml` to be parsed
        src_dir : Union[pathlib.Path, str]
            Path to the root of the docs directory; `app.srcdir`
        """
        if isinstance(path, str):
            path = pathlib.Path(path)
        if not path.exists():
            raise ValueError(f"No configuration file found at {path}.")
        with open(path, "r") as f:
            config = yaml.safe_load(f)
        self.config_path = path
        # The owning library is inferred from the config file's location on disk.
        self.library = Library.from_path(path)
        self.examples = self.parse_examples(config.get("examples", []), src_dir)
        self.text = config.get("text", "")
        self.columns_to_show = self.parse_columns_to_show(
            config.get("columns_to_show", [])
        )
        groupby = config.get("groupby", "skill_level")
        # Resolve the groupby key to the ExampleEnum subclass advertising it;
        # the for/else raises when no subclass matches the requested key.
        for cls in ExampleEnum.__subclasses__():
            if cls.key() == groupby:
                self.groupby = cls
                break
        else:
            valid_classes = [cls.key() for cls in ExampleEnum.__subclasses__()]
            raise ValueError(
                f"Unable to find class to group example entries by {groupby}. "
                f"Valid choices are {valid_classes}.",
            )

    def parse_columns_to_show(self, columns: List[str]) -> Dict[str, type]:
        """Parse the columns to show in the library example page for the config.

        Note that a link to the example is always shown, and cannot be hidden.

        Parameters
        ----------
        columns : List[str]
            Column names to show; valid names are "use_cases" and "frameworks".

        Returns
        -------
        Dict[str, type]
            Mapping from example attribute name to the ExampleEnum class
            whose values are rendered in that column.
        """
        cols = {}
        for col in columns:
            if col == "use_cases":
                cols["use_cases"] = UseCase
            elif col == "frameworks":
                cols["frameworks"] = Framework
            else:
                raise ValueError(
                    f"Invalid column name {col} specified in {self.config_path}"
                )
        return cols

    def parse_examples(
        self, example_config: List[Dict[str, str]], src_dir: Union[pathlib.Path, str]
    ) -> List[Example]:
        """Parse the examples in the given configuration.

        Raise an exception if duplicate examples are found in the configuration file.

        Parameters
        ----------
        example_config : List[Dict[str, str]]
            Raw example entries read from the ``examples`` key of the config file
        src_dir : Union[pathlib.Path, str]
            Path to the root of the docs directory; ``app.srcdir``

        Returns
        -------
        List[Example]
            List of examples from the parsed configuration file
        """
        links = set()
        examples = []
        for entry in example_config:
            example = Example(
                entry, self.library, self.config_path.relative_to(src_dir).parent
            )
            # Example links must be unique within a single config file.
            if example.link in links:
                raise ValueError(
                    f"A duplicate example {example.link} was specified in "
                    f"{self.config_path}. Please remove duplicates and rebuild."
                )
            links.add(example.link)
            examples.append(example)
        return examples

    def __iter__(self):
        # Iterating the config iterates its parsed examples.
        yield from self.examples
def setup_context(app, pagename, templatename, context, doctree):
    """Populate the HTML page context with Ray-docs rendering helpers.

    Intended as a Sphinx ``html-page-context`` handler; every helper defined
    below is a closure over the current ``app``, ``pagename``, and ``context``.

    Fix: the duplicate-example comparison in ``render_example_gallery``
    previously iterated ``vars(example)`` directly (keys only), so unpacking
    ``key, value`` raised ``ValueError`` whenever a duplicate link appeared;
    it now iterates ``vars(example).items()``.
    """

    def render_library_examples(config: pathlib.Path = None) -> bs4.BeautifulSoup:
        """Render a table of links to examples for a given Ray library.

        Duplicate examples will result in an error.

        Parameters
        ----------
        config : pathlib.Path
            Path to the examples.yml file for the Ray library; defaults to the
            ``examples.yml`` next to the page currently being rendered.

        Returns
        -------
        bs4.BeautifulSoup
            Table of links to examples for the library, rendered as HTML
        """
        if config is None:
            config = (pathlib.Path(app.confdir) / pagename).parent / "examples.yml"
        # Group the examples by the ExampleConfig.groupby value:
        examples = defaultdict(list)
        example_config = ExampleConfig(config, app.srcdir)
        for example in example_config:
            try:
                group = getattr(example, example_config.groupby.key())
            except AttributeError as e:
                raise AttributeError(
                    f"Example {example.link} has no {example_config.groupby.key()} "
                    "key, but needs one because the examples for library "
                    f"{example_config.library.value} are configured to be grouped "
                    f"by {example_config.groupby.key()}."
                ) from e
            examples[group].append(example)
        # Construct a table of examples
        soup = bs4.BeautifulSoup()
        # Add the main heading to the page and include the page text
        page_title = soup.new_tag("h1")
        page_title.append(f"{example_config.library.value} Examples")
        soup.append(page_title)
        page_text = soup.new_tag("p")
        page_text.append(example_config.text)
        soup.append(page_text)
        container = soup.new_tag("div", attrs={"class": "example-index"})
        for group, group_examples in examples.items():
            if not group_examples:
                continue
            header = soup.new_tag("h2", attrs={"class": "example-header"})
            header.append(group.value)
            container.append(header)
            table = soup.new_tag("table", attrs={"class": ["table", "example-table"]})
            # If there are additional columns to show besides just the example link,
            # include column titles in the table header
            if len(example_config.columns_to_show) > 0:
                thead = soup.new_tag("thead")
                thead_row = soup.new_tag("tr")
                for example_enum in example_config.columns_to_show.values():
                    col_header = soup.new_tag("th")
                    col_label = soup.new_tag("p")
                    col_label.append(example_enum.formatted_name())
                    col_header.append(col_label)
                    thead_row.append(col_header)
                link_col = soup.new_tag("th")
                link_label = soup.new_tag("p")
                link_label.append("Example")
                link_col.append(link_label)
                thead_row.append(link_col)
                thead.append(thead_row)
                table.append(thead)
            tbody = soup.new_tag("tbody")
            for example in group_examples:
                tr = soup.new_tag("tr")
                # The columns specify which attributes of each example to show;
                # for each attribute, a new cell value is added with the attribute
                # from the example
                if len(example_config.columns_to_show) > 0:
                    for attribute in example_config.columns_to_show:
                        col_td = soup.new_tag("td")
                        col_p = soup.new_tag("p")
                        attribute_value = getattr(example, attribute, "")
                        if isinstance(attribute_value, str):
                            col_p.append(attribute_value)
                        elif isinstance(attribute_value, list):
                            col_p.append(
                                ", ".join(item.value for item in attribute_value)
                            )
                        col_td.append(col_p)
                        tr.append(col_td)
                link_td = soup.new_tag("td")
                link_p = soup.new_tag("p")
                # External links are used verbatim; internal ones go through the
                # Sphinx pathto helper so relative paths resolve correctly.
                if example.link.startswith("http"):
                    link_href = soup.new_tag("a", attrs={"href": example.link})
                else:
                    link_href = soup.new_tag(
                        "a", attrs={"href": context["pathto"](example.link)}
                    )
                link_span = soup.new_tag("span")
                link_span.append(example.title)
                link_href.append(link_span)
                link_p.append(link_href)
                link_td.append(link_p)
                tr.append(link_td)
                tbody.append(tr)
            table.append(tbody)
            container.append(table)
        soup.append(container)
        return soup

    def render_example_gallery(
        configs: Iterable[pathlib.Path] = None,
    ) -> bs4.BeautifulSoup:
        """Load and render examples for the example gallery.

        This function grabs examples from the various example gallery indexes.
        Each Ray library team maintains its own yml file with a standardized
        set of metadata for each example to ensure consistency across the
        code base.

        Duplicate examples will not raise an error as long as their fields match
        exactly.

        Parameters
        ----------
        configs : Iterable[pathlib.Path]
            Paths to example gallery files to ingest

        Returns
        -------
        bs4.BeautifulSoup
            Example gallery examples rendered as HTML
        """
        if configs is None:
            configs = EXAMPLE_GALLERY_CONFIGS
        examples = {}
        for config in configs:
            config_path = pathlib.Path(config).relative_to("source")
            example_config = ExampleConfig(
                pathlib.Path(app.confdir) / config_path, app.srcdir
            )
            for example in example_config:
                # Check the ingested examples for duplicates. If there are
                # duplicates, check that they have the same field values, and
                # keep only one.
                if example.link in examples:
                    existing_example_fields = vars(examples[example.link])
                    # FIX: iterate key/value pairs; iterating vars(example)
                    # directly yields only the keys, so the 2-tuple unpack
                    # raised ValueError for every duplicate link.
                    for key, value in vars(example).items():
                        if existing_example_fields.get(key) != value:
                            raise ValueError(
                                "One example was specified twice with different "
                                f"attributes: {vars(example)}\n"
                                f"{existing_example_fields}"
                            )
                else:
                    examples[example.link] = example
        soup = bs4.BeautifulSoup()
        list_area = soup.new_tag("div", attrs={"class": ["example-list-area"]})
        for example in examples.values():
            example_div = soup.new_tag("div", attrs={"class": "example"})
            if example.link.startswith("http"):
                link = example.link
            else:
                link = context["pathto"](example.link)
            example_link = soup.new_tag(
                "a",
                attrs={
                    "class": "example-link",
                    "href": link,
                },
            )
            example_icon_area = soup.new_tag(
                "div", attrs={"class": "example-icon-area"}
            )
            # Icon background and glyph are picked at random for visual variety.
            icon = soup.new_tag(
                "img",
                attrs={
                    "class": "example-icon",
                    "src": f"../_static/img/icon_bg_{random.randint(1, 5)}.jpg",
                },
            )
            remix_icon = soup.new_tag(
                "i", attrs={"class": f"{random.choice(REMIX_ICONS)} remix-icon"}
            )
            icon.append(remix_icon)
            example_icon_area.append(icon)
            example_link.append(example_icon_area)
            example_text_area = soup.new_tag(
                "div", attrs={"class": "example-text-area"}
            )
            example_title = soup.new_tag("b", attrs={"class": "example-title"})
            example_title.append(example.title)
            example_text_area.append(example_title)
            example_tags = soup.new_tag("span", attrs={"class": "example-tags"})
            frameworks = [item.value for item in example.frameworks]
            example_tags.append(
                ". ".join(
                    [example.skill_level.value, example.library.value, *frameworks]
                )
                + "."
            )
            example_text_area.append(example_tags)
            other_keywords = soup.new_tag(
                "span", attrs={"class": "example-other-keywords"}
            )
            other_keywords.append(
                " ".join(use_case.value for use_case in example.use_cases)
            )
            other_keywords.append(f" {example.contributor.tag}")
            example_text_area.append(other_keywords)
            # Add the appropriate text if the example comes from the community
            if example.contributor == Contributor.COMMUNITY:
                community_text_area = soup.new_tag(
                    "div", attrs={"class": "community-text-area"}
                )
                community_text = soup.new_tag("i", attrs={"class": "community-text"})
                community_text.append("*Contributed by the Ray Community")
                community_text_area.append(community_text)
                # Add emojis separately; they're not italicized in the mockups
                emojis = soup.new_tag("span", attrs={"class": "community-emojis"})
                emojis.append("💪 ✨")
                community_text_area.append(emojis)
                example_text_area.append(community_text_area)
            example_link.append(example_text_area)
            example_div.append(example_link)
            list_area.append(example_div)
        soup.append(list_area)
        return soup

    @lru_cache(maxsize=None)
    def render_header_nav_links() -> bs4.BeautifulSoup:
        """Render external header links into the top nav bar.

        The structure rendered here is defined in an external yaml file.

        Returns
        -------
        bs4.BeautifulSoup
            Raw HTML to be rendered in the top nav bar
        """
        if not hasattr(app.config, "navbar_content"):
            raise ValueError(
                "A template is attempting to call render_header_nav_links(); a "
                "navbar configuration must be specified."
            )
        node = nodes.container(classes=["navbar-content"])
        node.append(render_header_nodes(app.config.navbar_content))
        header_soup = bs4.BeautifulSoup(
            app.builder.render_partial(node)["fragment"], "html.parser"
        )
        return add_nav_chevrons(header_soup)

    def render_header_nodes(
        obj: List[NavEntry], is_top_level: bool = True
    ) -> nodes.Node:
        """Generate a set of header nav links with docutils nodes.

        Parameters
        ----------
        is_top_level : bool
            True if the call to this function is rendering the top level nodes,
            False otherwise (non-top level nodes are displayed as submenus of
            the top level nodes)
        obj : List[NavEntry]
            List of yaml config entries to render as docutils nodes

        Returns
        -------
        nodes.Node
            Bullet list which will be turned into header nav HTML by the
            sphinx builder
        """
        bullet_list = nodes.bullet_list(
            bullet="-",
            classes=["navbar-toplevel" if is_top_level else "navbar-sublevel"],
        )
        for item in obj:
            # "file" entries link to internal docs pages; "link" entries are
            # external URLs.
            if "file" in item:
                ref_node = make_refnode(
                    app.builder,
                    context["current_page_name"],
                    item["file"],
                    None,
                    nodes.inline(classes=["navbar-link-title"], text=item.get("title")),
                    item.get("title"),
                )
            elif "link" in item:
                ref_node = nodes.reference("", "", internal=False)
                ref_node["refuri"] = item.get("link")
                ref_node["reftitle"] = item.get("title")
                ref_node.append(
                    nodes.inline(classes=["navbar-link-title"], text=item.get("title"))
                )
            if "caption" in item:
                caption = nodes.Text(item.get("caption"))
                ref_node.append(caption)
            paragraph = nodes.paragraph()
            paragraph.append(ref_node)
            container = nodes.container(classes=["ref-container"])
            container.append(paragraph)
            list_item = nodes.list_item(
                classes=["active-link"] if item.get("file") == pagename else []
            )
            list_item.append(container)
            # Nested sections render recursively as a dropdown submenu.
            if "sections" in item:
                wrapper = nodes.container(classes=["navbar-dropdown"])
                wrapper.append(
                    render_header_nodes(item["sections"], is_top_level=False)
                )
                list_item.append(wrapper)
            bullet_list.append(list_item)
        return bullet_list

    def render_use_cases_dropdown() -> bs4.BeautifulSoup:
        return render_example_gallery_dropdown(UseCase)

    def render_libraries_dropdown() -> bs4.BeautifulSoup:
        return render_example_gallery_dropdown(Library)

    def render_frameworks_dropdown() -> bs4.BeautifulSoup:
        return render_example_gallery_dropdown(Framework)

    def render_contributor_dropdown() -> bs4.BeautifulSoup:
        return render_example_gallery_dropdown(Contributor)

    def show_edit_button() -> bool:
        """Check whether to show the "Edit on GitHub" button on the current page.

        Returns
        -------
        bool
            True if the page is tracked by git, and therefore should show
            an "Edit on GitHub" button; False otherwise (which happens e.g.
            when the page is generated during the docs build process)
        """
        if "page_source_suffix" in context:
            file_name = f"{pagename}{context['page_source_suffix']}"
            return file_name in TRACKED_FILES
        return False

    context["show_edit_button"] = show_edit_button()
    context["cached_toctree"] = preload_sidebar_nav(
        context["toctree"],
        context["pathto"],
        context["root_doc"],
        pagename,
    )
    context["render_header_nav_links"] = render_header_nav_links
    context["render_library_examples"] = render_library_examples
    context["render_example_gallery"] = render_example_gallery
    context["render_use_cases_dropdown"] = render_use_cases_dropdown
    context["render_libraries_dropdown"] = render_libraries_dropdown
    context["render_frameworks_dropdown"] = render_frameworks_dropdown
    context["render_contributor_dropdown"] = render_contributor_dropdown
    # Update the HTML page context with a few extra utilities.
    context["pygments_highlight_python"] = lambda code: highlight(
        code, PythonLexer(), HtmlFormatter()
    )
def update_hrefs(input_soup: bs4.BeautifulSoup, n_levels_deep=0):
    """Return a copy of *input_soup* with each anchor href rewritten.

    Every ``<a>`` tag's ``href`` is prefixed with ``n_levels_deep``
    repetitions of ``../`` so links resolve from a deeper directory.
    The input soup is not modified.
    """
    patched = copy.copy(input_soup)
    prefix = "../" * n_levels_deep
    for anchor in patched.select("a"):
        anchor["href"] = prefix + anchor["href"]
    return patched
def add_nav_chevrons(input_soup: bs4.BeautifulSoup) -> bs4.BeautifulSoup:
    """Add dropdown chevron icons to the header nav bar.

    Parameters
    ----------
    input_soup : bs4.BeautifulSoup
        Soup containing rendered HTML which will be inserted into the header
        nav bar

    Returns
    -------
    bs4.BeautifulSoup
        A new BeautifulSoup instance containing chevrons on the list items
        that are meant to be dropdowns.
    """
    result = copy.copy(input_soup)
    for item in result.find_all("li", recursive=True):
        # Only list items with an immediate navbar-dropdown child are dropdowns.
        if not item.find_all("div", {"class": "navbar-dropdown"}, recursive=False):
            continue
        ref = item.find("div", {"class": "ref-container"})
        ref.append(result.new_tag("i", attrs={"class": "fa-solid fa-chevron-down"}))
    return result
def render_example_gallery_dropdown(cls: type) -> bs4.BeautifulSoup:
    """Render a dropdown menu selector for the example gallery.

    Parameters
    ----------
    cls : type
        ExampleEnum class type to use to populate the dropdown

    Returns
    -------
    bs4.BeautifulSoup
        Soup containing the dropdown element
    """
    soup = bs4.BeautifulSoup()
    # CSS id fragment derived from the enum's display name, e.g.
    # "Use Cases" -> "use-cases".
    dropdown_name = cls.formatted_name().lower().replace(" ", "-")
    dropdown_container = soup.new_tag(
        "div", attrs={"class": "filter-dropdown", "id": f"{dropdown_name}-dropdown"}
    )
    # Hidden checkbox controlling dropdown open/close state — presumably
    # toggled via CSS sibling selectors; confirm against the stylesheet.
    dropdown_show_checkbox = soup.new_tag(
        "input",
        attrs={
            "class": "dropdown-checkbox",
            "id": f"{dropdown_name}-checkbox",
            "type": "checkbox",
        },
    )
    dropdown_container.append(dropdown_show_checkbox)
    # Clicking the label toggles the checkbox above (via the "for" attribute).
    dropdown_label = soup.new_tag(
        "label", attrs={"class": "dropdown-label", "for": f"{dropdown_name}-checkbox"}
    )
    dropdown_label.append(cls.formatted_name())
    chevron = soup.new_tag("i", attrs={"class": "fa-solid fa-chevron-down"})
    dropdown_label.append(chevron)
    dropdown_container.append(dropdown_label)
    # One filter checkbox per enum member; skipped entirely for empty enums.
    if cls.values():
        dropdown_options = soup.new_tag("div", attrs={"class": "dropdown-content"})
        for member in list(cls):
            label = soup.new_tag("label", attrs={"class": "checkbox-container"})
            label.append(member.value)
            # Some enum members define a dedicated tag; fall back to the value.
            tag = getattr(member, "tag", member.value)
            checkbox = soup.new_tag(
                "input",
                attrs={
                    "id": f"{tag}-checkbox",
                    "class": "filter-checkbox",
                    "type": "checkbox",
                },
            )
            label.append(checkbox)
            checkmark = soup.new_tag("span", attrs={"class": "checkmark"})
            label.append(checkmark)
            dropdown_options.append(label)
        dropdown_container.append(dropdown_options)
    soup.append(dropdown_container)
    return soup
def pregenerate_example_rsts(
    app: sphinx.application.Sphinx, *example_configs: Optional[List[str]]
):
    """Pregenerate RST files for the example page configuration files.

    This generates RST files for displaying example pages for Ray libraries.
    See `add_custom_assets` for more information about the custom template
    that gets rendered from these configuration files.

    Parameters
    ----------
    *example_configs : Optional[List[str]]
        Configuration files for which example pages are to be generated;
        defaults to EXAMPLE_GALLERY_CONFIGS when none are given.
    """
    configs = example_configs or EXAMPLE_GALLERY_CONFIGS
    page_title = "Examples"
    title_decoration = "=" * len(page_title)
    # The stub is identical for every library, so build it once.
    stub = (
        f"{page_title}\n{title_decoration}\n\n"
        " .. this file is pregenerated; please edit ./examples.yml to "
        "modify examples for this library."
    )
    for config in configs:
        # Depending on where the sphinx build command is run from, the path to
        # the target config file can change. Handle these paths manually to
        # ensure it works on RTD and locally.
        config_path = pathlib.Path(app.confdir) / pathlib.Path(config).relative_to(
            "source"
        )
        with open(config_path.with_suffix(".rst"), "w") as f:
            f.write(stub)
def generate_version_url(version):
    """Return the docs.ray.io URL serving the given docs *version* string."""
    return "https://docs.ray.io/en/{}/".format(version)
def generate_versions_json():
    """Gets the releases from the remote repo, sorts them in semver order,
    and generates the JSON needed for the version switcher
    """
    ray_prefix = "ray-"
    min_version = "1.11.0"
    repo_url = "https://github.com/ray-project/ray.git"
    static_dir_name = "_static"
    version_json_filename = "versions.json"
    # Annotated git tags appear twice in ls-remote output; the dereferenced
    # duplicate carries this suffix and is filtered out below.
    dereference_suffix = "^{}"
    version_json_data = []
    # Versions that should always appear at the top
    for version in ["latest", "master"]:
        version_json_data.append(
            {"version": version, "url": generate_version_url(version)}
        )
    git_versions = []
    # Fetch release tags from repo
    output = subprocess.check_output(["git", "ls-remote", "--tags", repo_url]).decode(
        "utf-8"
    )
    # Extract release versions from tags
    tags = re.findall(r"refs/tags/(.+)", output)
    for tag in tags:
        if ray_prefix in tag and dereference_suffix not in tag:
            version = tag.split(ray_prefix)[1]
            # Deduplicate and drop anything older than min_version.
            if version not in git_versions and Version(version) >= Version(min_version):
                git_versions.append(version)
    # Newest release first in the switcher.
    git_versions.sort(key=Version, reverse=True)
    for version in git_versions:
        version_json_data.append(
            {
                "version": f"releases-{version}",
                "url": generate_version_url(f"releases-{version}"),
            }
        )
    # Ensure static path exists
    static_dir = os.path.join(os.path.dirname(__file__), static_dir_name)
    if not os.path.exists(static_dir):
        os.makedirs(static_dir)
    # Write JSON output
    output_path = os.path.join(static_dir, version_json_filename)
    with open(output_path, "w") as f:
        json.dump(version_json_data, f, indent=4)
REMIX_ICONS = [
"ri-24-hours-fill",
"ri-24-hours-line",
"ri-4k-fill",
"ri-4k-line",
"ri-a-b",
"ri-account-box-fill",
"ri-account-box-line",
"ri-account-circle-fill",
"ri-account-circle-line",
"ri-account-pin-box-fill",
"ri-account-pin-box-line",
"ri-account-pin-circle-fill",
"ri-account-pin-circle-line",
"ri-add-box-fill",
"ri-add-box-line",
"ri-add-circle-fill",
"ri-add-circle-line",
"ri-add-fill",
"ri-add-line",
"ri-admin-fill",
"ri-admin-line",
"ri-advertisement-fill",
"ri-advertisement-line",
"ri-airplay-fill",
"ri-airplay-line",
"ri-alarm-fill",
"ri-alarm-line",
"ri-alarm-warning-fill",
"ri-alarm-warning-line",
"ri-album-fill",
"ri-album-line",
"ri-alert-fill",
"ri-alert-line",
"ri-aliens-fill",
"ri-aliens-line",
"ri-align-bottom",
"ri-align-center",
"ri-align-justify",
"ri-align-left",
"ri-align-right",
"ri-align-top",
"ri-align-vertically",
"ri-alipay-fill",
"ri-alipay-line",
"ri-amazon-fill",
"ri-amazon-line",
"ri-anchor-fill",
"ri-anchor-line",
"ri-ancient-gate-fill",
"ri-ancient-gate-line",
"ri-ancient-pavilion-fill",
"ri-ancient-pavilion-line",
"ri-android-fill",
"ri-android-line",
"ri-angularjs-fill",
"ri-angularjs-line",
"ri-anticlockwise-2-fill",
"ri-anticlockwise-2-line",
"ri-anticlockwise-fill",
"ri-anticlockwise-line",
"ri-app-store-fill",
"ri-app-store-line",
"ri-apple-fill",
"ri-apple-line",
"ri-apps-2-fill",
"ri-apps-2-line",
"ri-apps-fill",
"ri-apps-line",
"ri-archive-drawer-fill",
"ri-archive-drawer-line",
"ri-archive-fill",
"ri-archive-line",
"ri-arrow-down-circle-fill",
"ri-arrow-down-circle-line",
"ri-arrow-down-fill",
"ri-arrow-down-line",
"ri-arrow-down-s-fill",
"ri-arrow-down-s-line",
"ri-arrow-drop-down-fill",
"ri-arrow-drop-down-line",
"ri-arrow-drop-left-fill",
"ri-arrow-drop-left-line",
"ri-arrow-drop-right-fill",
"ri-arrow-drop-right-line",
"ri-arrow-drop-up-fill",
"ri-arrow-drop-up-line",
"ri-arrow-go-back-fill",
"ri-arrow-go-back-line",
"ri-arrow-go-forward-fill",
"ri-arrow-go-forward-line",
"ri-arrow-left-circle-fill",
"ri-arrow-left-circle-line",
"ri-arrow-left-down-fill",
"ri-arrow-left-down-line",
"ri-arrow-left-fill",
"ri-arrow-left-line",
"ri-arrow-left-right-fill",
"ri-arrow-left-right-line",
"ri-arrow-left-s-fill",
"ri-arrow-left-s-line",
"ri-arrow-left-up-fill",
"ri-arrow-left-up-line",
"ri-arrow-right-circle-fill",
"ri-arrow-right-circle-line",
"ri-arrow-right-down-fill",
"ri-arrow-right-down-line",
"ri-arrow-right-fill",
"ri-arrow-right-line",
"ri-arrow-right-s-fill",
"ri-arrow-right-s-line",
"ri-arrow-right-up-fill",
"ri-arrow-right-up-line",
"ri-arrow-up-circle-fill",
"ri-arrow-up-circle-line",
"ri-arrow-up-down-fill",
"ri-arrow-up-down-line",
"ri-arrow-up-fill",
"ri-arrow-up-line",
"ri-arrow-up-s-fill",
"ri-arrow-up-s-line",
"ri-artboard-2-fill",
"ri-artboard-2-line",
"ri-artboard-fill",
"ri-artboard-line",
"ri-article-fill",
"ri-article-line",
"ri-aspect-ratio-fill",
"ri-aspect-ratio-line",
"ri-asterisk",
"ri-at-fill",
"ri-at-line",
"ri-attachment-2",
"ri-attachment-fill",
"ri-attachment-line",
"ri-auction-fill",
"ri-auction-line",
"ri-award-fill",
"ri-award-line",
"ri-baidu-fill",
"ri-baidu-line",
"ri-ball-pen-fill",
"ri-ball-pen-line",
"ri-bank-card-2-fill",
"ri-bank-card-2-line",
"ri-bank-card-fill",
"ri-bank-card-line",
"ri-bank-fill",
"ri-bank-line",
"ri-bar-chart-2-fill",
"ri-bar-chart-2-line",
"ri-bar-chart-box-fill",
"ri-bar-chart-box-line",
"ri-bar-chart-fill",
"ri-bar-chart-grouped-fill",
"ri-bar-chart-grouped-line",
"ri-bar-chart-horizontal-fill",
"ri-bar-chart-horizontal-line",
"ri-bar-chart-line",
"ri-barcode-box-fill",
"ri-barcode-box-line",
"ri-barcode-fill",
"ri-barcode-line",
"ri-barricade-fill",
"ri-barricade-line",
"ri-base-station-fill",
"ri-base-station-line",
"ri-basketball-fill",
"ri-basketball-line",
"ri-battery-2-charge-fill",
"ri-battery-2-charge-line",
"ri-battery-2-fill",
"ri-battery-2-line",
"ri-battery-charge-fill",
"ri-battery-charge-line",
"ri-battery-fill",
"ri-battery-line",
"ri-battery-low-fill",
"ri-battery-low-line",
"ri-battery-saver-fill",
"ri-battery-saver-line",
"ri-battery-share-fill",
"ri-battery-share-line",
"ri-bear-smile-fill",
"ri-bear-smile-line",
"ri-behance-fill",
"ri-behance-line",
"ri-bell-fill",
"ri-bell-line",
"ri-bike-fill",
"ri-bike-line",
"ri-bilibili-fill",
"ri-bilibili-line",
"ri-bill-fill",
"ri-bill-line",
"ri-billiards-fill",
"ri-billiards-line",
"ri-bit-coin-fill",
"ri-bit-coin-line",
"ri-blaze-fill",
"ri-blaze-line",
"ri-bluetooth-connect-fill",
"ri-bluetooth-connect-line",
"ri-bluetooth-fill",
"ri-bluetooth-line",
"ri-blur-off-fill",
"ri-blur-off-line",
"ri-body-scan-fill",
"ri-body-scan-line",
"ri-bold",
"ri-book-2-fill",
"ri-book-2-line",
"ri-book-3-fill",
"ri-book-3-line",
"ri-book-fill",
"ri-book-line",
"ri-book-marked-fill",
"ri-book-marked-line",
"ri-book-open-fill",
"ri-book-open-line",
"ri-book-read-fill",
"ri-book-read-line",
"ri-booklet-fill",
"ri-booklet-line",
"ri-bookmark-2-fill",
"ri-bookmark-2-line",
"ri-bookmark-3-fill",
"ri-bookmark-3-line",
"ri-bookmark-fill",
"ri-bookmark-line",
"ri-boxing-fill",
"ri-boxing-line",
"ri-braces-fill",
"ri-braces-line",
"ri-brackets-fill",
"ri-brackets-line",
"ri-briefcase-2-fill",
"ri-briefcase-2-line",
"ri-briefcase-3-fill",
"ri-briefcase-3-line",
"ri-briefcase-4-fill",
"ri-briefcase-4-line",
"ri-briefcase-5-fill",
"ri-briefcase-5-line",
"ri-briefcase-fill",
"ri-briefcase-line",
"ri-bring-forward",
"ri-bring-to-front",
"ri-broadcast-fill",
"ri-broadcast-line",
"ri-brush-2-fill",
"ri-brush-2-line",
"ri-brush-3-fill",
"ri-brush-3-line",
"ri-brush-4-fill",
"ri-brush-4-line",
"ri-brush-fill",
"ri-brush-line",
"ri-bubble-chart-fill",
"ri-bubble-chart-line",
"ri-bug-2-fill",
"ri-bug-2-line",
"ri-bug-fill",
"ri-bug-line",
"ri-building-2-fill",
"ri-building-2-line",
"ri-building-3-fill",
"ri-building-3-line",
"ri-building-4-fill",
"ri-building-4-line",
"ri-building-fill",
"ri-building-line",
"ri-bus-2-fill",
"ri-bus-2-line",
"ri-bus-fill",
"ri-bus-line",
"ri-bus-wifi-fill",
"ri-bus-wifi-line",
"ri-cactus-fill",
"ri-cactus-line",
"ri-cake-2-fill",
"ri-cake-2-line",
"ri-cake-3-fill",
"ri-cake-3-line",
"ri-cake-fill",
"ri-cake-line",
"ri-calculator-fill",
"ri-calculator-line",
"ri-calendar-2-fill",
"ri-calendar-2-line",
"ri-calendar-check-fill",
"ri-calendar-check-line",
"ri-calendar-event-fill",
"ri-calendar-event-line",
"ri-calendar-fill",
"ri-calendar-line",
"ri-calendar-todo-fill",
"ri-calendar-todo-line",
"ri-camera-2-fill",
"ri-camera-2-line",
"ri-camera-3-fill",
"ri-camera-3-line",
"ri-camera-fill",
"ri-camera-lens-fill",
"ri-camera-lens-line",
"ri-camera-line",
"ri-camera-off-fill",
"ri-camera-off-line",
"ri-camera-switch-fill",
"ri-camera-switch-line",
"ri-capsule-fill",
"ri-capsule-line",
"ri-car-fill",
"ri-car-line",
"ri-car-washing-fill",
"ri-car-washing-line",
"ri-caravan-fill",
"ri-caravan-line",
"ri-cast-fill",
"ri-cast-line",
"ri-cellphone-fill",
"ri-cellphone-line",
"ri-celsius-fill",
"ri-celsius-line",
"ri-centos-fill",
"ri-centos-line",
"ri-character-recognition-fill",
"ri-character-recognition-line",
"ri-charging-pile-2-fill",
"ri-charging-pile-2-line",
"ri-charging-pile-fill",
"ri-charging-pile-line",
"ri-chat-1-fill",
"ri-chat-1-line",
"ri-chat-2-fill",
"ri-chat-2-line",
"ri-chat-3-fill",
"ri-chat-3-line",
"ri-chat-4-fill",
"ri-chat-4-line",
"ri-chat-check-fill",
"ri-chat-check-line",
"ri-chat-delete-fill",
"ri-chat-delete-line",
"ri-chat-download-fill",
"ri-chat-download-line",
"ri-chat-follow-up-fill",
"ri-chat-follow-up-line",
"ri-chat-forward-fill",
"ri-chat-forward-line",
"ri-chat-heart-fill",
"ri-chat-heart-line",
"ri-chat-history-fill",
"ri-chat-history-line",
"ri-chat-new-fill",
"ri-chat-new-line",
"ri-chat-off-fill",
"ri-chat-off-line",
"ri-chat-poll-fill",
"ri-chat-poll-line",
"ri-chat-private-fill",
"ri-chat-private-line",
"ri-chat-quote-fill",
"ri-chat-quote-line",
"ri-chat-settings-fill",
"ri-chat-settings-line",
"ri-chat-smile-2-fill",
"ri-chat-smile-2-line",
"ri-chat-smile-3-fill",
"ri-chat-smile-3-line",
"ri-chat-smile-fill",
"ri-chat-smile-line",
"ri-chat-upload-fill",
"ri-chat-upload-line",
"ri-chat-voice-fill",
"ri-chat-voice-line",
"ri-check-double-fill",
"ri-check-double-line",
"ri-check-fill",
"ri-check-line",
"ri-checkbox-blank-circle-fill",
"ri-checkbox-blank-circle-line",
"ri-checkbox-blank-fill",
"ri-checkbox-blank-line",
"ri-checkbox-circle-fill",
"ri-checkbox-circle-line",
"ri-checkbox-fill",
"ri-checkbox-indeterminate-fill",
"ri-checkbox-indeterminate-line",
"ri-checkbox-line",
"ri-checkbox-multiple-blank-fill",
"ri-checkbox-multiple-blank-line",
"ri-checkbox-multiple-fill",
"ri-checkbox-multiple-line",
"ri-china-railway-fill",
"ri-china-railway-line",
"ri-chrome-fill",
"ri-chrome-line",
"ri-clapperboard-fill",
"ri-clapperboard-line",
"ri-clipboard-fill",
"ri-clipboard-line",
"ri-clockwise-2-fill",
"ri-clockwise-2-line",
"ri-clockwise-fill",
"ri-clockwise-line",
"ri-close-circle-fill",
"ri-close-circle-line",
"ri-close-fill",
"ri-close-line",
"ri-closed-captioning-fill",
"ri-closed-captioning-line",
"ri-cloud-fill",
"ri-cloud-line",
"ri-cloud-off-fill",
"ri-cloud-off-line",
"ri-cloud-windy-fill",
"ri-cloud-windy-line",
"ri-cloudy-2-fill",
"ri-cloudy-2-line",
"ri-cloudy-fill",
"ri-cloudy-line",
"ri-code-box-fill",
"ri-code-box-line",
"ri-code-fill",
"ri-code-line",
"ri-code-s-fill",
"ri-code-s-line",
"ri-code-s-slash-fill",
"ri-code-s-slash-line",
"ri-code-view",
"ri-codepen-fill",
"ri-codepen-line",
"ri-coin-fill",
"ri-coin-line",
"ri-coins-fill",
"ri-coins-line",
"ri-collage-fill",
"ri-collage-line",
"ri-command-fill",
"ri-command-line",
"ri-community-fill",
"ri-community-line",
"ri-compass-2-fill",
"ri-compass-2-line",
"ri-compass-3-fill",
"ri-compass-3-line",
"ri-compass-4-fill",
"ri-compass-4-line",
"ri-compass-discover-fill",
"ri-compass-discover-line",
"ri-compass-fill",
"ri-compass-line",
"ri-compasses-2-fill",
"ri-compasses-2-line",
"ri-compasses-fill",
"ri-compasses-line",
"ri-computer-fill",
"ri-computer-line",
"ri-contacts-book-2-fill",
"ri-contacts-book-2-line",
"ri-contacts-book-fill",
"ri-contacts-book-line",
"ri-contacts-book-upload-fill",
"ri-contacts-book-upload-line",
"ri-contacts-fill",
"ri-contacts-line",
"ri-contrast-2-fill",
"ri-contrast-2-line",
"ri-contrast-drop-2-fill",
"ri-contrast-drop-2-line",
"ri-contrast-drop-fill",
"ri-contrast-drop-line",
"ri-contrast-fill",
"ri-contrast-line",
"ri-copper-coin-fill",
"ri-copper-coin-line",
"ri-copper-diamond-fill",
"ri-copper-diamond-line",
"ri-copyleft-fill",
"ri-copyleft-line",
"ri-copyright-fill",
"ri-copyright-line",
"ri-coreos-fill",
"ri-coreos-line",
"ri-coupon-2-fill",
"ri-coupon-2-line",
"ri-coupon-3-fill",
"ri-coupon-3-line",
"ri-coupon-4-fill",
"ri-coupon-4-line",
"ri-coupon-5-fill",
"ri-coupon-5-line",
"ri-coupon-fill",
"ri-coupon-line",
"ri-cpu-fill",
"ri-cpu-line",
"ri-creative-commons-by-fill",
"ri-creative-commons-by-line",
"ri-creative-commons-fill",
"ri-creative-commons-line",
"ri-creative-commons-nc-fill",
"ri-creative-commons-nc-line",
"ri-creative-commons-nd-fill",
"ri-creative-commons-nd-line",
"ri-creative-commons-sa-fill",
"ri-creative-commons-sa-line",
"ri-creative-commons-zero-fill",
"ri-creative-commons-zero-line",
"ri-criminal-fill",
"ri-criminal-line",
"ri-crop-2-fill",
"ri-crop-2-line",
"ri-crop-fill",
"ri-crop-line",
"ri-css3-fill",
"ri-css3-line",
"ri-cup-fill",
"ri-cup-line",
"ri-currency-fill",
"ri-currency-line",
"ri-cursor-fill",
"ri-cursor-line",
"ri-customer-service-2-fill",
"ri-customer-service-2-line",
"ri-customer-service-fill",
"ri-customer-service-line",
"ri-dashboard-2-fill",
"ri-dashboard-2-line",
"ri-dashboard-3-fill",
"ri-dashboard-3-line",
"ri-dashboard-fill",
"ri-dashboard-line",
"ri-database-2-fill",
"ri-database-2-line",
"ri-database-fill",
"ri-database-line",
"ri-delete-back-2-fill",
"ri-delete-back-2-line",
"ri-delete-back-fill",
"ri-delete-back-line",
"ri-delete-bin-2-fill",
"ri-delete-bin-2-line",
"ri-delete-bin-3-fill",
"ri-delete-bin-3-line",
"ri-delete-bin-4-fill",
"ri-delete-bin-4-line",
"ri-delete-bin-5-fill",
"ri-delete-bin-5-line",
"ri-delete-bin-6-fill",
"ri-delete-bin-6-line",
"ri-delete-bin-7-fill",
"ri-delete-bin-7-line",
"ri-delete-bin-fill",
"ri-delete-bin-line",
"ri-delete-column",
"ri-delete-row",
"ri-device-fill",
"ri-device-line",
"ri-device-recover-fill",
"ri-device-recover-line",
"ri-dingding-fill",
"ri-dingding-line",
"ri-direction-fill",
"ri-direction-line",
"ri-disc-fill",
"ri-disc-line",
"ri-discord-fill",
"ri-discord-line",
"ri-discuss-fill",
"ri-discuss-line",
"ri-dislike-fill",
"ri-dislike-line",
"ri-disqus-fill",
"ri-disqus-line",
"ri-divide-fill",
"ri-divide-line",
"ri-donut-chart-fill",
"ri-donut-chart-line",
"ri-door-closed-fill",
"ri-door-closed-line",
"ri-door-fill",
"ri-door-line",
"ri-door-lock-box-fill",
"ri-door-lock-box-line",
"ri-door-lock-fill",
"ri-door-lock-line",
"ri-door-open-fill",
"ri-door-open-line",
"ri-dossier-fill",
"ri-dossier-line",
"ri-douban-fill",
"ri-douban-line",
"ri-double-quotes-l",
"ri-double-quotes-r",
"ri-download-2-fill",
"ri-download-2-line",
"ri-download-cloud-2-fill",
"ri-download-cloud-2-line",
"ri-download-cloud-fill",
"ri-download-cloud-line",
"ri-download-fill",
"ri-download-line",
"ri-draft-fill",
"ri-draft-line",
"ri-drag-drop-fill",
"ri-drag-drop-line",
"ri-drag-move-2-fill",
"ri-drag-move-2-line",
"ri-drag-move-fill",
"ri-drag-move-line",
"ri-dribbble-fill",
"ri-dribbble-line",
"ri-drive-fill",
"ri-drive-line",
"ri-drizzle-fill",
"ri-drizzle-line",
"ri-drop-fill",
"ri-drop-line",
"ri-dropbox-fill",
"ri-dropbox-line",
"ri-dual-sim-1-fill",
"ri-dual-sim-1-line",
"ri-dual-sim-2-fill",
"ri-dual-sim-2-line",
"ri-dv-fill",
"ri-dv-line",
"ri-dvd-fill",
"ri-dvd-line",
"ri-e-bike-2-fill",
"ri-e-bike-2-line",
"ri-e-bike-fill",
"ri-e-bike-line",
"ri-earth-fill",
"ri-earth-line",
"ri-earthquake-fill",
"ri-earthquake-line",
"ri-edge-fill",
"ri-edge-line",
"ri-edit-2-fill",
"ri-edit-2-line",
"ri-edit-box-fill",
"ri-edit-box-line",
"ri-edit-circle-fill",
"ri-edit-circle-line",
"ri-edit-fill",
"ri-edit-line",
"ri-eject-fill",
"ri-eject-line",
"ri-emotion-2-fill",
"ri-emotion-2-line",
"ri-emotion-fill",
"ri-emotion-happy-fill",
"ri-emotion-happy-line",
"ri-emotion-laugh-fill",
"ri-emotion-laugh-line",
"ri-emotion-line",
"ri-emotion-normal-fill",
"ri-emotion-normal-line",
"ri-emotion-sad-fill",
"ri-emotion-sad-line",
"ri-emotion-unhappy-fill",
"ri-emotion-unhappy-line",
"ri-empathize-fill",
"ri-empathize-line",
"ri-emphasis-cn",
"ri-emphasis",
"ri-english-input",
"ri-equalizer-fill",
"ri-equalizer-line",
"ri-eraser-fill",
"ri-eraser-line",
"ri-error-warning-fill",
"ri-error-warning-line",
"ri-evernote-fill",
"ri-evernote-line",
"ri-exchange-box-fill",
"ri-exchange-box-line",
"ri-exchange-cny-fill",
"ri-exchange-cny-line",
"ri-exchange-dollar-fill",
"ri-exchange-dollar-line",
"ri-exchange-fill",
"ri-exchange-funds-fill",
"ri-exchange-funds-line",
"ri-exchange-line",
"ri-external-link-fill",
"ri-external-link-line",
"ri-eye-2-fill",
"ri-eye-2-line",
"ri-eye-close-fill",
"ri-eye-close-line",
"ri-eye-fill",
"ri-eye-line",
"ri-eye-off-fill",
"ri-eye-off-line",
"ri-facebook-box-fill",
"ri-facebook-box-line",
"ri-facebook-circle-fill",
"ri-facebook-circle-line",
"ri-facebook-fill",
"ri-facebook-line",
"ri-fahrenheit-fill",
"ri-fahrenheit-line",
"ri-feedback-fill",
"ri-feedback-line",
"ri-file-2-fill",
"ri-file-2-line",
"ri-file-3-fill",
"ri-file-3-line",
"ri-file-4-fill",
"ri-file-4-line",
"ri-file-add-fill",
"ri-file-add-line",
"ri-file-chart-2-fill",
"ri-file-chart-2-line",
"ri-file-chart-fill",
"ri-file-chart-line",
"ri-file-cloud-fill",
"ri-file-cloud-line",
"ri-file-code-fill",
"ri-file-code-line",
"ri-file-copy-2-fill",
"ri-file-copy-2-line",
"ri-file-copy-fill",
"ri-file-copy-line",
"ri-file-damage-fill",
"ri-file-damage-line",
"ri-file-download-fill",
"ri-file-download-line",
"ri-file-edit-fill",
"ri-file-edit-line",
"ri-file-excel-2-fill",
"ri-file-excel-2-line",
"ri-file-excel-fill",
"ri-file-excel-line",
"ri-file-fill",
"ri-file-forbid-fill",
"ri-file-forbid-line",
"ri-file-gif-fill",
"ri-file-gif-line",
"ri-file-history-fill",
"ri-file-history-line",
"ri-file-hwp-fill",
"ri-file-hwp-line",
"ri-file-info-fill",
"ri-file-info-line",
"ri-file-line",
"ri-file-list-2-fill",
"ri-file-list-2-line",
"ri-file-list-3-fill",
"ri-file-list-3-line",
"ri-file-list-fill",
"ri-file-list-line",
"ri-file-lock-fill",
"ri-file-lock-line",
"ri-file-marked-fill",
"ri-file-marked-line",
"ri-file-music-fill",
"ri-file-music-line",
"ri-file-paper-2-fill",
"ri-file-paper-2-line",
"ri-file-paper-fill",
"ri-file-paper-line",
"ri-file-pdf-fill",
"ri-file-pdf-line",
"ri-file-ppt-2-fill",
"ri-file-ppt-2-line",
"ri-file-ppt-fill",
"ri-file-ppt-line",
"ri-file-reduce-fill",
"ri-file-reduce-line",
"ri-file-search-fill",
"ri-file-search-line",
"ri-file-settings-fill",
"ri-file-settings-line",
"ri-file-shield-2-fill",
"ri-file-shield-2-line",
"ri-file-shield-fill",
"ri-file-shield-line",
"ri-file-shred-fill",
"ri-file-shred-line",
"ri-file-text-fill",
"ri-file-text-line",
"ri-file-transfer-fill",
"ri-file-transfer-line",
"ri-file-unknow-fill",
"ri-file-unknow-line",
"ri-file-upload-fill",
"ri-file-upload-line",
"ri-file-user-fill",
"ri-file-user-line",
"ri-file-warning-fill",
"ri-file-warning-line",
"ri-file-word-2-fill",
"ri-file-word-2-line",
"ri-file-word-fill",
"ri-file-word-line",
"ri-file-zip-fill",
"ri-file-zip-line",
"ri-film-fill",
"ri-film-line",
"ri-filter-2-fill",
"ri-filter-2-line",
"ri-filter-3-fill",
"ri-filter-3-line",
"ri-filter-fill",
"ri-filter-line",
"ri-filter-off-fill",
"ri-filter-off-line",
"ri-find-replace-fill",
"ri-find-replace-line",
"ri-finder-fill",
"ri-finder-line",
"ri-fingerprint-2-fill",
"ri-fingerprint-2-line",
"ri-fingerprint-fill",
"ri-fingerprint-line",
"ri-fire-fill",
"ri-fire-line",
"ri-firefox-fill",
"ri-firefox-line",
"ri-first-aid-kit-fill",
"ri-first-aid-kit-line",
"ri-flag-2-fill",
"ri-flag-2-line",
"ri-flag-fill",
"ri-flag-line",
"ri-flashlight-fill",
"ri-flashlight-line",
"ri-flask-fill",
"ri-flask-line",
"ri-flight-land-fill",
"ri-flight-land-line",
"ri-flight-takeoff-fill",
"ri-flight-takeoff-line",
"ri-flood-fill",
"ri-flood-line",
"ri-flow-chart",
"ri-flutter-fill",
"ri-flutter-line",
"ri-focus-2-fill",
"ri-focus-2-line",
"ri-focus-3-fill",
"ri-focus-3-line",
"ri-focus-fill",
"ri-focus-line",
"ri-foggy-fill",
"ri-foggy-line",
"ri-folder-2-fill",
"ri-folder-2-line",
"ri-folder-3-fill",
"ri-folder-3-line",
"ri-folder-4-fill",
"ri-folder-4-line",
"ri-folder-5-fill",
"ri-folder-5-line",
"ri-folder-add-fill",
"ri-folder-add-line",
"ri-folder-chart-2-fill",
"ri-folder-chart-2-line",
"ri-folder-chart-fill",
"ri-folder-chart-line",
"ri-folder-download-fill",
"ri-folder-download-line",
"ri-folder-fill",
"ri-folder-forbid-fill",
"ri-folder-forbid-line",
"ri-folder-history-fill",
"ri-folder-history-line",
"ri-folder-info-fill",
"ri-folder-info-line",
"ri-folder-keyhole-fill",
"ri-folder-keyhole-line",
"ri-folder-line",
"ri-folder-lock-fill",
"ri-folder-lock-line",
"ri-folder-music-fill",
"ri-folder-music-line",
"ri-folder-open-fill",
"ri-folder-open-line",
"ri-folder-received-fill",
"ri-folder-received-line",
"ri-folder-reduce-fill",
"ri-folder-reduce-line",
"ri-folder-settings-fill",
"ri-folder-settings-line",
"ri-folder-shared-fill",
"ri-folder-shared-line",
"ri-folder-shield-2-fill",
"ri-folder-shield-2-line",
"ri-folder-shield-fill",
"ri-folder-shield-line",
"ri-folder-transfer-fill",
"ri-folder-transfer-line",
"ri-folder-unknow-fill",
"ri-folder-unknow-line",
"ri-folder-upload-fill",
"ri-folder-upload-line",
"ri-folder-user-fill",
"ri-folder-user-line",
"ri-folder-warning-fill",
"ri-folder-warning-line",
"ri-folder-zip-fill",
"ri-folder-zip-line",
"ri-folders-fill",
"ri-folders-line",
"ri-font-color",
"ri-font-size-2",
"ri-font-size",
"ri-football-fill",
"ri-football-line",
"ri-footprint-fill",
"ri-footprint-line",
"ri-forbid-2-fill",
"ri-forbid-2-line",
"ri-forbid-fill",
"ri-forbid-line",
"ri-format-clear",
"ri-fridge-fill",
"ri-fridge-line",
"ri-fullscreen-exit-fill",
"ri-fullscreen-exit-line",
"ri-fullscreen-fill",
"ri-fullscreen-line",
"ri-function-fill",
"ri-function-line",
"ri-functions",
"ri-funds-box-fill",
"ri-funds-box-line",
"ri-funds-fill",
"ri-funds-line",
"ri-gallery-fill",
"ri-gallery-line",
"ri-gallery-upload-fill",
"ri-gallery-upload-line",
"ri-game-fill",
"ri-game-line",
"ri-gamepad-fill",
"ri-gamepad-line",
"ri-gas-station-fill",
"ri-gas-station-line",
"ri-gatsby-fill",
"ri-gatsby-line",
"ri-genderless-fill",
"ri-genderless-line",
"ri-ghost-2-fill",
"ri-ghost-2-line",
"ri-ghost-fill",
"ri-ghost-line",
"ri-ghost-smile-fill",
"ri-ghost-smile-line",
"ri-gift-2-fill",
"ri-gift-2-line",
"ri-gift-fill",
"ri-gift-line",
"ri-git-branch-fill",
"ri-git-branch-line",
"ri-git-commit-fill",
"ri-git-commit-line",
"ri-git-merge-fill",
"ri-git-merge-line",
"ri-git-pull-request-fill",
"ri-git-pull-request-line",
"ri-git-repository-commits-fill",
"ri-git-repository-commits-line",
"ri-git-repository-fill",
"ri-git-repository-line",
"ri-git-repository-private-fill",
"ri-git-repository-private-line",
"ri-github-fill",
"ri-github-line",
"ri-gitlab-fill",
"ri-gitlab-line",
"ri-global-fill",
"ri-global-line",
"ri-globe-fill",
"ri-globe-line",
"ri-goblet-fill",
"ri-goblet-line",
"ri-google-fill",
"ri-google-line",
"ri-google-play-fill",
"ri-google-play-line",
"ri-government-fill",
"ri-government-line",
"ri-gps-fill",
"ri-gps-line",
"ri-gradienter-fill",
"ri-gradienter-line",
"ri-grid-fill",
"ri-grid-line",
"ri-group-2-fill",
"ri-group-2-line",
"ri-group-fill",
"ri-group-line",
"ri-guide-fill",
"ri-guide-line",
"ri-h-1",
"ri-h-2",
"ri-h-3",
"ri-h-4",
"ri-h-5",
"ri-h-6",
"ri-hail-fill",
"ri-hail-line",
"ri-hammer-fill",
"ri-hammer-line",
"ri-hand-coin-fill",
"ri-hand-coin-line",
"ri-hand-heart-fill",
"ri-hand-heart-line",
"ri-hand-sanitizer-fill",
"ri-hand-sanitizer-line",
"ri-handbag-fill",
"ri-handbag-line",
"ri-hard-drive-2-fill",
"ri-hard-drive-2-line",
"ri-hard-drive-fill",
"ri-hard-drive-line",
"ri-hashtag",
"ri-haze-2-fill",
"ri-haze-2-line",
"ri-haze-fill",
"ri-haze-line",
"ri-hd-fill",
"ri-hd-line",
"ri-heading",
"ri-headphone-fill",
"ri-headphone-line",
"ri-health-book-fill",
"ri-health-book-line",
"ri-heart-2-fill",
"ri-heart-2-line",
"ri-heart-3-fill",
"ri-heart-3-line",
"ri-heart-add-fill",
"ri-heart-add-line",
"ri-heart-fill",
"ri-heart-line",
"ri-heart-pulse-fill",
"ri-heart-pulse-line",
"ri-hearts-fill",
"ri-hearts-line",
"ri-heavy-showers-fill",
"ri-heavy-showers-line",
"ri-history-fill",
"ri-history-line",
"ri-home-2-fill",
"ri-home-2-line",
"ri-home-3-fill",
"ri-home-3-line",
"ri-home-4-fill",
"ri-home-4-line",
"ri-home-5-fill",
"ri-home-5-line",
"ri-home-6-fill",
"ri-home-6-line",
"ri-home-7-fill",
"ri-home-7-line",
"ri-home-8-fill",
"ri-home-8-line",
"ri-home-fill",
"ri-home-gear-fill",
"ri-home-gear-line",
"ri-home-heart-fill",
"ri-home-heart-line",
"ri-home-line",
"ri-home-smile-2-fill",
"ri-home-smile-2-line",
"ri-home-smile-fill",
"ri-home-smile-line",
"ri-home-wifi-fill",
"ri-home-wifi-line",
"ri-honor-of-kings-fill",
"ri-honor-of-kings-line",
"ri-honour-fill",
"ri-honour-line",
"ri-hospital-fill",
"ri-hospital-line",
"ri-hotel-bed-fill",
"ri-hotel-bed-line",
"ri-hotel-fill",
"ri-hotel-line",
"ri-hotspot-fill",
"ri-hotspot-line",
"ri-hq-fill",
"ri-hq-line",
"ri-html5-fill",
"ri-html5-line",
"ri-ie-fill",
"ri-ie-line",
"ri-image-2-fill",
"ri-image-2-line",
"ri-image-add-fill",
"ri-image-add-line",
"ri-image-edit-fill",
"ri-image-edit-line",
"ri-image-fill",
"ri-image-line",
"ri-inbox-archive-fill",
"ri-inbox-archive-line",
"ri-inbox-fill",
"ri-inbox-line",
"ri-inbox-unarchive-fill",
"ri-inbox-unarchive-line",
"ri-increase-decrease-fill",
"ri-increase-decrease-line",
"ri-indent-decrease",
"ri-indent-increase",
"ri-indeterminate-circle-fill",
"ri-indeterminate-circle-line",
"ri-information-fill",
"ri-information-line",
"ri-infrared-thermometer-fill",
"ri-infrared-thermometer-line",
"ri-ink-bottle-fill",
"ri-ink-bottle-line",
"ri-input-cursor-move",
"ri-input-method-fill",
"ri-input-method-line",
"ri-insert-column-left",
"ri-insert-column-right",
"ri-insert-row-bottom",
"ri-insert-row-top",
"ri-instagram-fill",
"ri-instagram-line",
"ri-install-fill",
"ri-install-line",
"ri-invision-fill",
"ri-invision-line",
"ri-italic",
"ri-kakao-talk-fill",
"ri-kakao-talk-line",
"ri-key-2-fill",
"ri-key-2-line",
"ri-key-fill",
"ri-key-line",
"ri-keyboard-box-fill",
"ri-keyboard-box-line",
"ri-keyboard-fill",
"ri-keyboard-line",
"ri-keynote-fill",
"ri-keynote-line",
"ri-knife-blood-fill",
"ri-knife-blood-line",
"ri-knife-fill",
"ri-knife-line",
"ri-landscape-fill",
"ri-landscape-line",
"ri-layout-2-fill",
"ri-layout-2-line",
"ri-layout-3-fill",
"ri-layout-3-line",
"ri-layout-4-fill",
"ri-layout-4-line",
"ri-layout-5-fill",
"ri-layout-5-line",
"ri-layout-6-fill",
"ri-layout-6-line",
"ri-layout-bottom-2-fill",
"ri-layout-bottom-2-line",
"ri-layout-bottom-fill",
"ri-layout-bottom-line",
"ri-layout-column-fill",
"ri-layout-column-line",
"ri-layout-fill",
"ri-layout-grid-fill",
"ri-layout-grid-line",
"ri-layout-left-2-fill",
"ri-layout-left-2-line",
"ri-layout-left-fill",
"ri-layout-left-line",
"ri-layout-line",
"ri-layout-masonry-fill",
"ri-layout-masonry-line",
"ri-layout-right-2-fill",
"ri-layout-right-2-line",
"ri-layout-right-fill",
"ri-layout-right-line",
"ri-layout-row-fill",
"ri-layout-row-line",
"ri-layout-top-2-fill",
"ri-layout-top-2-line",
"ri-layout-top-fill",
"ri-layout-top-line",
"ri-leaf-fill",
"ri-leaf-line",
"ri-lifebuoy-fill",
"ri-lifebuoy-line",
"ri-lightbulb-fill",
"ri-lightbulb-flash-fill",
"ri-lightbulb-flash-line",
"ri-lightbulb-line",
"ri-line-chart-fill",
"ri-line-chart-line",
"ri-line-fill",
"ri-line-height",
"ri-line-line",
"ri-link-m",
"ri-link-unlink-m",
"ri-link-unlink",
"ri-link",
"ri-linkedin-box-fill",
"ri-linkedin-box-line",
"ri-linkedin-fill",
"ri-linkedin-line",
"ri-links-fill",
"ri-links-line",
"ri-list-check-2",
"ri-list-check",
"ri-list-ordered",
"ri-list-settings-fill",
"ri-list-settings-line",
"ri-list-unordered",
"ri-live-fill",
"ri-live-line",
"ri-loader-2-fill",
"ri-loader-2-line",
"ri-loader-3-fill",
"ri-loader-3-line",
"ri-loader-4-fill",
"ri-loader-4-line",
"ri-loader-5-fill",
"ri-loader-5-line",
"ri-loader-fill",
"ri-loader-line",
"ri-lock-2-fill",
"ri-lock-2-line",
"ri-lock-fill",
"ri-lock-line",
"ri-lock-password-fill",
"ri-lock-password-line",
"ri-lock-unlock-fill",
"ri-lock-unlock-line",
"ri-login-box-fill",
"ri-login-box-line",
"ri-login-circle-fill",
"ri-login-circle-line",
"ri-logout-box-fill",
"ri-logout-box-line",
"ri-logout-box-r-fill",
"ri-logout-box-r-line",
"ri-logout-circle-fill",
"ri-logout-circle-line",
"ri-logout-circle-r-fill",
"ri-logout-circle-r-line",
"ri-luggage-cart-fill",
"ri-luggage-cart-line",
"ri-luggage-deposit-fill",
"ri-luggage-deposit-line",
"ri-lungs-fill",
"ri-lungs-line",
"ri-mac-fill",
"ri-mac-line",
"ri-macbook-fill",
"ri-macbook-line",
"ri-magic-fill",
"ri-magic-line",
"ri-mail-add-fill",
"ri-mail-add-line",
"ri-mail-check-fill",
"ri-mail-check-line",
"ri-mail-close-fill",
"ri-mail-close-line",
"ri-mail-download-fill",
"ri-mail-download-line",
"ri-mail-fill",
"ri-mail-forbid-fill",
"ri-mail-forbid-line",
"ri-mail-line",
"ri-mail-lock-fill",
"ri-mail-lock-line",
"ri-mail-open-fill",
"ri-mail-open-line",
"ri-mail-send-fill",
"ri-mail-send-line",
"ri-mail-settings-fill",
"ri-mail-settings-line",
"ri-mail-star-fill",
"ri-mail-star-line",
"ri-mail-unread-fill",
"ri-mail-unread-line",
"ri-mail-volume-fill",
"ri-mail-volume-line",
"ri-map-2-fill",
"ri-map-2-line",
"ri-map-fill",
"ri-map-line",
"ri-map-pin-2-fill",
"ri-map-pin-2-line",
"ri-map-pin-3-fill",
"ri-map-pin-3-line",
"ri-map-pin-4-fill",
"ri-map-pin-4-line",
"ri-map-pin-5-fill",
"ri-map-pin-5-line",
"ri-map-pin-add-fill",
"ri-map-pin-add-line",
"ri-map-pin-fill",
"ri-map-pin-line",
"ri-map-pin-range-fill",
"ri-map-pin-range-line",
"ri-map-pin-time-fill",
"ri-map-pin-time-line",
"ri-map-pin-user-fill",
"ri-map-pin-user-line",
"ri-mark-pen-fill",
"ri-mark-pen-line",
"ri-markdown-fill",
"ri-markdown-line",
"ri-markup-fill",
"ri-markup-line",
"ri-mastercard-fill",
"ri-mastercard-line",
"ri-mastodon-fill",
"ri-mastodon-line",
"ri-medal-2-fill",
"ri-medal-2-line",
"ri-medal-fill",
"ri-medal-line",
"ri-medicine-bottle-fill",
"ri-medicine-bottle-line",
"ri-medium-fill",
"ri-medium-line",
"ri-men-fill",
"ri-men-line",
"ri-mental-health-fill",
"ri-mental-health-line",
"ri-menu-2-fill",
"ri-menu-2-line",
"ri-menu-3-fill",
"ri-menu-3-line",
"ri-menu-4-fill",
"ri-menu-4-line",
"ri-menu-5-fill",
"ri-menu-5-line",
"ri-menu-add-fill",
"ri-menu-add-line",
"ri-menu-fill",
"ri-menu-fold-fill",
"ri-menu-fold-line",
"ri-menu-line",
"ri-menu-unfold-fill",
"ri-menu-unfold-line",
"ri-merge-cells-horizontal",
"ri-merge-cells-vertical",
"ri-message-2-fill",
"ri-message-2-line",
"ri-message-3-fill",
"ri-message-3-line",
"ri-message-fill",
"ri-message-line",
"ri-messenger-fill",
"ri-messenger-line",
"ri-meteor-fill",
"ri-meteor-line",
"ri-mic-2-fill",
"ri-mic-2-line",
"ri-mic-fill",
"ri-mic-line",
"ri-mic-off-fill",
"ri-mic-off-line",
"ri-mickey-fill",
"ri-mickey-line",
"ri-microscope-fill",
"ri-microscope-line",
"ri-microsoft-fill",
"ri-microsoft-line",
"ri-mind-map",
"ri-mini-program-fill",
"ri-mini-program-line",
"ri-mist-fill",
"ri-mist-line",
"ri-money-cny-box-fill",
"ri-money-cny-box-line",
"ri-money-cny-circle-fill",
"ri-money-cny-circle-line",
"ri-money-dollar-box-fill",
"ri-money-dollar-box-line",
"ri-money-dollar-circle-fill",
"ri-money-dollar-circle-line",
"ri-money-euro-box-fill",
"ri-money-euro-box-line",
"ri-money-euro-circle-fill",
"ri-money-euro-circle-line",
"ri-money-pound-box-fill",
"ri-money-pound-box-line",
"ri-money-pound-circle-fill",
"ri-money-pound-circle-line",
"ri-moon-clear-fill",
"ri-moon-clear-line",
"ri-moon-cloudy-fill",
"ri-moon-cloudy-line",
"ri-moon-fill",
"ri-moon-foggy-fill",
"ri-moon-foggy-line",
"ri-moon-line",
"ri-more-2-fill",
"ri-more-2-line",
"ri-more-fill",
"ri-more-line",
"ri-motorbike-fill",
"ri-motorbike-line",
"ri-mouse-fill",
"ri-mouse-line",
"ri-movie-2-fill",
"ri-movie-2-line",
"ri-movie-fill",
"ri-movie-line",
"ri-music-2-fill",
"ri-music-2-line",
"ri-music-fill",
"ri-music-line",
"ri-mv-fill",
"ri-mv-line",
"ri-navigation-fill",
"ri-navigation-line",
"ri-netease-cloud-music-fill",
"ri-netease-cloud-music-line",
"ri-netflix-fill",
"ri-netflix-line",
"ri-newspaper-fill",
"ri-newspaper-line",
"ri-node-tree",
"ri-notification-2-fill",
"ri-notification-2-line",
"ri-notification-3-fill",
"ri-notification-3-line",
"ri-notification-4-fill",
"ri-notification-4-line",
"ri-notification-badge-fill",
"ri-notification-badge-line",
"ri-notification-fill",
"ri-notification-line",
"ri-notification-off-fill",
"ri-notification-off-line",
"ri-npmjs-fill",
"ri-npmjs-line",
"ri-number-0",
"ri-number-1",
"ri-number-2",
"ri-number-3",
"ri-number-4",
"ri-number-5",
"ri-number-6",
"ri-number-7",
"ri-number-8",
"ri-number-9",
"ri-numbers-fill",
"ri-numbers-line",
"ri-nurse-fill",
"ri-nurse-line",
"ri-oil-fill",
"ri-oil-line",
"ri-omega",
"ri-open-arm-fill",
"ri-open-arm-line",
"ri-open-source-fill",
"ri-open-source-line",
"ri-opera-fill",
"ri-opera-line",
"ri-order-play-fill",
"ri-order-play-line",
"ri-organization-chart",
"ri-outlet-2-fill",
"ri-outlet-2-line",
"ri-outlet-fill",
"ri-outlet-line",
"ri-page-separator",
"ri-pages-fill",
"ri-pages-line",
"ri-paint-brush-fill",
"ri-paint-brush-line",
"ri-paint-fill",
"ri-paint-line",
"ri-palette-fill",
"ri-palette-line",
"ri-pantone-fill",
"ri-pantone-line",
"ri-paragraph",
"ri-parent-fill",
"ri-parent-line",
"ri-parentheses-fill",
"ri-parentheses-line",
"ri-parking-box-fill",
"ri-parking-box-line",
"ri-parking-fill",
"ri-parking-line",
"ri-passport-fill",
"ri-passport-line",
"ri-patreon-fill",
"ri-patreon-line",
"ri-pause-circle-fill",
"ri-pause-circle-line",
"ri-pause-fill",
"ri-pause-line",
"ri-pause-mini-fill",
"ri-pause-mini-line",
"ri-paypal-fill",
"ri-paypal-line",
"ri-pen-nib-fill",
"ri-pen-nib-line",
"ri-pencil-fill",
"ri-pencil-line",
"ri-pencil-ruler-2-fill",
"ri-pencil-ruler-2-line",
"ri-pencil-ruler-fill",
"ri-pencil-ruler-line",
"ri-percent-fill",
"ri-percent-line",
"ri-phone-camera-fill",
"ri-phone-camera-line",
"ri-phone-fill",
"ri-phone-find-fill",
"ri-phone-find-line",
"ri-phone-line",
"ri-phone-lock-fill",
"ri-phone-lock-line",
"ri-picture-in-picture-2-fill",
"ri-picture-in-picture-2-line",
"ri-picture-in-picture-exit-fill",
"ri-picture-in-picture-exit-line",
"ri-picture-in-picture-fill",
"ri-picture-in-picture-line",
"ri-pie-chart-2-fill",
"ri-pie-chart-2-line",
"ri-pie-chart-box-fill",
"ri-pie-chart-box-line",
"ri-pie-chart-fill",
"ri-pie-chart-line",
"ri-pin-distance-fill",
"ri-pin-distance-line",
"ri-ping-pong-fill",
"ri-ping-pong-line",
"ri-pinterest-fill",
"ri-pinterest-line",
"ri-pinyin-input",
"ri-pixelfed-fill",
"ri-pixelfed-line",
"ri-plane-fill",
"ri-plane-line",
"ri-plant-fill",
"ri-plant-line",
"ri-play-circle-fill",
"ri-play-circle-line",
"ri-play-fill",
"ri-play-line",
"ri-play-list-2-fill",
"ri-play-list-2-line",
"ri-play-list-add-fill",
"ri-play-list-add-line",
"ri-play-list-fill",
"ri-play-list-line",
"ri-play-mini-fill",
"ri-play-mini-line",
"ri-playstation-fill",
"ri-playstation-line",
"ri-plug-2-fill",
"ri-plug-2-line",
"ri-plug-fill",
"ri-plug-line",
"ri-polaroid-2-fill",
"ri-polaroid-2-line",
"ri-polaroid-fill",
"ri-polaroid-line",
"ri-police-car-fill",
"ri-police-car-line",
"ri-price-tag-2-fill",
"ri-price-tag-2-line",
"ri-price-tag-3-fill",
"ri-price-tag-3-line",
"ri-price-tag-fill",
"ri-price-tag-line",
"ri-printer-cloud-fill",
"ri-printer-cloud-line",
"ri-printer-fill",
"ri-printer-line",
"ri-product-hunt-fill",
"ri-product-hunt-line",
"ri-profile-fill",
"ri-profile-line",
"ri-projector-2-fill",
"ri-projector-2-line",
"ri-projector-fill",
"ri-projector-line",
"ri-psychotherapy-fill",
"ri-psychotherapy-line",
"ri-pulse-fill",
"ri-pulse-line",
"ri-pushpin-2-fill",
"ri-pushpin-2-line",
"ri-pushpin-fill",
"ri-pushpin-line",
"ri-qq-fill",
"ri-qq-line",
"ri-qr-code-fill",
"ri-qr-code-line",
"ri-qr-scan-2-fill",
"ri-qr-scan-2-line",
"ri-qr-scan-fill",
"ri-qr-scan-line",
"ri-question-answer-fill",
"ri-question-answer-line",
"ri-question-fill",
"ri-question-line",
"ri-question-mark",
"ri-questionnaire-fill",
"ri-questionnaire-line",
"ri-quill-pen-fill",
"ri-quill-pen-line",
"ri-radar-fill",
"ri-radar-line",
"ri-radio-2-fill",
"ri-radio-2-line",
"ri-radio-button-fill",
"ri-radio-button-line",
"ri-radio-fill",
"ri-radio-line",
"ri-rainbow-fill",
"ri-rainbow-line",
"ri-rainy-fill",
"ri-rainy-line",
"ri-reactjs-fill",
"ri-reactjs-line",
"ri-record-circle-fill",
"ri-record-circle-line",
"ri-record-mail-fill",
"ri-record-mail-line",
"ri-recycle-fill",
"ri-recycle-line",
"ri-red-packet-fill",
"ri-red-packet-line",
"ri-reddit-fill",
"ri-reddit-line",
"ri-refresh-fill",
"ri-refresh-line",
"ri-refund-2-fill",
"ri-refund-2-line",
"ri-refund-fill",
"ri-refund-line",
"ri-registered-fill",
"ri-registered-line",
"ri-remixicon-fill",
"ri-remixicon-line",
"ri-remote-control-2-fill",
"ri-remote-control-2-line",
"ri-remote-control-fill",
"ri-remote-control-line",
"ri-repeat-2-fill",
"ri-repeat-2-line",
"ri-repeat-fill",
"ri-repeat-line",
"ri-repeat-one-fill",
"ri-repeat-one-line",
"ri-reply-all-fill",
"ri-reply-all-line",
"ri-reply-fill",
"ri-reply-line",
"ri-reserved-fill",
"ri-reserved-line",
"ri-rest-time-fill",
"ri-rest-time-line",
"ri-restart-fill",
"ri-restart-line",
"ri-restaurant-2-fill",
"ri-restaurant-2-line",
"ri-restaurant-fill",
"ri-restaurant-line",
"ri-rewind-fill",
"ri-rewind-line",
"ri-rewind-mini-fill",
"ri-rewind-mini-line",
"ri-rhythm-fill",
"ri-rhythm-line",
"ri-riding-fill",
"ri-riding-line",
"ri-road-map-fill",
"ri-road-map-line",
"ri-roadster-fill",
"ri-roadster-line",
"ri-robot-fill",
"ri-robot-line",
"ri-rocket-2-fill",
"ri-rocket-2-line",
"ri-rocket-fill",
"ri-rocket-line",
"ri-rotate-lock-fill",
"ri-rotate-lock-line",
"ri-rounded-corner",
"ri-route-fill",
"ri-route-line",
"ri-router-fill",
"ri-router-line",
"ri-rss-fill",
"ri-rss-line",
"ri-ruler-2-fill",
"ri-ruler-2-line",
"ri-ruler-fill",
"ri-ruler-line",
"ri-run-fill",
"ri-run-line",
"ri-safari-fill",
"ri-safari-line",
"ri-safe-2-fill",
"ri-safe-2-line",
"ri-safe-fill",
"ri-safe-line",
"ri-sailboat-fill",
"ri-sailboat-line",
"ri-save-2-fill",
"ri-save-2-line",
"ri-save-3-fill",
"ri-save-3-line",
"ri-save-fill",
"ri-save-line",
"ri-scales-2-fill",
"ri-scales-2-line",
"ri-scales-3-fill",
"ri-scales-3-line",
"ri-scales-fill",
"ri-scales-line",
"ri-scan-2-fill",
"ri-scan-2-line",
"ri-scan-fill",
"ri-scan-line",
"ri-scissors-2-fill",
"ri-scissors-2-line",
"ri-scissors-cut-fill",
"ri-scissors-cut-line",
"ri-scissors-fill",
"ri-scissors-line",
"ri-screenshot-2-fill",
"ri-screenshot-2-line",
"ri-screenshot-fill",
"ri-screenshot-line",
"ri-sd-card-fill",
"ri-sd-card-line",
"ri-sd-card-mini-fill",
"ri-sd-card-mini-line",
"ri-search-2-fill",
"ri-search-2-line",
"ri-search-eye-fill",
"ri-search-eye-line",
"ri-search-fill",
"ri-search-line",
"ri-secure-payment-fill",
"ri-secure-payment-line",
"ri-seedling-fill",
"ri-seedling-line",
"ri-send-backward",
"ri-send-plane-2-fill",
"ri-send-plane-2-line",
"ri-send-plane-fill",
"ri-send-plane-line",
"ri-send-to-back",
"ri-sensor-fill",
"ri-sensor-line",
"ri-separator",
"ri-server-fill",
"ri-server-line",
"ri-service-fill",
"ri-service-line",
"ri-settings-2-fill",
"ri-settings-2-line",
"ri-settings-3-fill",
"ri-settings-3-line",
"ri-settings-4-fill",
"ri-settings-4-line",
"ri-settings-5-fill",
"ri-settings-5-line",
"ri-settings-6-fill",
"ri-settings-6-line",
"ri-settings-fill",
"ri-settings-line",
"ri-shape-2-fill",
"ri-shape-2-line",
"ri-shape-fill",
"ri-shape-line",
"ri-share-box-fill",
"ri-share-box-line",
"ri-share-circle-fill",
"ri-share-circle-line",
"ri-share-fill",
"ri-share-forward-2-fill",
"ri-share-forward-2-line",
"ri-share-forward-box-fill",
"ri-share-forward-box-line",
"ri-share-forward-fill",
"ri-share-forward-line",
"ri-share-line",
"ri-shield-check-fill",
"ri-shield-check-line",
"ri-shield-cross-fill",
"ri-shield-cross-line",
"ri-shield-fill",
"ri-shield-flash-fill",
"ri-shield-flash-line",
"ri-shield-keyhole-fill",
"ri-shield-keyhole-line",
"ri-shield-line",
"ri-shield-star-fill",
"ri-shield-star-line",
"ri-shield-user-fill",
"ri-shield-user-line",
"ri-ship-2-fill",
"ri-ship-2-line",
"ri-ship-fill",
"ri-ship-line",
"ri-shirt-fill",
"ri-shirt-line",
"ri-shopping-bag-2-fill",
"ri-shopping-bag-2-line",
"ri-shopping-bag-3-fill",
"ri-shopping-bag-3-line",
"ri-shopping-bag-fill",
"ri-shopping-bag-line",
"ri-shopping-basket-2-fill",
"ri-shopping-basket-2-line",
"ri-shopping-basket-fill",
"ri-shopping-basket-line",
"ri-shopping-cart-2-fill",
"ri-shopping-cart-2-line",
"ri-shopping-cart-fill",
"ri-shopping-cart-line",
"ri-showers-fill",
"ri-showers-line",
"ri-shuffle-fill",
"ri-shuffle-line",
"ri-shut-down-fill",
"ri-shut-down-line",
"ri-side-bar-fill",
"ri-side-bar-line",
"ri-signal-tower-fill",
"ri-signal-tower-line",
"ri-signal-wifi-1-fill",
"ri-signal-wifi-1-line",
"ri-signal-wifi-2-fill",
"ri-signal-wifi-2-line",
"ri-signal-wifi-3-fill",
"ri-signal-wifi-3-line",
"ri-signal-wifi-error-fill",
"ri-signal-wifi-error-line",
"ri-signal-wifi-fill",
"ri-signal-wifi-line",
"ri-signal-wifi-off-fill",
"ri-signal-wifi-off-line",
"ri-sim-card-2-fill",
"ri-sim-card-2-line",
"ri-sim-card-fill",
"ri-sim-card-line",
"ri-single-quotes-l",
"ri-single-quotes-r",
"ri-sip-fill",
"ri-sip-line",
"ri-skip-back-fill",
"ri-skip-back-line",
"ri-skip-back-mini-fill",
"ri-skip-back-mini-line",
"ri-skip-forward-fill",
"ri-skip-forward-line",
"ri-skip-forward-mini-fill",
"ri-skip-forward-mini-line",
"ri-skull-2-fill",
"ri-skull-2-line",
"ri-skull-fill",
"ri-skull-line",
"ri-skype-fill",
"ri-skype-line",
"ri-slack-fill",
"ri-slack-line",
"ri-slice-fill",
"ri-slice-line",
"ri-slideshow-2-fill",
"ri-slideshow-2-line",
"ri-slideshow-3-fill",
"ri-slideshow-3-line",
"ri-slideshow-4-fill",
"ri-slideshow-4-line",
"ri-slideshow-fill",
"ri-slideshow-line",
"ri-smartphone-fill",
"ri-smartphone-line",
"ri-snapchat-fill",
"ri-snapchat-line",
"ri-snowy-fill",
"ri-snowy-line",
"ri-sort-asc",
"ri-sort-desc",
"ri-sound-module-fill",
"ri-sound-module-line",
"ri-soundcloud-fill",
"ri-soundcloud-line",
"ri-space-ship-fill",
"ri-space-ship-line",
"ri-space",
"ri-spam-2-fill",
"ri-spam-2-line",
"ri-spam-3-fill",
"ri-spam-3-line",
"ri-spam-fill",
"ri-spam-line",
"ri-speaker-2-fill",
"ri-speaker-2-line",
"ri-speaker-3-fill",
"ri-speaker-3-line",
"ri-speaker-fill",
"ri-speaker-line",
"ri-spectrum-fill",
"ri-spectrum-line",
"ri-speed-fill",
"ri-speed-line",
"ri-speed-mini-fill",
"ri-speed-mini-line",
"ri-split-cells-horizontal",
"ri-split-cells-vertical",
"ri-spotify-fill",
"ri-spotify-line",
"ri-spy-fill",
"ri-spy-line",
"ri-stack-fill",
"ri-stack-line",
"ri-stack-overflow-fill",
"ri-stack-overflow-line",
"ri-stackshare-fill",
"ri-stackshare-line",
"ri-star-fill",
"ri-star-half-fill",
"ri-star-half-line",
"ri-star-half-s-fill",
"ri-star-half-s-line",
"ri-star-line",
"ri-star-s-fill",
"ri-star-s-line",
"ri-star-smile-fill",
"ri-star-smile-line",
"ri-steam-fill",
"ri-steam-line",
"ri-steering-2-fill",
"ri-steering-2-line",
"ri-steering-fill",
"ri-steering-line",
"ri-stethoscope-fill",
"ri-stethoscope-line",
"ri-sticky-note-2-fill",
"ri-sticky-note-2-line",
"ri-sticky-note-fill",
"ri-sticky-note-line",
"ri-stock-fill",
"ri-stock-line",
"ri-stop-circle-fill",
"ri-stop-circle-line",
"ri-stop-fill",
"ri-stop-line",
"ri-stop-mini-fill",
"ri-stop-mini-line",
"ri-store-2-fill",
"ri-store-2-line",
"ri-store-3-fill",
"ri-store-3-line",
"ri-store-fill",
"ri-store-line",
"ri-strikethrough-2",
"ri-strikethrough",
"ri-subscript-2",
"ri-subscript",
"ri-subtract-fill",
"ri-subtract-line",
"ri-subway-fill",
"ri-subway-line",
"ri-subway-wifi-fill",
"ri-subway-wifi-line",
"ri-suitcase-2-fill",
"ri-suitcase-2-line",
"ri-suitcase-3-fill",
"ri-suitcase-3-line",
"ri-suitcase-fill",
"ri-suitcase-line",
"ri-sun-cloudy-fill",
"ri-sun-cloudy-line",
"ri-sun-fill",
"ri-sun-foggy-fill",
"ri-sun-foggy-line",
"ri-sun-line",
"ri-superscript-2",
"ri-superscript",
"ri-surgical-mask-fill",
"ri-surgical-mask-line",
"ri-surround-sound-fill",
"ri-surround-sound-line",
"ri-survey-fill",
"ri-survey-line",
"ri-swap-box-fill",
"ri-swap-box-line",
"ri-swap-fill",
"ri-swap-line",
"ri-switch-fill",
"ri-switch-line",
"ri-sword-fill",
"ri-sword-line",
"ri-syringe-fill",
"ri-syringe-line",
"ri-t-box-fill",
"ri-t-box-line",
"ri-t-shirt-2-fill",
"ri-t-shirt-2-line",
"ri-t-shirt-air-fill",
"ri-t-shirt-air-line",
"ri-t-shirt-fill",
"ri-t-shirt-line",
"ri-table-2",
"ri-table-alt-fill",
"ri-table-alt-line",
"ri-table-fill",
"ri-table-line",
"ri-tablet-fill",
"ri-tablet-line",
"ri-takeaway-fill",
"ri-takeaway-line",
"ri-taobao-fill",
"ri-taobao-line",
"ri-tape-fill",
"ri-tape-line",
"ri-task-fill",
"ri-task-line",
"ri-taxi-fill",
"ri-taxi-line",
"ri-taxi-wifi-fill",
"ri-taxi-wifi-line",
"ri-team-fill",
"ri-team-line",
"ri-telegram-fill",
"ri-telegram-line",
"ri-temp-cold-fill",
"ri-temp-cold-line",
"ri-temp-hot-fill",
"ri-temp-hot-line",
"ri-terminal-box-fill",
"ri-terminal-box-line",
"ri-terminal-fill",
"ri-terminal-line",
"ri-terminal-window-fill",
"ri-terminal-window-line",
"ri-test-tube-fill",
"ri-test-tube-line",
"ri-text-direction-l",
"ri-text-direction-r",
"ri-text-spacing",
"ri-text-wrap",
"ri-text",
"ri-thermometer-fill",
"ri-thermometer-line",
"ri-thumb-down-fill",
"ri-thumb-down-line",
"ri-thumb-up-fill",
"ri-thumb-up-line",
"ri-thunderstorms-fill",
"ri-thunderstorms-line",
"ri-ticket-2-fill",
"ri-ticket-2-line",
"ri-ticket-fill",
"ri-ticket-line",
"ri-time-fill",
"ri-time-line",
"ri-timer-2-fill",
"ri-timer-2-line",
"ri-timer-fill",
"ri-timer-flash-fill",
"ri-timer-flash-line",
"ri-timer-line",
"ri-todo-fill",
"ri-todo-line",
"ri-toggle-fill",
"ri-toggle-line",
"ri-tools-fill",
"ri-tools-line",
"ri-tornado-fill",
"ri-tornado-line",
"ri-trademark-fill",
"ri-trademark-line",
"ri-traffic-light-fill",
"ri-traffic-light-line",
"ri-train-fill",
"ri-train-line",
"ri-train-wifi-fill",
"ri-train-wifi-line",
"ri-translate-2",
"ri-translate",
"ri-travesti-fill",
"ri-travesti-line",
"ri-treasure-map-fill",
"ri-treasure-map-line",
"ri-trello-fill",
"ri-trello-line",
"ri-trophy-fill",
"ri-trophy-line",
"ri-truck-fill",
"ri-truck-line",
"ri-tumblr-fill",
"ri-tumblr-line",
"ri-tv-2-fill",
"ri-tv-2-line",
"ri-tv-fill",
"ri-tv-line",
"ri-twitch-fill",
"ri-twitch-line",
"ri-twitter-fill",
"ri-twitter-line",
"ri-typhoon-fill",
"ri-typhoon-line",
"ri-u-disk-fill",
"ri-u-disk-line",
"ri-ubuntu-fill",
"ri-ubuntu-line",
"ri-umbrella-fill",
"ri-umbrella-line",
"ri-underline",
"ri-uninstall-fill",
"ri-uninstall-line",
"ri-unsplash-fill",
"ri-unsplash-line",
"ri-upload-2-fill",
"ri-upload-2-line",
"ri-upload-cloud-2-fill",
"ri-upload-cloud-2-line",
"ri-upload-cloud-fill",
"ri-upload-cloud-line",
"ri-upload-fill",
"ri-upload-line",
"ri-usb-fill",
"ri-usb-line",
"ri-user-2-fill",
"ri-user-2-line",
"ri-user-3-fill",
"ri-user-3-line",
"ri-user-4-fill",
"ri-user-4-line",
"ri-user-5-fill",
"ri-user-5-line",
"ri-user-6-fill",
"ri-user-6-line",
"ri-user-add-fill",
"ri-user-add-line",
"ri-user-fill",
"ri-user-follow-fill",
"ri-user-follow-line",
"ri-user-heart-fill",
"ri-user-heart-line",
"ri-user-line",
"ri-user-location-fill",
"ri-user-location-line",
"ri-user-received-2-fill",
"ri-user-received-2-line",
"ri-user-received-fill",
"ri-user-received-line",
"ri-user-search-fill",
"ri-user-search-line",
"ri-user-settings-fill",
"ri-user-settings-line",
"ri-user-shared-2-fill",
"ri-user-shared-2-line",
"ri-user-shared-fill",
"ri-user-shared-line",
"ri-user-smile-fill",
"ri-user-smile-line",
"ri-user-star-fill",
"ri-user-star-line",
"ri-user-unfollow-fill",
"ri-user-unfollow-line",
"ri-user-voice-fill",
"ri-user-voice-line",
"ri-video-add-fill",
"ri-video-add-line",
"ri-video-chat-fill",
"ri-video-chat-line",
"ri-video-download-fill",
"ri-video-download-line",
"ri-video-fill",
"ri-video-line",
"ri-video-upload-fill",
"ri-video-upload-line",
"ri-vidicon-2-fill",
"ri-vidicon-2-line",
"ri-vidicon-fill",
"ri-vidicon-line",
"ri-vimeo-fill",
"ri-vimeo-line",
"ri-vip-crown-2-fill",
"ri-vip-crown-2-line",
"ri-vip-crown-fill",
"ri-vip-crown-line",
"ri-vip-diamond-fill",
"ri-vip-diamond-line",
"ri-vip-fill",
"ri-vip-line",
"ri-virus-fill",
"ri-virus-line",
"ri-visa-fill",
"ri-visa-line",
"ri-voice-recognition-fill",
"ri-voice-recognition-line",
"ri-voiceprint-fill",
"ri-voiceprint-line",
"ri-volume-down-fill",
"ri-volume-down-line",
"ri-volume-mute-fill",
"ri-volume-mute-line",
"ri-volume-off-vibrate-fill",
"ri-volume-off-vibrate-line",
"ri-volume-up-fill",
"ri-volume-up-line",
"ri-volume-vibrate-fill",
"ri-volume-vibrate-line",
"ri-vuejs-fill",
"ri-vuejs-line",
"ri-walk-fill",
"ri-walk-line",
"ri-wallet-2-fill",
"ri-wallet-2-line",
"ri-wallet-3-fill",
"ri-wallet-3-line",
"ri-wallet-fill",
"ri-wallet-line",
"ri-water-flash-fill",
"ri-water-flash-line",
"ri-webcam-fill",
"ri-webcam-line",
"ri-wechat-2-fill",
"ri-wechat-2-line",
"ri-wechat-fill",
"ri-wechat-line",
"ri-wechat-pay-fill",
"ri-wechat-pay-line",
"ri-weibo-fill",
"ri-weibo-line",
"ri-whatsapp-fill",
"ri-whatsapp-line",
"ri-wheelchair-fill",
"ri-wheelchair-line",
"ri-wifi-fill",
"ri-wifi-line",
"ri-wifi-off-fill",
"ri-wifi-off-line",
"ri-window-2-fill",
"ri-window-2-line",
"ri-window-fill",
"ri-window-line",
"ri-windows-fill",
"ri-windows-line",
"ri-windy-fill",
"ri-windy-line",
"ri-wireless-charging-fill",
"ri-wireless-charging-line",
"ri-women-fill",
"ri-women-line",
"ri-wubi-input",
"ri-xbox-fill",
"ri-xbox-line",
"ri-xing-fill",
"ri-xing-line",
"ri-youtube-fill",
"ri-youtube-line",
"ri-zcool-fill",
"ri-zcool-line",
"ri-zhihu-fill",
"ri-zhihu-line",
"ri-zoom-in-fill",
"ri-zoom-in-line",
"ri-zoom-out-fill",
"ri-zoom-out-line",
"ri-zzz-fill",
"ri-zzz-line",
"ri-arrow-down-double-fill",
"ri-arrow-down-double-line",
"ri-arrow-left-double-fill",
"ri-arrow-left-double-line",
"ri-arrow-right-double-fill",
"ri-arrow-right-double-line",
"ri-arrow-turn-back-fill",
"ri-arrow-turn-back-line",
"ri-arrow-turn-forward-fill",
"ri-arrow-turn-forward-line",
"ri-arrow-up-double-fill",
"ri-arrow-up-double-line",
"ri-bard-fill",
"ri-bard-line",
"ri-bootstrap-fill",
"ri-bootstrap-line",
"ri-box-1-fill",
"ri-box-1-line",
"ri-box-2-fill",
"ri-box-2-line",
"ri-box-3-fill",
"ri-box-3-line",
"ri-brain-fill",
"ri-brain-line",
"ri-candle-fill",
"ri-candle-line",
"ri-cash-fill",
"ri-cash-line",
"ri-contract-left-fill",
"ri-contract-left-line",
"ri-contract-left-right-fill",
"ri-contract-left-right-line",
"ri-contract-right-fill",
"ri-contract-right-line",
"ri-contract-up-down-fill",
"ri-contract-up-down-line",
"ri-copilot-fill",
"ri-copilot-line",
"ri-corner-down-left-fill",
"ri-corner-down-left-line",
"ri-corner-down-right-fill",
"ri-corner-down-right-line",
"ri-corner-left-down-fill",
"ri-corner-left-down-line",
"ri-corner-left-up-fill",
"ri-corner-left-up-line",
"ri-corner-right-down-fill",
"ri-corner-right-down-line",
"ri-corner-right-up-fill",
"ri-corner-right-up-line",
"ri-corner-up-left-double-fill",
"ri-corner-up-left-double-line",
"ri-corner-up-left-fill",
"ri-corner-up-left-line",
"ri-corner-up-right-double-fill",
"ri-corner-up-right-double-line",
"ri-corner-up-right-fill",
"ri-corner-up-right-line",
"ri-cross-fill",
"ri-cross-line",
"ri-edge-new-fill",
"ri-edge-new-line",
"ri-equal-fill",
"ri-equal-line",
"ri-expand-left-fill",
"ri-expand-left-line",
"ri-expand-left-right-fill",
"ri-expand-left-right-line",
"ri-expand-right-fill",
"ri-expand-right-line",
"ri-expand-up-down-fill",
"ri-expand-up-down-line",
"ri-flickr-fill",
"ri-flickr-line",
"ri-forward-10-fill",
"ri-forward-10-line",
"ri-forward-15-fill",
"ri-forward-15-line",
"ri-forward-30-fill",
"ri-forward-30-line",
"ri-forward-5-fill",
"ri-forward-5-line",
"ri-graduation-cap-fill",
"ri-graduation-cap-line",
"ri-home-office-fill",
"ri-home-office-line",
"ri-hourglass-2-fill",
"ri-hourglass-2-line",
"ri-hourglass-fill",
"ri-hourglass-line",
"ri-javascript-fill",
"ri-javascript-line",
"ri-loop-left-fill",
"ri-loop-left-line",
"ri-loop-right-fill",
"ri-loop-right-line",
"ri-memories-fill",
"ri-memories-line",
"ri-meta-fill",
"ri-meta-line",
"ri-microsoft-loop-fill",
"ri-microsoft-loop-line",
"ri-nft-fill",
"ri-nft-line",
"ri-notion-fill",
"ri-notion-line",
"ri-openai-fill",
"ri-openai-line",
"ri-overline",
"ri-p2p-fill",
"ri-p2p-line",
"ri-presentation-fill",
"ri-presentation-line",
"ri-replay-10-fill",
"ri-replay-10-line",
"ri-replay-15-fill",
"ri-replay-15-line",
"ri-replay-30-fill",
"ri-replay-30-line",
"ri-replay-5-fill",
"ri-replay-5-line",
"ri-school-fill",
"ri-school-line",
"ri-shining-2-fill",
"ri-shining-2-line",
"ri-shining-fill",
"ri-shining-line",
"ri-sketching",
"ri-skip-down-fill",
"ri-skip-down-line",
"ri-skip-left-fill",
"ri-skip-left-line",
"ri-skip-right-fill",
"ri-skip-right-line",
"ri-skip-up-fill",
"ri-skip-up-line",
"ri-slow-down-fill",
"ri-slow-down-line",
"ri-sparkling-2-fill",
"ri-sparkling-2-line",
"ri-sparkling-fill",
"ri-sparkling-line",
"ri-speak-fill",
"ri-speak-line",
"ri-speed-up-fill",
"ri-speed-up-line",
"ri-tiktok-fill",
"ri-tiktok-line",
"ri-token-swap-fill",
"ri-token-swap-line",
"ri-unpin-fill",
"ri-unpin-line",
"ri-wechat-channels-fill",
"ri-wechat-channels-line",
"ri-wordpress-fill",
"ri-wordpress-line",
"ri-blender-fill",
"ri-blender-line",
"ri-emoji-sticker-fill",
"ri-emoji-sticker-line",
"ri-git-close-pull-request-fill",
"ri-git-close-pull-request-line",
"ri-instance-fill",
"ri-instance-line",
"ri-megaphone-fill",
"ri-megaphone-line",
"ri-pass-expired-fill",
"ri-pass-expired-line",
"ri-pass-pending-fill",
"ri-pass-pending-line",
"ri-pass-valid-fill",
"ri-pass-valid-line",
"ri-ai-generate",
"ri-calendar-close-fill",
"ri-calendar-close-line",
"ri-draggable",
"ri-font-family",
"ri-font-mono",
"ri-font-sans-serif",
"ri-font-sans",
"ri-hard-drive-3-fill",
"ri-hard-drive-3-line",
"ri-kick-fill",
"ri-kick-line",
"ri-list-check-3",
"ri-list-indefinite",
"ri-list-ordered-2",
"ri-list-radio",
"ri-openbase-fill",
"ri-openbase-line",
"ri-planet-fill",
"ri-planet-line",
"ri-prohibited-fill",
"ri-prohibited-line",
"ri-quote-text",
"ri-seo-fill",
"ri-seo-line",
"ri-slash-commands",
"ri-archive-2-fill",
"ri-archive-2-line",
"ri-inbox-2-fill",
"ri-inbox-2-line",
"ri-shake-hands-fill",
"ri-shake-hands-line",
"ri-supabase-fill",
"ri-supabase-line",
"ri-water-percent-fill",
"ri-water-percent-line",
"ri-yuque-fill",
"ri-yuque-line",
"ri-crosshair-2-fill",
"ri-crosshair-2-line",
"ri-crosshair-fill",
"ri-crosshair-line",
"ri-file-close-fill",
"ri-file-close-line",
"ri-infinity-fill",
"ri-infinity-line",
"ri-rfid-fill",
"ri-rfid-line",
"ri-slash-commands-2",
"ri-user-forbid-fill",
"ri-user-forbid-line",
"ri-beer-fill",
"ri-beer-line",
"ri-circle-fill",
"ri-circle-line",
"ri-dropdown-list",
"ri-file-image-fill",
"ri-file-image-line",
"ri-file-pdf-2-fill",
"ri-file-pdf-2-line",
"ri-file-video-fill",
"ri-file-video-line",
"ri-folder-image-fill",
"ri-folder-image-line",
"ri-folder-video-fill",
"ri-folder-video-line",
"ri-hexagon-fill",
"ri-hexagon-line",
"ri-menu-search-fill",
"ri-menu-search-line",
"ri-octagon-fill",
"ri-octagon-line",
"ri-pentagon-fill",
"ri-pentagon-line",
"ri-rectangle-fill",
"ri-rectangle-line",
"ri-robot-2-fill",
"ri-robot-2-line",
"ri-shapes-fill",
"ri-shapes-line",
"ri-square-fill",
"ri-square-line",
"ri-tent-fill",
"ri-tent-line",
"ri-threads-fill",
"ri-threads-line",
"ri-tree-fill",
"ri-tree-line",
"ri-triangle-fill",
"ri-triangle-line",
"ri-twitter-x-fill",
"ri-twitter-x-line",
"ri-verified-badge-fill",
"ri-verified-badge-line",
"ri-armchair-fill",
"ri-armchair-line",
"ri-bnb-fill",
"ri-bnb-line",
"ri-bread-fill",
"ri-bread-line",
"ri-btc-fill",
"ri-btc-line",
"ri-calendar-schedule-fill",
"ri-calendar-schedule-line",
"ri-dice-1-fill",
"ri-dice-1-line",
"ri-dice-2-fill",
"ri-dice-2-line",
"ri-dice-3-fill",
"ri-dice-3-line",
"ri-dice-4-fill",
"ri-dice-4-line",
"ri-dice-5-fill",
"ri-dice-5-line",
"ri-dice-6-fill",
"ri-dice-6-line",
"ri-dice-fill",
"ri-dice-line",
"ri-drinks-fill",
"ri-drinks-line",
"ri-equalizer-2-fill",
"ri-equalizer-2-line",
"ri-equalizer-3-fill",
"ri-equalizer-3-line",
"ri-eth-fill",
"ri-eth-line",
"ri-flower-fill",
"ri-flower-line",
"ri-glasses-2-fill",
"ri-glasses-2-line",
"ri-glasses-fill",
"ri-glasses-line",
"ri-goggles-fill",
"ri-goggles-line",
"ri-image-circle-fill",
"ri-image-circle-line",
"ri-info-i",
"ri-money-rupee-circle-fill",
"ri-money-rupee-circle-line",
"ri-news-fill",
"ri-news-line",
"ri-robot-3-fill",
"ri-robot-3-line",
"ri-share-2-fill",
"ri-share-2-line",
"ri-sofa-fill",
"ri-sofa-line",
"ri-svelte-fill",
"ri-svelte-line",
"ri-vk-fill",
"ri-vk-line",
"ri-xrp-fill",
"ri-xrp-line",
"ri-xtz-fill",
"ri-xtz-line",
"ri-archive-stack-fill",
"ri-archive-stack-line",
"ri-bowl-fill",
"ri-bowl-line",
"ri-calendar-view",
"ri-carousel-view",
"ri-code-block",
"ri-color-filter-fill",
"ri-color-filter-line",
"ri-contacts-book-3-fill",
"ri-contacts-book-3-line",
"ri-contract-fill",
"ri-contract-line",
"ri-drinks-2-fill",
"ri-drinks-2-line",
"ri-export-fill",
"ri-export-line",
"ri-file-check-fill",
"ri-file-check-line",
"ri-focus-mode",
"ri-folder-6-fill",
"ri-folder-6-line",
"ri-folder-check-fill",
"ri-folder-check-line",
"ri-folder-close-fill",
"ri-folder-close-line",
"ri-folder-cloud-fill",
"ri-folder-cloud-line",
"ri-gallery-view-2",
"ri-gallery-view",
"ri-hand",
"ri-import-fill",
"ri-import-line",
"ri-information-2-fill",
"ri-information-2-line",
"ri-kanban-view-2",
"ri-kanban-view",
"ri-list-view",
"ri-lock-star-fill",
"ri-lock-star-line",
"ri-puzzle-2-fill",
"ri-puzzle-2-line",
"ri-puzzle-fill",
"ri-puzzle-line",
"ri-ram-2-fill",
"ri-ram-2-line",
"ri-ram-fill",
"ri-ram-line",
"ri-receipt-fill",
"ri-receipt-line",
"ri-shadow-fill",
"ri-shadow-line",
"ri-sidebar-fold-fill",
"ri-sidebar-fold-line",
"ri-sidebar-unfold-fill",
"ri-sidebar-unfold-line",
"ri-slideshow-view",
"ri-sort-alphabet-asc",
"ri-sort-alphabet-desc",
"ri-sort-number-asc",
"ri-sort-number-desc",
"ri-stacked-view",
"ri-sticky-note-add-fill",
"ri-sticky-note-add-line",
"ri-swap-2-fill",
"ri-swap-2-line",
"ri-swap-3-fill",
"ri-swap-3-line",
"ri-table-3",
"ri-table-view",
"ri-text-block",
"ri-text-snippet",
"ri-timeline-view",
"ri-blogger-fill",
"ri-blogger-line",
"ri-chat-thread-fill",
"ri-chat-thread-line",
"ri-discount-percent-fill",
"ri-discount-percent-line",
"ri-exchange-2-fill",
"ri-exchange-2-line",
"ri-git-fork-fill",
"ri-git-fork-line",
"ri-input-field",
"ri-progress-1-fill",
"ri-progress-1-line",
"ri-progress-2-fill",
"ri-progress-2-line",
"ri-progress-3-fill",
"ri-progress-3-line",
"ri-progress-4-fill",
"ri-progress-4-line",
"ri-progress-5-fill",
"ri-progress-5-line",
"ri-progress-6-fill",
"ri-progress-6-line",
"ri-progress-7-fill",
"ri-progress-7-line",
"ri-progress-8-fill",
"ri-progress-8-line",
"ri-remix-run-fill",
"ri-remix-run-line",
"ri-signpost-fill",
"ri-signpost-line",
"ri-time-zone-fill",
"ri-time-zone-line",
"ri-arrow-down-wide-fill",
"ri-arrow-down-wide-line",
"ri-arrow-left-wide-fill",
"ri-arrow-left-wide-line",
"ri-arrow-right-wide-fill",
"ri-arrow-right-wide-line",
"ri-arrow-up-wide-fill",
"ri-arrow-up-wide-line",
"ri-bluesky-fill",
"ri-bluesky-line",
"ri-expand-height-fill",
"ri-expand-height-line",
"ri-expand-width-fill",
"ri-expand-width-line",
"ri-forward-end-fill",
"ri-forward-end-line",
"ri-forward-end-mini-fill",
"ri-forward-end-mini-line",
"ri-friendica-fill",
"ri-friendica-line",
"ri-git-pr-draft-fill",
"ri-git-pr-draft-line",
"ri-play-reverse-fill",
"ri-play-reverse-line",
"ri-play-reverse-mini-fill",
"ri-play-reverse-mini-line",
"ri-rewind-start-fill",
"ri-rewind-start-line",
"ri-rewind-start-mini-fill",
"ri-rewind-start-mini-line",
"ri-scroll-to-bottom-fill",
"ri-scroll-to-bottom-line",
]
|
ExampleConfig
|
python
|
huggingface__transformers
|
src/transformers/models/edgetam/modeling_edgetam.py
|
{
"start": 28587,
"end": 30692
}
|
class ____(nn.Module):
def __init__(self, config: EdgeTamMaskDecoderConfig):
super().__init__()
self.config = config
self.num_hidden_layers = config.num_hidden_layers
self.layers = nn.ModuleList()
for i in range(self.num_hidden_layers):
self.layers.append(EdgeTamTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0)))
self.final_attn_token_to_image = EdgeTamAttention(config)
self.layer_norm_final_attn = nn.LayerNorm(config.hidden_size)
def forward(
self,
point_embeddings: Tensor,
image_embeddings: Tensor,
image_positional_embeddings: Tensor,
attention_similarity: Tensor,
target_embedding=None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutput]:
if image_embeddings is None:
raise ValueError("You have to specify an image_embedding")
image_embeddings = image_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
image_positional_embeddings = image_positional_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
# Prepare queries
queries = point_embeddings
keys = image_embeddings
# Apply transformer blocks and final layernorm
for layer in self.layers:
if target_embedding is not None:
queries += target_embedding
queries, keys, _ = layer(
queries=queries,
keys=keys,
query_point_embedding=point_embeddings,
key_point_embedding=image_positional_embeddings,
attention_similarity=attention_similarity,
**kwargs,
)
# Apply the final attention layer from the points to the image
query = queries + point_embeddings
key = keys + image_positional_embeddings
attn_out, _ = self.final_attn_token_to_image(query=query, key=key, value=keys)
queries = queries + attn_out
queries = self.layer_norm_final_attn(queries)
return queries, keys
|
EdgeTamTwoWayTransformer
|
python
|
ZoranPandovski__al-go-rithms
|
data_structures/binarySearch_tree/Python/binary_search_tree.py
|
{
"start": 122,
"end": 1124
}
|
class ____:
def __init__(self, username, name, email):
self.username = username
self.name = name
self.email = email
def __repr__(self):
return "User(username='{}', name='{}', email='{}')".format(self.username, self.name, self.email)
def __str__(self):
return self.__repr__()
# making some instances of User to work with
aakash = User('aakash', 'Aakash Rai', 'aakash@example.com')
biraj = User('biraj', 'Biraj Das', 'biraj@example.com')
hemanth = User('hemanth', 'Hemanth Jain', 'hemanth@example.com')
jadhesh = User('jadhesh', 'Jadhesh Verma', 'jadhesh@example.com')
siddhant = User('siddhant', 'Siddhant Sinha', 'siddhant@example.com')
sonaksh = User('sonaksh', 'Sonaksh Kumar', 'sonaksh@example.com')
vishal = User('vishal', 'Vishal Goel', 'vishal@example.com')
# creating a list that contains all these User objects
users = [aakash, biraj, hemanth, jadhesh, siddhant, sonaksh, vishal]
# creating a node for the BST called BSTNode
|
User
|
python
|
sqlalchemy__sqlalchemy
|
examples/space_invaders/space_invaders.py
|
{
"start": 1290,
"end": 3142
}
|
class ____(Base):
"""Describe a "glyph", a graphical element
to be painted on the screen.
"""
__tablename__ = "glyph"
id = Column(Integer, primary_key=True)
name = Column(String)
type = Column(String)
width = Column(Integer)
height = Column(Integer)
data = Column(String)
alt_data = Column(String)
__mapper_args__ = {"polymorphic_on": type}
def __init__(self, name, img, alt=None):
self.name = name
self.data, self.width, self.height = self._encode_glyph(img)
if alt is not None:
self.alt_data, alt_w, alt_h = self._encode_glyph(alt)
def _encode_glyph(self, img):
"""Receive a textual description of the glyph and
encode into a format understood by
GlyphCoordinate.render().
"""
img = re.sub(r"^\n", "", textwrap.dedent(img))
color = "W"
lines = [line.rstrip() for line in img.split("\n")]
data = []
for line in lines:
render_line = []
line = list(line)
while line:
char = line.pop(0)
if char == "#":
color = line.pop(0)
continue
render_line.append((color, char))
data.append(render_line)
width = max([len(rl) for rl in data])
data = "".join(
"".join("%s%s" % (color, char) for color, char in render_line)
+ ("W " * (width - len(render_line)))
for render_line in data
)
return data, width, len(lines)
def glyph_for_state(self, coord, state):
"""Return the appropriate data representation
for this Glyph, based on the current coordinates
and state.
Subclasses may override this to provide animations.
"""
return self.data
|
Glyph
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_groupby.py
|
{
"start": 4660,
"end": 5431
}
|
class ____:
@functools.cached_property
def _by_meta(self):
return [meta_nonempty(x._meta) if isinstance(x, Expr) else x for x in self.by]
@functools.cached_property
def _by_columns(self):
return [x for x in self.by if not isinstance(x, Expr)]
@property
def split_by(self):
return list(
flatten(
[[x] if not isinstance(x, Expr) else x.columns for x in self.by],
container=list,
)
)
@functools.cached_property
def by(self):
return self.operands[len(self._parameters) :]
@functools.cached_property
def levels(self):
return _determine_levels(self.by)
@property
def shuffle_by_index(self):
return True
|
GroupByBase
|
python
|
doocs__leetcode
|
lcp/LCP 70. 沙地治理/Solution.py
|
{
"start": 0,
"end": 533
}
|
class ____:
def sandyLandManagement(self, size: int) -> List[List[int]]:
ans = [[1, 1]]
k = 0
for i in range(size, 1, -1):
if k == 0:
for j in range(1, i << 1, 2):
ans.append([i, j])
elif k == 1:
ans.append([i, 2])
elif k == 2:
for j in range(3, i << 1, 2):
ans.append([i, j])
else:
ans.append([i, 1])
k = (k + 1) % 4
return ans
|
Solution
|
python
|
google__jax
|
tests/mosaic/flash_attention_test.py
|
{
"start": 1315,
"end": 2785
}
|
class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if flash_attention is None:
self.skipTest("Mosaic GPU not available.")
if (not jtu.test_device_matches(["cuda"]) or
not jtu.is_cuda_compute_capability_equal("9.0")):
self.skipTest("Only works on GPU with capability sm90a")
@parameterized.product(
batch_size=(1,),
q_seq_len=(4096,),
kv_seq_len=(4096,),
num_q_and_kv_heads=((4, 1), # MQA
(6, 3), # GQA
(4, 4),), # MHA
head_dim=(64, 128, 256),
# Provide a default value for exp_impl if 'flash_attention' is not
# available. Bypasses test failures when Mosaic is not available.
exp_impl=[*(flash_attention.ExpImplementation
if flash_attention is not None else (NotImplementedError,))],
)
def test_flash_attention(self, batch_size, q_seq_len, kv_seq_len,
num_q_and_kv_heads, head_dim, exp_impl):
num_q_heads, num_kv_heads = num_q_and_kv_heads
flash_attention.benchmark_and_verify(
batch_size=batch_size,
q_seq_len=q_seq_len,
kv_seq_len=kv_seq_len,
num_q_heads=num_q_heads,
num_kv_heads=num_kv_heads,
head_dim=head_dim,
exp_impl=exp_impl,
blocks=flash_attention.BlockSizes(stages=2, q=64, kv=64)
)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
FlashAttentionTestCase
|
python
|
spyder-ide__spyder
|
spyder/widgets/helperwidgets.py
|
{
"start": 25014,
"end": 26904
}
|
class ____(QLabel):
"""Label to report a message to users."""
def __init__(self, parent):
super().__init__("", parent)
# Set main attributes
self.setWordWrap(True)
self.setVisible(False)
# Set style
css = qstylizer.style.StyleSheet()
css.QLabel.setValues(
backgroundColor=SpyderPalette.COLOR_BACKGROUND_2,
# Top margin is set by the layout
marginTop="0px",
marginRight=f"{9 * AppStyle.MarginSize}px",
# We don't need bottom margin because there are no other elements
# below this one.
marginBottom="0px",
# The extra 5px are necessary because we need to add them to all
# lineedits in this dialog to align them to the labels on top of
# them (see SpyderConfigPage.create_lineedit).
marginLeft=f"{9 * AppStyle.MarginSize + 5}px",
padding=f"{3 * AppStyle.MarginSize}px {6 * AppStyle.MarginSize}px",
borderRadius=SpyderPalette.SIZE_BORDER_RADIUS,
)
self.setStyleSheet(css.toString())
def set_text(self, text: str):
n_reasons = 1
if "<br>" in text or "\n" in text:
# There are two or more reasons in the text.
n_reasons = 2
self.setAlignment(Qt.AlignCenter if n_reasons == 1 else Qt.AlignLeft)
self.setText(text)
def test_msgcheckbox():
from spyder.utils.qthelpers import qapplication
app = qapplication() # noqa
box = MessageCheckBox()
box.setWindowTitle(_("Spyder updates"))
box.setText("Testing checkbox")
box.set_checkbox_text("Check for updates on startup?")
box.setStandardButtons(QMessageBox.Ok)
box.setDefaultButton(QMessageBox.Ok)
box.setIcon(QMessageBox.Information)
box.exec_()
if __name__ == '__main__':
test_msgcheckbox()
|
MessageLabel
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/tasks.py
|
{
"start": 15750,
"end": 22261
}
|
class ____(NonStrictDataModel):
"""
:param binary: Binary to use when running the script
:type binary: str
:param repository: Name of the repository where the script is located
:type repository: str
:param tag: Repository tag
:type tag: str
:param branch: Repository branch id If not provided and tag not provided,
default repository branch is used.
:type branch: str
:param version_num: Version (changeset) number. Optional (default is head
version) Unused if tag is provided.
:type version_num: str
:param entry_point: Path to execute within the repository
:type entry_point: str
:param working_dir: Path to the folder from which to run the script Default -
root folder of repository
:type working_dir: str
:param requirements: A JSON object containing requirements strings by key
:type requirements: dict
:param diff: Uncommitted changes found in the repository when task was run
:type diff: str
"""
_schema = {
"properties": {
"binary": {
"default": "python",
"description": "Binary to use when running the script",
"type": ["string", "null"],
},
"branch": {
"description": (
"Repository branch id If not provided and tag not provided, default repository branch is used."
),
"type": ["string", "null"],
},
"diff": {
"description": "Uncommitted changes found in the repository when task was run",
"type": ["string", "null"],
},
"entry_point": {
"description": "Path to execute within the repository",
"type": ["string", "null"],
},
"repository": {
"description": "Name of the repository where the script is located",
"type": ["string", "null"],
},
"requirements": {
"description": "A JSON object containing requirements strings by key",
"type": ["object", "null"],
},
"tag": {"description": "Repository tag", "type": ["string", "null"]},
"version_num": {
"description": (
"Version (changeset) number. Optional (default is head version) Unused if tag is provided."
),
"type": ["string", "null"],
},
"working_dir": {
"description": "Path to the folder from which to run the script Default - root folder of repository",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(
self,
binary="python",
repository=None,
tag=None,
branch=None,
version_num=None,
entry_point=None,
working_dir=None,
requirements=None,
diff=None,
**kwargs
):
super(Script, self).__init__(**kwargs)
self.binary = binary
self.repository = repository
self.tag = tag
self.branch = branch
self.version_num = version_num
self.entry_point = entry_point
self.working_dir = working_dir
self.requirements = requirements
self.diff = diff
@schema_property("binary")
def binary(self):
return self._property_binary
@binary.setter
def binary(self, value):
if value is None:
self._property_binary = None
return
self.assert_isinstance(value, "binary", six.string_types)
self._property_binary = value
@schema_property("repository")
def repository(self):
return self._property_repository
@repository.setter
def repository(self, value):
if value is None:
self._property_repository = None
return
self.assert_isinstance(value, "repository", six.string_types)
self._property_repository = value
@schema_property("tag")
def tag(self):
return self._property_tag
@tag.setter
def tag(self, value):
if value is None:
self._property_tag = None
return
self.assert_isinstance(value, "tag", six.string_types)
self._property_tag = value
@schema_property("branch")
def branch(self):
return self._property_branch
@branch.setter
def branch(self, value):
if value is None:
self._property_branch = None
return
self.assert_isinstance(value, "branch", six.string_types)
self._property_branch = value
@schema_property("version_num")
def version_num(self):
return self._property_version_num
@version_num.setter
def version_num(self, value):
if value is None:
self._property_version_num = None
return
self.assert_isinstance(value, "version_num", six.string_types)
self._property_version_num = value
@schema_property("entry_point")
def entry_point(self):
return self._property_entry_point
@entry_point.setter
def entry_point(self, value):
if value is None:
self._property_entry_point = None
return
self.assert_isinstance(value, "entry_point", six.string_types)
self._property_entry_point = value
@schema_property("working_dir")
def working_dir(self):
return self._property_working_dir
@working_dir.setter
def working_dir(self, value):
if value is None:
self._property_working_dir = None
return
self.assert_isinstance(value, "working_dir", six.string_types)
self._property_working_dir = value
@schema_property("requirements")
def requirements(self):
return self._property_requirements
@requirements.setter
def requirements(self, value):
if value is None:
self._property_requirements = None
return
self.assert_isinstance(value, "requirements", (dict,))
self._property_requirements = value
@schema_property("diff")
def diff(self):
return self._property_diff
@diff.setter
def diff(self, value):
if value is None:
self._property_diff = None
return
self.assert_isinstance(value, "diff", six.string_types)
self._property_diff = value
|
Script
|
python
|
django-haystack__django-haystack
|
test_haystack/discovery/models.py
|
{
"start": 31,
"end": 183
}
|
class ____(models.Model):
title = models.CharField(max_length=255)
body = models.TextField()
def __str__(self):
return self.title
|
Foo
|
python
|
sympy__sympy
|
sympy/polys/polyoptions.py
|
{
"start": 988,
"end": 1328
}
|
class ____(Option):
"""An option that must have a boolean value or equivalent assigned. """
@classmethod
def preprocess(cls, value):
if value in [True, False]:
return bool(value)
else:
raise OptionError("'%s' must have a boolean value assigned, got %s" % (cls.option, value))
|
BooleanOption
|
python
|
pyparsing__pyparsing
|
examples/shapes.py
|
{
"start": 547,
"end": 633
}
|
class ____(Shape):
def area(self):
return self.width * self.height
|
Rectangle
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/input/base.py
|
{
"start": 318,
"end": 2288
}
|
class ____(metaclass=ABCMeta):
"""
Abstraction for any input.
An instance of this class can be given to the constructor of a
:class:`~prompt_toolkit.application.Application` and will also be
passed to the :class:`~prompt_toolkit.eventloop.base.EventLoop`.
"""
@abstractmethod
def fileno(self) -> int:
"""
Fileno for putting this in an event loop.
"""
@abstractmethod
def typeahead_hash(self) -> str:
"""
Identifier for storing type ahead key presses.
"""
@abstractmethod
def read_keys(self) -> list[KeyPress]:
"""
Return a list of Key objects which are read/parsed from the input.
"""
def flush_keys(self) -> list[KeyPress]:
"""
Flush the underlying parser. and return the pending keys.
(Used for vt100 input.)
"""
return []
def flush(self) -> None:
"The event loop can call this when the input has to be flushed."
pass
@property
@abstractmethod
def closed(self) -> bool:
"Should be true when the input stream is closed."
return False
@abstractmethod
def raw_mode(self) -> ContextManager[None]:
"""
Context manager that turns the input into raw mode.
"""
@abstractmethod
def cooked_mode(self) -> ContextManager[None]:
"""
Context manager that turns the input into cooked mode.
"""
@abstractmethod
def attach(self, input_ready_callback: Callable[[], None]) -> ContextManager[None]:
"""
Return a context manager that makes this input active in the current
event loop.
"""
@abstractmethod
def detach(self) -> ContextManager[None]:
"""
Return a context manager that makes sure that this input is not active
in the current event loop.
"""
def close(self) -> None:
"Close input."
pass
|
Input
|
python
|
keras-team__keras
|
keras/src/metrics/probabilistic_metrics.py
|
{
"start": 7885,
"end": 10640
}
|
class ____(reduction_metrics.MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
Use this crossentropy metric when there are two or more label classes.
It expects labels to be provided as integers. If you want to provide labels
that are one-hot encoded, please use the `CategoricalCrossentropy`
metric instead.
There should be `num_classes` floating point values per feature for `y_pred`
and a single floating point value per feature for `y_true`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_logits: (Optional) Whether output is expected
to be a logits tensor. By default, we consider that output
encodes a probability distribution.
axis: (Optional) Defaults to `-1`.
The dimension along which entropy is computed.
Examples:
>>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
>>> # logits = log(y_pred)
>>> # softmax = exp(logits) / sum(exp(logits), axis=-1)
>>> # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
>>> # xent = -sum(y * log(softmax), 1)
>>> # log(softmax) = [[-2.9957, -0.0513, -16.1181],
>>> # [-2.3026, -0.2231, -2.3026]]
>>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
>>> # xent = [0.0513, 2.3026]
>>> # Reduced xent = (0.0513 + 2.3026) / 2
>>> m = keras.metrics.SparseCategoricalCrossentropy()
>>> m.update_state([1, 2],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> m.result()
1.1769392
>>> m.reset_state()
>>> m.update_state([1, 2],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
... sample_weight=np.array([0.3, 0.7]))
>>> m.result()
1.6271976
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras.metrics.SparseCategoricalCrossentropy()])
```
"""
def __init__(
self,
name="sparse_categorical_crossentropy",
dtype=None,
from_logits=False,
axis=-1,
):
super().__init__(
sparse_categorical_crossentropy,
name=name,
dtype=dtype,
from_logits=from_logits,
axis=axis,
)
self.from_logits = from_logits
self.axis = axis
# Metric should be minimized during optimization.
self._direction = "down"
def get_config(self):
return {
"name": self.name,
"dtype": self.dtype,
"from_logits": self.from_logits,
"axis": self.axis,
}
|
SparseCategoricalCrossentropy
|
python
|
huggingface__transformers
|
src/transformers/models/vit_msn/modeling_vit_msn.py
|
{
"start": 16674,
"end": 19406
}
|
class ____(ViTMSNPreTrainedModel):
def __init__(self, config: ViTMSNConfig, use_mask_token: bool = False):
r"""
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether to use a mask token for masked image modeling.
"""
super().__init__(config)
self.config = config
self.embeddings = ViTMSNEmbeddings(config, use_mask_token=use_mask_token)
self.encoder = ViTMSNEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> ViTMSNPatchEmbeddings:
return self.embeddings.patch_embeddings
@check_model_inputs(tie_last_hidden_states=False)
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
bool_masked_pos: Optional[torch.BoolTensor] = None,
interpolate_pos_encoding: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutput:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
Examples:
```python
>>> from transformers import AutoImageProcessor, ViTMSNModel
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/vit-msn-small")
>>> model = ViTMSNModel.from_pretrained("facebook/vit-msn-small")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```"""
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output = self.embeddings(
pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
)
encoder_outputs: BaseModelOutput = self.encoder(embedding_output)
sequence_output = encoder_outputs.last_hidden_state
sequence_output = self.layernorm(sequence_output)
return BaseModelOutput(last_hidden_state=sequence_output)
# Caution: We don't have the weights for the classification head yet. This class
# is here for the users that are interested to fine-tune the base model (ViTMSNModel).
@auto_docstring
|
ViTMSNModel
|
python
|
PrefectHQ__prefect
|
tests/server/models/test_work_queues.py
|
{
"start": 6291,
"end": 8284
}
|
class ____:
async def test_update_work_queue(self, session, work_queue):
result = await models.work_queues.update_work_queue(
session=session,
work_queue_id=work_queue.id,
work_queue=schemas.actions.WorkQueueUpdate(is_paused=True),
)
assert result
updated_queue = await models.work_queues.read_work_queue(
session=session, work_queue_id=work_queue.id
)
assert updated_queue.id == work_queue.id
# relevant attributes should be updated
assert updated_queue.is_paused
# unset attributes should be ignored
assert updated_queue.description == work_queue.description
async def test_update_work_queue_without_name(self, session, work_queue):
assert work_queue.is_paused is False
assert work_queue.concurrency_limit is None
result = await models.work_queues.update_work_queue(
session=session,
work_queue_id=work_queue.id,
work_queue=schemas.actions.WorkQueueUpdate(
concurrency_limit=3,
is_paused=True,
),
)
assert result
updated_queue = await models.work_queues.read_work_queue(
session=session, work_queue_id=work_queue.id
)
# relevant attributes should be updated
assert updated_queue.is_paused
assert updated_queue.concurrency_limit == 3
# unset attributes should be ignored
assert updated_queue.description == work_queue.description
assert updated_queue.id == work_queue.id
assert updated_queue.name == work_queue.name
async def test_update_work_queue_returns_false_if_does_not_exist(self, session):
result = await models.work_queues.update_work_queue(
session=session,
work_queue_id=str(uuid4()),
work_queue=schemas.actions.WorkQueueUpdate(),
)
assert result is False
|
TestUpdateWorkQueue
|
python
|
django__django
|
django/contrib/auth/hashers.py
|
{
"start": 12813,
"end": 13140
}
|
class ____(PBKDF2PasswordHasher):
"""
Alternate PBKDF2 hasher which uses SHA1, the default PRF
recommended by PKCS #5. This is compatible with other
implementations of PBKDF2, such as openssl's
PKCS5_PBKDF2_HMAC_SHA1().
"""
algorithm = "pbkdf2_sha1"
digest = hashlib.sha1
|
PBKDF2SHA1PasswordHasher
|
python
|
joke2k__faker
|
faker/providers/person/az_AZ/__init__.py
|
{
"start": 152,
"end": 17369
}
|
class ____(PersonProvider):
formats_female = (
"{{first_name_female}} {{last_name_female}}",
"{{first_name_female}} {{first_name_male}}",
"{{first_name_female}} {{last_name_unisex}}",
)
formats_male = (
"{{first_name_male}} {{last_name_male}}",
"{{first_name_male}} {{first_name_male}}",
"{{first_name_male}} {{last_name_unisex}}",
)
formats = formats_male + formats_female
first_names_female = (
"Adilə",
"Afaq",
"Afət",
"Ağca",
"Aidə",
"Akifə",
"Aliyə",
"Almaz",
"Arifə",
"Arzu",
"Asilə",
"Aybəniz",
"Ayan",
"Ayçiçək",
"Aydan",
"Aygül",
"Aygün",
"Ayla",
"Aynur",
"Aynurə",
"Aysu",
"Aysel",
"Aytən",
"Aytəkin",
"Balaxanım",
"Bənövşə",
"Brilyant",
"Cahan",
"Ceyran",
"Ceyhunə",
"Ceyla",
"Cəmalə",
"Cəmilə",
"Çimnaz",
"Çinarə",
"Çiçək",
"Dilarə",
"Dilşad",
"Dünya",
"Durna",
"Dürdanə",
"Dürnisə",
"Elmira",
"Elnarə",
"Elnurə",
"Esmira",
"Əcəbnaz",
"Ədibə",
"Əminə",
"Əntiqə",
"Əsmər",
"Familə",
"Fatimə",
"Fatihə",
"Fatma",
"Fəridə",
"Fərqanə",
"Fəxriyyə",
"Fidan",
"Fimar",
"Firəngiz",
"Firuzə",
"Fizzə",
"Flora",
"Gülarə",
"Gülay",
"Gülər",
"Güllər",
"Gülçöhrə",
"Gülçin",
"Gülnar",
"Gülnarə",
"Gülnaz",
"Gülnur",
"Gülmira",
"Gültac",
"Gülşən",
"Gülsüm",
"Gültəkin",
"Gülüstan",
"Gülüş",
"Gülxar",
"Gülzar",
"Günay",
"Həcər",
"Hicran",
"Hökümə",
"Hüsniyyə",
"İlhamə",
"İlkanə",
"İlqarə",
"İlnarə",
"İlahə",
"İnarə",
"İnayət",
"İntizar",
"İradə",
"Jalə",
"Kamilə",
"Kəmalə",
"Kifayət",
"Könül",
"Kövsər",
"Laçın",
"Lamiyə",
"Lalə",
"Laləzar",
"Leyla",
"Leyli",
"Ləman",
"Lətifə",
"Lətafət",
"Lütfiyyə",
"Mahirə",
"Mahizər",
"Maisə",
"Mehbarə",
"Mehin",
"Meyransa",
"Mədinə",
"Məhru",
"Məlahət",
"Məleykə",
"Mənsurə",
"Məryəm",
"Mərziyyə",
"Məsmə",
"Mətanət",
"Minurə",
"Münəvvər",
"Müşkünaz",
"Mülayim",
"Nahidə",
"Narıngül",
"Naibə",
"Nailə",
"Nanə",
"Natella",
"Natəvan",
"Nazilə",
"Nazimə",
"Nazlı",
"Nəfisə",
"Nərgiz",
"Nərmin",
"Nərminə",
"Nəzakət",
"Nəzrin",
"Nigar",
"Nihal",
"Nisə",
"Növrəstə",
"Nurlana",
"Nuranə",
"Nübar",
"Nüşabə",
"Ofeliya",
"Pakizə",
"Pərvanə",
"Pərvinə",
"Pəri",
"Pərişan",
"Qaratel",
"Qənirə",
"Qızbəs",
"Qönçə",
"Qumru",
"Raifə",
"Ramilə",
"Rasimə",
"Raziyə",
"Reyhan",
"Reyhanə",
"Rəfiqə",
"Rəhilə",
"Rəmziyyə",
"Rəna",
"Rəqsanə",
"Rəsmiyyə",
"Rəxşəndə",
"Roza",
"Rövşanə",
"Röya",
"Röyal",
"Röyalə",
"Ruhəngiz",
"Ruhiyyə",
"Ruqiyyə",
"Ruxsarə",
"Rübeyda",
"Rübabə",
"Sabirə",
"Sacidə",
"Sahibə",
"Samirə",
"Sayalı",
"Sara",
"Selcan",
"Sevil",
"Sevinc",
"Seyran",
"Səadət",
"Səbinə",
"Səbirə",
"Sədaqət",
"Səfurə",
"Səhər",
"Səidə",
"Səlimə",
"Səmayə",
"Səma",
"Sənubər",
"Səriyyə",
"Sidiqə",
"Sima",
"Simuzər",
"Sitarə",
"Solmaz",
"Sona",
"Suğra",
"Sürəyya",
"Südabə",
"Şahnaz",
"Şeyda",
"Şəfa",
"Şəfiqə",
"Şəhrəbanu",
"Şəmsiyyə",
"Şəlalə",
"Şəhla",
"Şərafət",
"Şükufə",
"Şövkət",
"Tamara",
"Tamilla",
"Tamaşa",
"Təhminə",
"Təranə",
"Tovuz",
"Tomris",
"Turan",
"Turanə",
"Tutu",
"Tükəzban",
"Tünzalə",
"Türkan",
"Türkanə",
"Ulduz",
"Ülkər",
"Ülviyyə",
"Vahidə",
"Validə",
"Vəfa",
"Vüsalə",
"Xalidə",
"Xalisə",
"Xanım",
"Xatirə",
"Xavər",
"Xəyalə",
"Xumar",
"Yasəmən",
"Yaqub",
"Yazgül",
"Yeganə",
"Zarəngiz",
"Zemfira",
"Zeynəb",
"Zeynəgül",
"Zəhra",
"Zəminə",
"Zəkəriyyə",
"Zərif",
"Zərifə",
"Zərnigar",
"Ziba",
"Zibeydə",
"Zinət",
"Zöhrə",
"Züleyxa",
"Zülfiyyə",
"Zümrüd",
)
first_names_male = (
"Adil",
"Adnan",
"Afiq",
"Afil",
"Ağa",
"Ağahadi",
"Ağaxan",
"Ağamalı",
"Ağamusa",
"Ağasəfa",
"Akif",
"Aqil",
"Allahverdi",
"Anar",
"Arif",
"Asif",
"Asəf",
"Asiman",
"Aslan",
"Atabala",
"Atamoğlan",
"Aydın",
"Azad",
"Azər",
"Baba",
"Bahadır",
"Bayram",
"Behbud ",
"Bəhlul",
"Bəhram",
"Bəhruz",
"Bəkir",
"Bəşir",
"Bilal",
"Cabbar",
"Cahangir",
"Cavad",
"Cavid",
"Ceyhun",
"Cəlal",
"Cəlil",
"Cəmil",
"Coşqun",
"Cümşüd",
"Çingiz",
"Dadaş ",
"Dilavər",
"Dönməz",
"Dünyamalı",
"Elvin",
"Elariz",
"Elçin",
"Eldar",
"Eldəniz",
"Elgün",
"Elman",
"Elmir",
"Elmidar",
"Elmar",
"Elməddin",
"Elnur",
"Elton",
"Elturan",
"Elsevər",
"Elxan",
"Elşən",
"Elşad",
"Emin",
"Emil",
"Etibar",
"Etiqad",
"Eyyub",
"Əbdül",
"Ədalət",
"Əflatun",
"Əhməd",
"Əhliman",
"Əhəd",
"Əkbər",
"Ələkbər",
"Ələsgər",
"Əli",
"Əliəkrəm",
"Əliş",
"Əmin",
"Əmir",
"Ənvər",
"Əkrəm",
"Ərtoğrul",
"Əsgər",
"Əşrəf",
"Əyyub",
"Əjdər",
"Faiq",
"Famil ",
"Fazil",
"Fariz",
"Fərhad",
"Fərid",
"Fərda",
"Fərman",
"Fəxri",
"Fəxrəddin",
"Fəyyaz",
"Fərrux",
"Fərahim",
"Fimar",
"Fikrət",
"Firudin",
"Firdovsi",
"Fəhmin",
"Fəzail",
"Fuad ",
"Füzuli",
"Gəray",
"Gülağa",
"Gülbala",
"Gündüz",
"Habil",
"Hafiz",
"Hakim",
"Hacıbəy",
"Heydər",
"Həci",
"Həmid",
"Həsən",
"Həsənağa",
"Həzi",
"Hikmət",
"Hilal",
"Hümbət",
"Hüseyn",
"Hüseynqulu",
"İbiş",
"İbrahim",
"İlham",
"İlkin",
"İlqar",
"İlyas",
"İmam",
"İmran",
"İnqilab",
"İntiqam",
"İsa",
"İsgəndər",
"İslam",
"İsmayıl",
"İsmət",
"İsrafil",
"İxtiyar",
"İzzət",
"Kamal",
"Kamil",
"Kamran",
"Kazım",
"Kəmaləddin",
"Kənan",
"Kərim",
"Kəramət",
"Laçın",
"Lazım",
"Lətif",
"Mahir",
"Mahmud",
"Maqsud",
"Manaf",
"Mehdi",
"Mehdiqulu",
"Mehman",
"Məhəmməd",
"Məhərrəm",
"Məmməd",
"Mənsur",
"Mikayıl",
"Mirəli",
"Mirzə",
"Musa",
"Murad",
"Muxtar",
"Mübariz",
"Mümtaz",
"Münəvvər",
"Müntəzir",
"Mürsəl",
"Müslüm",
"Müzəffər",
"Nadir",
"Nağı",
"Namiq",
"Natiq",
"Nazim",
"Nazir",
"Nakif",
"Nəcəf",
"Nəriman",
"Nəsib",
"Nəsimi",
"Nicat",
"Nihal",
"Nihad",
"Niyaz",
"Niyazi",
"Novruz",
"Nurşən",
"Nurlan",
"Nuru",
"Nurulla",
"Nurşərəf ",
"Nuşirəvan",
"Nürəddin",
"Nüsrət",
"Oqtay",
"Orxan",
"Orduxan",
"Osman",
"Paşa",
"Pərvin",
"Pərviz",
"Polad",
"Pünhan",
"Qabil",
"Qadir",
"Qalib",
"Qasım",
"Qələndər",
"Qədir",
"Qəşəm ",
"Qəzənfər",
"Qiyas",
"Qoşqar",
"Qulam",
"Qulu",
"Qüdrət",
"Rafael",
"Rafiq",
"Ramil",
"Ramid",
"Ramiz ",
"Rasim",
"Razi",
"Rəis",
"Rəsul",
"Rəşad",
"Rəşid",
"Riyad",
"Rizvan",
"Roman",
"Rövşən",
"Ruslan",
"Rüfət",
"Rza",
"Sabir",
"Sadıq",
"Sadiq",
"Saleh",
"Salman ",
"Samir",
"Sarvan",
"Seyfulla",
"Seyfəddin",
"Seymur",
"Seyran",
"Səbuhi",
"Sədaqət",
"Səfər",
"Səlahəddin",
"Səlim",
"Səməd",
"Sənan",
"Sərəncam",
"Sərvər ",
"Sərxan",
"Səxavət",
"Sirac",
"Sübhan",
"Süleyman",
"Sücəddin",
"Şahin ",
"Şamil",
"Şamxal",
"Şahvələd",
"Şahlar",
"Şahmar",
"Şakir",
"Şaban",
"Şərəfəddin",
"Şirin",
"Şirzad",
"Şıxı",
"Şükür",
"Tahir ",
"Talıb",
"Tariyel",
"Teymur",
"Teymuraz",
"Teymurçin",
"Telman",
"Tərlan",
"Tofiq",
"Toğrul",
"Tunar",
"Tural",
"Turan",
"Turxan",
"Urfan",
"Uğur",
"Ülfət",
"Ülvi",
"Ümid",
"Üzeyir",
"Vahid",
"Vaqif ",
"Valeh",
"Valid",
"Vasif",
"Vazeh",
"Vidadi",
"Vilayət",
"Veysəl",
"Vəfadar",
"Vəli",
"Vurğun",
"Vüqar",
"Vüsal",
"Xamis",
"Xalid",
"Xaləddin",
"Xaliq",
"Xankişi",
"Xanlar",
"Xəlil",
"Xəyyam",
"Xəyal",
"Xudu",
"Xudayar",
"Xudaverdi",
"Xosrov",
"Yadigar",
"Yamən",
"Yasin",
"Yaşar",
"Yalçın",
"Yaqub",
"Yavər",
"Yasəf",
"Yəhya",
"Yunis",
"Yusif",
"Zakir",
"Zaman",
"Zamin",
"Zamiq",
"Zamir",
"Zahid",
"Zahir",
"Zaur",
"Zeynal",
"Zeynulla",
"Zərdüşt",
"Ziya",
)
first_names = first_names_male + first_names_female
last_names_male = [
"Əlibəyov",
"Məhərrəmoğlu",
"Əlləzoğlu",
"Şükürov",
"Arifcanov",
"Qurbanov",
"Əlioğlu",
"Nərimanov",
"Kərimov",
"Bağırov",
"Hüseynoğlu",
"İbrahimbəyov",
"Atakişioğlu",
"Camalov",
"Şamiloğlu",
"Rəhimov",
"Vəzirov",
"Cəbrayılov",
"Tapdıqoğlu",
"Hümmətov",
"İldırımoğlu",
"Xəlilov",
"Əmirov",
"Eminov",
"Mahmudov",
"Qabiloğlu",
"Bəşiroğlu",
"Talıbov",
"Süleymanov",
"Ağaoğlu",
"Axundov",
"Musabəyov",
"Əhmədov",
"Orucov",
"Vəlixanov",
"Hacıbəyov",
"Pənahov",
"Şamilov",
"Cəfəroğlu",
"İbrahimoğlu",
"Vəziroğlu",
"Gözəlov",
"Hüseynov",
"Yusifoğlu",
"Nəbioğlu",
"Həsənov",
"Fikrətoğlu",
"Xakimov",
"Sadıxov",
"İdrisoğlu",
"Salamoğlu",
"Yaqubov",
"Sadiqov",
"Əsgəroğlu",
"Bayramov",
"Qasımov",
"Vəlioğlu",
"Sultanov",
"Vilayətoğlu",
"Abdullaoğlu",
"Muxtaroğlu",
"Bakıxanov",
"Nağıoğlu",
"Mirələmov",
"Ədiloğlu",
"Məcnunbəyov",
"Mirəhmədov",
"Qafarov",
"Səmədoğlu",
"Abbasov",
"Sədaqətoğlu",
"Əsgərov",
"Çingizoğlu",
"Fərəcov",
"Rövşənov",
"İbrahimov",
]
last_names_female = [
"Aslanova",
"İsmayılqızı",
"Səlimova",
"İbrahimova",
"Nəzirova",
"Əsgərova",
"Nəsibova",
"Şərifova",
"Bağırova",
"Anarqızı",
"Abdullayeva",
"Rəhmanova",
"Babayeva",
"Cəfərova",
"Sadıxova",
"Mürvətqızı",
"Kərimova",
"Nəbiqızı",
"Əmirova",
"Əfəndiyeva",
"Mahmudova",
"Abbasova",
"Axundova",
"Hüseynova",
"Musabəyova",
"Yusifqızı",
"Nərimanova",
"Əkbərova",
"Əliyeva",
"Ağayeva",
"Vəliyeva",
]
last_names_unisex = [
"Şahbuzlu",
"Tağızadə",
"Əlibəyli",
"Baharlı",
"Talıblı",
"Ələkbərli",
"Abdullazadə",
"Çəmənzəminli",
"İmanzadə",
"Nurəli",
"Seyidbəyli",
"Şirvanzadə",
"Lətifxanlı",
"Əbdülrəhmanlı",
"Cabbarlı",
"Cəfərzadə",
"Süleymanlı",
"Kərimli",
"Abbaszadə",
"Məhərrəmzadə",
"Tapdıqoğlu",
"Məmmədquluzadə",
"Nəbibəyli",
"Məmmədxanlı",
"Qənizadə",
"Budaqlı",
"Hacızadə",
"Nəsirli",
"Məhərrəmli",
"Əzimzadə",
"Makulu",
"Yusifzadə",
"Qutqaşınlı",
"Rzaquluzadə",
"Nəzərli",
"Qurbanlı",
"Şeyxzadə",
"Qoşalı",
"Hüseynbəyli",
"Məmmədzadə",
"Əhmədli",
"Tahirzadə",
"Əlixanlı",
"Cuvarlı",
"Dağlı",
"Şahsuvarlı",
"İsaxanlı",
"Bünyadzadə",
"Həsənzadə",
"Turallı",
"Gəncəli",
"Əfəndizadə",
"Məlikzadə",
"Qoçulu",
"Arzulu",
"İbrahimli",
"Abbaslı",
"Arazlı",
"Sultanlı",
"Vəziroğlu",
"Musaxanlı",
"Nəbioğlu",
"Qarabağlı",
"İdrisli",
"Axundlu",
"Elatlı",
"Salamoğlu",
"Əhmədzadə",
"Rəsulzadə",
"Vəfalı",
"Əlizadə",
"Səfərli",
"Hüseynli",
"Məmmədli",
"Əsgərli",
"Uğurlu",
"Vəlizadə",
"Şükürlü",
"Şıxlı",
"Muğanlı",
"Eyvazlı",
"Əylisli",
"Qəhrəmanlı",
"Əkbərzadə",
"Köçərli",
"Musazadə",
"Sədalı",
"Təhmasibli",
"Eyvazalı",
"Kərimzadə",
"Babanlı",
"Veysəlli",
"Dəmirbəyli",
"Tapdıqlı",
"Nəzirli",
"Rəhimli",
"Seyidzadə",
"Axundzadə",
"İsabəyli",
"Çəmənli",
"Heydərli",
"Qədirzadə",
"Hacıbəyli",
"Qafurbəyli",
"Əmirli",
"Ələkbərzadə",
]
last_names = last_names_male + last_names_female + last_names_unisex
prefixes_female = ("Xanım",)
prefixes_male = ("Cənab", "Bəy", "Müəllim")
prefixes = prefixes_female + prefixes_male
def last_name_male(self) -> str:
return self.random_element(self.last_names_male + self.last_names_unisex)
def last_name_unique_to_male(self) -> str:
return self.random_element(self.last_names_male)
def last_name_female(self) -> str:
return self.random_element(self.last_names_female + self.last_names_unisex)
def last_name_unique_to_female(self) -> str:
return self.random_element(self.last_names_female)
def last_name_unisex(self) -> str:
return self.random_element(self.last_names_unisex)
|
Provider
|
python
|
spack__spack
|
lib/spack/spack/vendor/jinja2/nodes.py
|
{
"start": 19507,
"end": 19822
}
|
class ____(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ("items",)
items: t.List[Expr]
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.List[t.Any]:
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
|
List
|
python
|
marshmallow-code__marshmallow
|
tests/test_schema.py
|
{
"start": 64556,
"end": 66252
}
|
class ____(Schema):
name = fields.Raw(required=True)
def test_serialization_with_required_field():
user = User(name=None)
RequiredUserSchema().dump(user)
def test_deserialization_with_required_field():
with pytest.raises(ValidationError) as excinfo:
RequiredUserSchema().load({})
data, errors = excinfo.value.valid_data, excinfo.value.messages
assert "name" in errors
assert "Missing data for required field." in errors["name"]
assert isinstance(data, dict)
# field value should also not be in output data
assert "name" not in data
def test_deserialization_with_required_field_and_custom_validator():
def validator(val):
if val.lower() not in {"red", "blue"}:
raise ValidationError("Color must be red or blue")
class ValidatingSchema(Schema):
color = fields.String(
required=True,
validate=validator,
)
with pytest.raises(ValidationError) as excinfo:
ValidatingSchema().load({"name": "foo"})
errors = excinfo.value.messages
assert errors
assert "color" in errors
assert "Missing data for required field." in errors["color"]
with pytest.raises(ValidationError) as excinfo:
ValidatingSchema().load({"color": "green"})
errors = excinfo.value.messages
assert "color" in errors
assert "Color must be red or blue" in errors["color"]
def test_serializer_can_specify_nested_object_as_attribute(blog):
class BlogUsernameSchema(Schema):
author_name = fields.String(attribute="user.name")
ser = BlogUsernameSchema()
result = ser.dump(blog)
assert result["author_name"] == blog.user.name
|
RequiredUserSchema
|
python
|
doocs__leetcode
|
solution/3000-3099/3063.Linked List Frequency/Solution.py
|
{
"start": 151,
"end": 493
}
|
class ____:
def frequenciesOfElements(self, head: Optional[ListNode]) -> Optional[ListNode]:
cnt = Counter()
while head:
cnt[head.val] += 1
head = head.next
dummy = ListNode()
for val in cnt.values():
dummy.next = ListNode(val, dummy.next)
return dummy.next
|
Solution
|
python
|
sympy__sympy
|
sympy/categories/baseclasses.py
|
{
"start": 4610,
"end": 5838
}
|
class ____(Morphism):
"""
Represents a morphism which has a name.
Explanation
===========
Names are used to distinguish between morphisms which have the
same domain and codomain: two named morphisms are equal if they
have the same domains, codomains, and names.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> f
NamedMorphism(Object("A"), Object("B"), "f")
>>> f.name
'f'
See Also
========
Morphism
"""
def __new__(cls, domain, codomain, name):
if not name:
raise ValueError("Empty morphism names not allowed.")
if not isinstance(name, Str):
name = Str(name)
return Basic.__new__(cls, domain, codomain, name)
@property
def name(self):
"""
Returns the name of the morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> f.name
'f'
"""
return self.args[2].name
|
NamedMorphism
|
python
|
dask__distributed
|
distributed/diagnostics/nvml.py
|
{
"start": 1111,
"end": 12133
}
|
class ____(NamedTuple):
has_context: bool
device_info: CudaDeviceInfo | None = None
# Initialisation must occur per-process, so an initialised state is a
# (state, pid) pair
NVML_STATE = (
NVMLState.DISABLED_PYNVML_NOT_AVAILABLE
if pynvml is None
else NVMLState.UNINITIALIZED
)
"""Current initialization state"""
NVML_OWNER_PID = None
"""PID of process that successfully called pynvml.nvmlInit"""
MINIMUM_WSL_VERSION = "512.15"
def is_initialized():
"""Is pynvml initialized on this process?"""
return NVML_STATE == NVMLState.INITIALIZED and NVML_OWNER_PID == os.getpid()
def _in_wsl():
"""Check if we are in Windows Subsystem for Linux; some PyNVML queries are not supported there.
Taken from https://www.scivision.dev/python-detect-wsl/
"""
return "microsoft-standard" in uname().release
def init_once():
"""Idempotent (per-process) initialization of PyNVML
Notes
-----
Modifies global variables NVML_STATE and NVML_OWNER_PID"""
global NVML_STATE, NVML_OWNER_PID
if NVML_STATE in {
NVMLState.DISABLED_PYNVML_NOT_AVAILABLE,
NVMLState.DISABLED_CONFIG,
NVMLState.DISABLED_LIBRARY_NOT_FOUND,
NVMLState.DISABLED_WSL_INSUFFICIENT_DRIVER,
}:
return
elif NVML_STATE == NVMLState.INITIALIZED and NVML_OWNER_PID == os.getpid():
return
elif NVML_STATE == NVMLState.UNINITIALIZED and not dask.config.get(
"distributed.diagnostics.nvml"
):
NVML_STATE = NVMLState.DISABLED_CONFIG
return
elif (
NVML_STATE == NVMLState.INITIALIZED and NVML_OWNER_PID != os.getpid()
) or NVML_STATE == NVMLState.UNINITIALIZED:
try:
pynvml.nvmlInit()
except (
pynvml.NVMLError_LibraryNotFound,
pynvml.NVMLError_DriverNotLoaded,
pynvml.NVMLError_Unknown,
):
NVML_STATE = NVMLState.DISABLED_LIBRARY_NOT_FOUND
return
if _in_wsl() and parse_version(
ensure_unicode(pynvml.nvmlSystemGetDriverVersion())
) < parse_version(MINIMUM_WSL_VERSION):
NVML_STATE = NVMLState.DISABLED_WSL_INSUFFICIENT_DRIVER
return
else:
from distributed.worker import add_gpu_metrics
# initialization was successful
NVML_STATE = NVMLState.INITIALIZED
NVML_OWNER_PID = os.getpid()
add_gpu_metrics()
else:
raise RuntimeError(
f"Unhandled initialisation state ({NVML_STATE=}, {NVML_OWNER_PID=})"
)
def device_get_count():
init_once()
if not is_initialized():
return 0
else:
return pynvml.nvmlDeviceGetCount()
def _pynvml_handles():
count = device_get_count()
if NVML_STATE == NVMLState.DISABLED_PYNVML_NOT_AVAILABLE:
raise RuntimeError("NVML monitoring requires PyNVML and NVML to be installed")
if NVML_STATE == NVMLState.DISABLED_LIBRARY_NOT_FOUND:
raise RuntimeError("PyNVML is installed, but NVML is not")
if NVML_STATE == NVMLState.DISABLED_WSL_INSUFFICIENT_DRIVER:
raise RuntimeError(
"Outdated NVIDIA drivers for WSL, please upgrade to "
f"{MINIMUM_WSL_VERSION} or newer"
)
if NVML_STATE == NVMLState.DISABLED_CONFIG:
raise RuntimeError(
"PyNVML monitoring disabled by 'distributed.diagnostics.nvml' "
"config setting"
)
if count == 0:
raise RuntimeError("No GPUs available")
device = 0
cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "")
if cuda_visible_devices:
device = _parse_cuda_visible_device(cuda_visible_devices.split(",")[0])
return _get_handle(device)
# Port from https://github.com/rapidsai/dask-cuda/blob/0f34116c4f3cdf5dfc0df0dbfeba92655f686716/dask_cuda/utils.py#L403-L437
def _parse_cuda_visible_device(dev):
"""Parses a single CUDA device identifier
A device identifier must either be an integer, a string containing an
integer or a string containing the device's UUID, beginning with prefix
'GPU-' or 'MIG-'.
>>> parse_cuda_visible_device(2)
2
>>> parse_cuda_visible_device('2')
2
>>> parse_cuda_visible_device('GPU-9baca7f5-0f2f-01ac-6b05-8da14d6e9005')
'GPU-9baca7f5-0f2f-01ac-6b05-8da14d6e9005'
>>> parse_cuda_visible_device('Foo')
Traceback (most recent call last):
...
ValueError: Devices in CUDA_VISIBLE_DEVICES must be comma-separated integers or
strings beginning with 'GPU-' or 'MIG-' prefixes.
"""
try:
return int(dev)
except ValueError:
if any(
dev.startswith(prefix)
for prefix in [
"GPU-",
"MIG-",
]
):
return dev
else:
raise ValueError(
"Devices in CUDA_VISIBLE_DEVICES must be comma-separated integers "
"or strings beginning with 'GPU-' or 'MIG-' prefixes."
)
def _running_process_matches(handle):
"""Check whether the current process is same as that of handle
Parameters
----------
handle : pyvnml.nvml.LP_struct_c_nvmlDevice_t
NVML handle to CUDA device
Returns
-------
out : bool
Whether the device handle has a CUDA context on the running process.
"""
init_once()
if hasattr(pynvml, "nvmlDeviceGetComputeRunningProcesses_v2"):
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses_v2(handle)
else:
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
return any(os.getpid() == proc.pid for proc in running_processes)
def has_cuda_context():
"""Check whether the current process already has a CUDA context created.
Returns
-------
out : CudaContext
Object containing information as to whether the current process has a CUDA
context created, and in the positive case containing also information about
the device the context belongs to.
"""
init_once()
if is_initialized():
for index in range(device_get_count()):
handle = pynvml.nvmlDeviceGetHandleByIndex(index)
try:
mig_current_mode, mig_pending_mode = pynvml.nvmlDeviceGetMigMode(handle)
except pynvml.NVMLError_NotSupported:
mig_current_mode = pynvml.NVML_DEVICE_MIG_DISABLE
if mig_current_mode == pynvml.NVML_DEVICE_MIG_ENABLE:
for mig_index in range(pynvml.nvmlDeviceGetMaxMigDeviceCount(handle)):
try:
mig_handle = pynvml.nvmlDeviceGetMigDeviceHandleByIndex(
handle, mig_index
)
except pynvml.NVMLError_NotFound:
# No MIG device with that index
continue
if _running_process_matches(mig_handle):
uuid = pynvml.nvmlDeviceGetUUID(mig_handle)
return CudaContext(
has_context=True,
device_info=CudaDeviceInfo(
uuid=uuid, device_index=index, mig_index=mig_index
),
)
else:
if _running_process_matches(handle):
uuid = pynvml.nvmlDeviceGetUUID(handle)
return CudaContext(
has_context=True,
device_info=CudaDeviceInfo(uuid=uuid, device_index=index),
)
return CudaContext(has_context=False)
def get_device_index_and_uuid(device):
"""Get both device index and UUID from device index or UUID
Parameters
----------
device : int, bytes or str
An ``int`` with the index of a GPU, or ``bytes`` or ``str`` with the UUID
of a CUDA (either GPU or MIG) device.
Returns
-------
out : CudaDeviceInfo
Object containing information about the device.
Examples
--------
>>> get_device_index_and_uuid(0) # doctest: +SKIP
{'device-index': 0, 'uuid': 'GPU-e1006a74-5836-264f-5c26-53d19d212dfe'}
>>> get_device_index_and_uuid('GPU-e1006a74-5836-264f-5c26-53d19d212dfe') # doctest: +SKIP
{'device-index': 0, 'uuid': 'GPU-e1006a74-5836-264f-5c26-53d19d212dfe'}
>>> get_device_index_and_uuid('MIG-7feb6df5-eccf-5faa-ab00-9a441867e237') # doctest: +SKIP
{'device-index': 0, 'uuid': 'MIG-7feb6df5-eccf-5faa-ab00-9a441867e237'}
"""
init_once()
try:
device_index = int(device)
device_handle = pynvml.nvmlDeviceGetHandleByIndex(device_index)
uuid = pynvml.nvmlDeviceGetUUID(device_handle)
except ValueError:
uuid = device if isinstance(device, bytes) else bytes(device, "utf-8")
# Validate UUID, get index and UUID as seen with `nvidia-smi -L`
uuid_handle = pynvml.nvmlDeviceGetHandleByUUID(uuid)
device_index = pynvml.nvmlDeviceGetIndex(uuid_handle)
uuid = pynvml.nvmlDeviceGetUUID(uuid_handle)
return CudaDeviceInfo(uuid=uuid, device_index=device_index)
def get_device_mig_mode(device):
"""Get MIG mode for a device index or UUID
Parameters
----------
device: int, bytes or str
An ``int`` with the index of a GPU, or ``bytes`` or ``str`` with the UUID
of a CUDA (either GPU or MIG) device.
Returns
-------
out : list
A ``list`` with two integers ``[current_mode, pending_mode]``.
"""
init_once()
handle = _get_handle(device)
try:
return pynvml.nvmlDeviceGetMigMode(handle)
except pynvml.NVMLError_NotSupported:
return [0, 0]
def _get_handle(device):
try:
device_index = int(device)
return pynvml.nvmlDeviceGetHandleByIndex(device_index)
except ValueError:
uuid = device if isinstance(device, bytes) else bytes(device, "utf-8")
return pynvml.nvmlDeviceGetHandleByUUID(uuid)
def _get_utilization(h):
try:
return pynvml.nvmlDeviceGetUtilizationRates(h).gpu
except pynvml.NVMLError_NotSupported:
return None
def _get_memory_used(h):
try:
return pynvml.nvmlDeviceGetMemoryInfo(h).used
except pynvml.NVMLError_NotSupported:
return None
def _get_memory_total(h):
try:
return pynvml.nvmlDeviceGetMemoryInfo(h).total
except pynvml.NVMLError_NotSupported:
return None
def _get_name(h):
try:
return pynvml.nvmlDeviceGetName(h).decode()
except AttributeError:
return pynvml.nvmlDeviceGetName(h)
except pynvml.NVMLError_NotSupported:
return None
def real_time():
h = _pynvml_handles()
return {
"utilization": _get_utilization(h),
"memory-used": _get_memory_used(h),
}
def one_time():
h = _pynvml_handles()
return {
"memory-total": _get_memory_total(h),
"name": _get_name(h),
}
|
CudaContext
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3/configuration_qwen3.py
|
{
"start": 917,
"end": 9078
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Qwen3Model`]. It is used to instantiate a
Qwen3 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of
Qwen3-8B [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 151936):
Vocabulary size of the Qwen3 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Qwen3Model`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 22016):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 32):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
head_dim (`int`, *optional*, defaults to 128):
The attention head dimension.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 32768):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
use_sliding_window (`bool`, *optional*, defaults to `False`):
Whether to use sliding window attention.
sliding_window (`int`, *optional*, defaults to 4096):
Sliding window attention (SWA) window size. If not specified, will default to `4096`.
max_window_layers (`int`, *optional*, defaults to 28):
The number of layers using full attention. The first `max_window_layers` layers will use full attention, while any
additional layer afterwards will use SWA (Sliding Window Attention).
layer_types (`list`, *optional*):
Attention pattern for each layer.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
```python
>>> from transformers import Qwen3Model, Qwen3Config
>>> # Initializing a Qwen3 style configuration
>>> configuration = Qwen3Config()
>>> # Initializing a model from the Qwen3-8B style configuration
>>> model = Qwen3Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "qwen3"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `Qwen3`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 151936,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 22016,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = 32,
head_dim: Optional[int] = 128,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 32768,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
use_sliding_window: Optional[bool] = False,
sliding_window: Optional[int] = 4096,
max_window_layers: Optional[int] = 28,
layer_types: Optional[list[str]] = None,
attention_dropout: Optional[float] = 0.0,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.use_sliding_window = use_sliding_window
self.sliding_window = sliding_window if self.use_sliding_window else None
self.max_window_layers = max_window_layers
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.head_dim = head_dim
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.layer_types = layer_types
if self.layer_types is None:
self.layer_types = [
"sliding_attention"
if self.sliding_window is not None and i >= self.max_window_layers
else "full_attention"
for i in range(self.num_hidden_layers)
]
layer_type_validation(self.layer_types, self.num_hidden_layers)
self.rope_parameters = rope_parameters
super().__init__(
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["Qwen3Config"]
|
Qwen3Config
|
python
|
huggingface__transformers
|
examples/modular-transformers/modular_test_detr.py
|
{
"start": 290,
"end": 341
}
|
class ____(DeformableDetrModel):
pass
|
TestDetrModel
|
python
|
Textualize__textual
|
docs/examples/how-to/center03.py
|
{
"start": 80,
"end": 461
}
|
class ____(App):
"""How to center things."""
CSS = """
Screen {
align: center middle;
}
#hello {
background: blue 50%;
border: wide white;
width: auto;
}
"""
def compose(self) -> ComposeResult:
yield Static("Hello, World!", id="hello")
if __name__ == "__main__":
app = CenterApp()
app.run()
|
CenterApp
|
python
|
huggingface__transformers
|
tests/models/clip/test_modeling_clip.py
|
{
"start": 26148,
"end": 28988
}
|
class ____(unittest.TestCase):
@slow
def test_inference(self):
model_name = "openai/clip-vit-base-patch32"
model = CLIPModel.from_pretrained(model_name, attn_implementation="sdpa").to(torch_device)
processor = CLIPProcessor.from_pretrained(model_name)
image = prepare_img()
inputs = processor(
text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
).to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
self.assertEqual(
outputs.logits_per_image.shape,
torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
)
self.assertEqual(
outputs.logits_per_text.shape,
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.tensor([[24.5701, 19.3049]], device=torch_device)
torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
@slow
def test_inference_interpolate_pos_encoding(self):
# CLIP models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(torch_device)
processor = CLIPProcessor.from_pretrained(
"openai/clip-vit-base-patch32", size={"height": 180, "width": 180}, crop_size={"height": 180, "width": 180}
)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(text="what's in the image", images=image, return_tensors="pt").to(torch_device)
# interpolate_pos_encodiung false should return value error
with self.assertRaises(ValueError, msg="doesn't match model"):
with torch.no_grad():
model(**inputs, interpolate_pos_encoding=False)
# forward pass
with torch.no_grad():
outputs = model(**inputs, interpolate_pos_encoding=True)
# verify the logits
expected_shape = torch.Size((1, 26, 768))
self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[-0.1538, 0.0322, -0.3235], [0.2893, 0.1135, -0.5708], [0.0461, 0.1540, -0.6018]]
).to(torch_device)
torch.testing.assert_close(
outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, rtol=6e-3, atol=4e-4
)
|
CLIPModelIntegrationTest
|
python
|
cython__cython
|
Cython/Debugger/libpython.py
|
{
"start": 20327,
"end": 20535
}
|
class ____(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyClassObject* i.e. a <classobj>
instance within the process being debugged.
"""
_typename = 'PyClassObject'
|
PyClassObjectPtr
|
python
|
PyCQA__pylint
|
tests/pyreverse/test_writer.py
|
{
"start": 1853,
"end": 9608
}
|
class ____:
"""Config object for tests."""
def __init__(self) -> None:
for attr, value in _DEFAULTS.items():
setattr(self, attr, value)
def _file_lines(path: str) -> list[str]:
# we don't care about the actual encoding, but python3 forces us to pick one
with open(path, encoding="latin1") as stream:
lines = [
line.strip()
for line in stream.readlines()
if (
line.find("squeleton generated by ") == -1
and not line.startswith('__revision__ = "$Id:')
)
]
return [line for line in lines if line]
@pytest.fixture(autouse=True)
def change_to_temp_dir(monkeypatch: MonkeyPatch, tmp_path: Path) -> None:
monkeypatch.chdir(tmp_path)
@pytest.fixture()
def setup_dot(
default_config: PyreverseConfig,
default_args: Sequence[str],
get_project: GetProjectCallable,
) -> Iterator[None]:
writer = DiagramWriter(default_config)
project = get_project(TEST_DATA_DIR)
yield from _setup(project, default_config, default_args, writer)
@pytest.fixture()
def setup_colorized_dot(
colorized_dot_config: PyreverseConfig,
default_args: Sequence[str],
get_project: GetProjectCallable,
) -> Iterator[None]:
writer = DiagramWriter(colorized_dot_config)
project = get_project(TEST_DATA_DIR, name="colorized")
yield from _setup(project, colorized_dot_config, default_args, writer)
@pytest.fixture()
def setup_no_standalone_dot(
no_standalone_dot_config: PyreverseConfig,
default_args: Sequence[str],
get_project: GetProjectCallable,
) -> Iterator[None]:
writer = DiagramWriter(no_standalone_dot_config)
project = get_project(TEST_DATA_DIR, name="no_standalone")
yield from _setup(project, no_standalone_dot_config, default_args, writer)
@pytest.fixture()
def setup_type_check_imports_dot(
default_config: PyreverseConfig,
default_args: Sequence[str],
get_project: GetProjectCallable,
) -> Iterator[None]:
writer = DiagramWriter(default_config)
project = get_project(
os.path.join(os.path.dirname(__file__), "functional", "package_diagrams"),
name="type_check_imports",
)
yield from _setup(project, default_config, default_args, writer)
@pytest.fixture()
def setup_puml(
puml_config: PyreverseConfig,
default_args: Sequence[str],
get_project: GetProjectCallable,
) -> Iterator[None]:
writer = DiagramWriter(puml_config)
project = get_project(TEST_DATA_DIR)
yield from _setup(project, puml_config, default_args, writer)
@pytest.fixture()
def setup_colorized_puml(
colorized_puml_config: PyreverseConfig,
default_args: Sequence[str],
get_project: GetProjectCallable,
) -> Iterator[None]:
writer = DiagramWriter(colorized_puml_config)
project = get_project(TEST_DATA_DIR, name="colorized")
yield from _setup(project, colorized_puml_config, default_args, writer)
@pytest.fixture()
def setup_mmd(
mmd_config: PyreverseConfig,
default_args: Sequence[str],
get_project: GetProjectCallable,
) -> Iterator[None]:
writer = DiagramWriter(mmd_config)
project = get_project(TEST_DATA_DIR)
yield from _setup(project, mmd_config, default_args, writer)
@pytest.fixture()
def setup_html(
html_config: PyreverseConfig,
default_args: Sequence[str],
get_project: GetProjectCallable,
) -> Iterator[None]:
writer = DiagramWriter(html_config)
project = get_project(TEST_DATA_DIR)
yield from _setup(project, html_config, default_args, writer)
@pytest.fixture()
def setup_depth_limited(
depth_limited_config: PyreverseConfig,
default_args: Sequence[str],
get_project: GetProjectCallable,
) -> Iterator[None]:
writer = DiagramWriter(depth_limited_config)
project = get_project(
TEST_DATA_DIR, name=f"depth_limited_{depth_limited_config.max_depth}"
)
yield from _setup(project, depth_limited_config, default_args, writer)
def _setup(
project: Project,
config: PyreverseConfig,
args: Sequence[str],
writer: DiagramWriter,
) -> Iterator[None]:
linker = Linker(project)
handler = DiadefsHandler(config, args)
dd = DefaultDiadefGenerator(linker, handler).visit(project)
for diagram in dd:
diagram.extract_relationships()
writer.write(dd)
yield
@pytest.mark.usefixtures("setup_dot")
@pytest.mark.parametrize("generated_file", DOT_FILES)
def test_dot_files(generated_file: str) -> None:
_assert_files_are_equal(generated_file)
@pytest.mark.usefixtures("setup_colorized_dot")
@pytest.mark.parametrize("generated_file", COLORIZED_DOT_FILES)
def test_colorized_dot_files(generated_file: str) -> None:
_assert_files_are_equal(generated_file)
@pytest.mark.usefixtures("setup_no_standalone_dot")
@pytest.mark.parametrize("generated_file", NO_STANDALONE_FILES)
def test_no_standalone_dot_files(generated_file: str) -> None:
_assert_files_are_equal(generated_file)
@pytest.mark.usefixtures("setup_type_check_imports_dot")
@pytest.mark.parametrize("generated_file", TYPE_CHECK_IMPORTS_FILES)
def test_type_check_imports_dot_files(generated_file: str) -> None:
_assert_files_are_equal(generated_file)
@pytest.mark.usefixtures("setup_puml")
@pytest.mark.parametrize("generated_file", PUML_FILES)
def test_puml_files(generated_file: str) -> None:
_assert_files_are_equal(generated_file)
@pytest.mark.usefixtures("setup_mmd")
@pytest.mark.parametrize("generated_file", MMD_FILES)
def test_mmd_files(generated_file: str) -> None:
_assert_files_are_equal(generated_file)
@pytest.mark.usefixtures("setup_html")
@pytest.mark.parametrize("generated_file", HTML_FILES)
def test_html_files(generated_file: str) -> None:
_assert_files_are_equal(generated_file)
@pytest.mark.usefixtures("setup_colorized_puml")
@pytest.mark.parametrize("generated_file", COLORIZED_PUML_FILES)
def test_colorized_puml_files(generated_file: str) -> None:
_assert_files_are_equal(generated_file)
@pytest.mark.parametrize("default_max_depth", [0, 1])
@pytest.mark.usefixtures("setup_depth_limited")
def test_depth_limited_write(default_max_depth: int) -> None:
"""Test package diagram generation with a depth limit of 1."""
_assert_files_are_equal(f"packages_depth_limited_{default_max_depth}.dot")
_assert_files_are_equal(f"classes_depth_limited_{default_max_depth}.dot")
def _assert_files_are_equal(generated_file: str) -> None:
expected_file = os.path.join(os.path.dirname(__file__), "data", generated_file)
generated = _file_lines(generated_file)
expected = _file_lines(expected_file)
joined_generated = "\n".join(generated)
joined_expected = "\n".join(expected)
files = f"\n *** expected : {expected_file}, generated : {generated_file} \n"
diff = "\n".join(
line
for line in unified_diff(
joined_expected.splitlines(), joined_generated.splitlines()
)
)
assert joined_expected == joined_generated, f"{files}{diff}"
def test_color_for_stdlib_module(default_config: PyreverseConfig) -> None:
writer = DiagramWriter(default_config)
obj = Mock()
obj.node = Mock()
obj.node.qname.return_value = "collections"
assert writer.get_shape_color(obj) == "grey"
def test_package_name_with_slash(default_config: PyreverseConfig) -> None:
"""Test to check the names of the generated files are corrected
when using an incorrect character like "/" in the package name.
"""
writer = DiagramWriter(default_config)
obj = Mock()
obj.objects = []
obj.get_relationships.return_value = []
obj.title = "test/package/name/with/slash/"
writer.write([obj])
assert os.path.exists("test_package_name_with_slash_.dot")
|
Config
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/migrations/0113_disable_analytics_addons.py
|
{
"start": 148,
"end": 498
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0112_alter_project_help_text"),
]
operations = [
migrations.AlterField(
model_name="addonsconfig",
name="analytics_enabled",
field=models.BooleanField(default=False),
),
]
|
Migration
|
python
|
huggingface__transformers
|
src/transformers/models/vit_msn/modeling_vit_msn.py
|
{
"start": 13401,
"end": 14724
}
|
class ____(GradientCheckpointingLayer):
"""This corresponds to the Block class in the timm implementation."""
def __init__(self, config: ViTMSNConfig):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = ViTMSNAttention(config)
self.intermediate = ViTMSNIntermediate(config)
self.output = ViTMSNOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states_norm = self.layernorm_before(hidden_states)
attention_output = self.attention(hidden_states_norm)
# first residual connection
hidden_states = attention_output + hidden_states
# in ViTMSN, layernorm is also applied after self-attention
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
# second residual connection is done here
layer_output = self.output(layer_output, hidden_states)
return layer_output
# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->ViTMSN
|
ViTMSNLayer
|
python
|
jina-ai__jina
|
tests/unit/serve/stream/test_stream.py
|
{
"start": 237,
"end": 4742
}
|
class ____:
def __init__(self, num_requests, prefetch, iterate_sync_in_thread):
self.num_requests = num_requests
self.requests_handled = []
self.results_handled = []
self.request_ids = [random_identity() for _ in range(num_requests)]
self.response_ids = []
args = Namespace()
args.prefetch = prefetch
self.streamer = RequestStreamer(
request_handler=self.request_handler_fn,
result_handler=self.result_handle_fn,
end_of_iter_handler=self.end_of_iter_fn,
prefetch=getattr(args, 'prefetch', 0),
iterate_sync_in_thread=iterate_sync_in_thread,
)
def request_handler_fn(self, request, **kwargs):
self.requests_handled.append(request)
async def task():
rand_sleep = random.uniform(0.1, 0.6)
await asyncio.sleep(rand_sleep)
docs = request.docs
docs[0].tags['request_handled'] = True
request.data.docs = docs
return request
future = asyncio.ensure_future(task())
return future, None
def result_handle_fn(self, result):
self.results_handled.append(result)
assert isinstance(result, DataRequest)
docs = result.docs
docs[0].tags['result_handled'] = True
result.data.docs = docs
return result
def end_of_iter_fn(self):
# with a sync generator, iteration
assert len(self.requests_handled) == self.num_requests
assert len(self.results_handled) <= self.num_requests
def _yield_data_request(self, i):
req = DataRequest()
req.header.request_id = self.request_ids[i]
da = DocumentArray()
da.append(Document())
req.data.docs = da
return req
def _get_sync_requests_iterator(self):
for i in range(self.num_requests):
yield self._yield_data_request(i)
async def _get_async_requests_iterator(self):
for i in range(self.num_requests):
yield self._yield_data_request(i)
await asyncio.sleep(0.1)
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('async_iterator', [False, True])
@pytest.mark.parametrize('results_in_order', [False, True])
@pytest.mark.parametrize('iterate_sync_in_thread', [False, True])
async def test_request_streamer(
prefetch, num_requests, async_iterator, results_in_order, iterate_sync_in_thread
):
test_streamer = RequestStreamerWrapper(
num_requests, prefetch, iterate_sync_in_thread
)
streamer = test_streamer.streamer
it = (
test_streamer._get_async_requests_iterator()
if async_iterator
else test_streamer._get_sync_requests_iterator()
)
response = streamer.stream(request_iterator=it, results_in_order=results_in_order)
num_responses = 0
async for r in response:
test_streamer.response_ids.append(r.header.request_id)
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
assert len(test_streamer.request_ids) == len(test_streamer.response_ids)
if results_in_order:
for req_id, resp_id in zip(
test_streamer.request_ids, test_streamer.response_ids
):
assert req_id == resp_id
@pytest.mark.asyncio
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('iterate_sync_in_thread', [False, True])
async def test_request_streamer_process_single_data(
monkeypatch, num_requests, iterate_sync_in_thread
):
test_streamer = RequestStreamerWrapper(num_requests, 0, iterate_sync_in_thread)
streamer = test_streamer.streamer
def end_of_iter_fn():
# bypass some assertions in RequestStreamerWrapper.end_of_iter_fn
pass
monkeypatch.setattr(streamer, '_end_of_iter_handler', end_of_iter_fn)
it = test_streamer._get_sync_requests_iterator()
num_responses = 0
for req in it:
r = await streamer.process_single_data(request=req)
test_streamer.response_ids.append(r.header.request_id)
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
assert len(test_streamer.request_ids) == len(test_streamer.response_ids)
|
RequestStreamerWrapper
|
python
|
pypa__pipenv
|
pipenv/vendor/click/exceptions.py
|
{
"start": 1157,
"end": 2523
}
|
class ____(ClickException):
"""An internal exception that signals a usage error. This typically
aborts any further handling.
:param message: the error message to display.
:param ctx: optionally the context that caused this error. Click will
fill in the context automatically in some situations.
"""
exit_code = 2
def __init__(self, message: str, ctx: t.Optional["Context"] = None) -> None:
super().__init__(message)
self.ctx = ctx
self.cmd: t.Optional["Command"] = self.ctx.command if self.ctx else None
def show(self, file: t.Optional[t.IO[t.Any]] = None) -> None:
if file is None:
file = get_text_stderr()
color = None
hint = ""
if (
self.ctx is not None
and self.ctx.command.get_help_option(self.ctx) is not None
):
hint = _("Try '{command} {option}' for help.").format(
command=self.ctx.command_path, option=self.ctx.help_option_names[0]
)
hint = f"{hint}\n"
if self.ctx is not None:
color = self.ctx.color
echo(f"{self.ctx.get_usage()}\n{hint}", file=file, color=color)
echo(
_("Error: {message}").format(message=self.format_message()),
file=file,
color=color,
)
|
UsageError
|
python
|
openai__openai-python
|
src/openai/types/responses/response_input_item_param.py
|
{
"start": 7383,
"end": 8068
}
|
class ____(TypedDict, total=False):
action: Required[ShellCallAction]
"""The shell commands and limits that describe how to run the tool call."""
call_id: Required[str]
"""The unique ID of the function shell tool call generated by the model."""
type: Required[Literal["shell_call"]]
"""The type of the item. Always `function_shell_call`."""
id: Optional[str]
"""The unique ID of the function shell tool call.
Populated when this item is returned via API.
"""
status: Optional[Literal["in_progress", "completed", "incomplete"]]
"""The status of the shell call.
One of `in_progress`, `completed`, or `incomplete`.
"""
|
ShellCall
|
python
|
dask__dask
|
dask/diagnostics/progress.py
|
{
"start": 680,
"end": 5001
}
|
class ____(Callback):
"""A progress bar for dask.
Parameters
----------
minimum : int, optional
Minimum time threshold in seconds before displaying a progress bar.
Default is 0 (always display)
width : int, optional
Width of the bar
dt : float, optional
Update resolution in seconds, default is 0.1 seconds
out : file object, optional
File object to which the progress bar will be written
It can be ``sys.stdout``, ``sys.stderr`` or any other file object able to write ``str`` objects
Default is ``sys.stdout``
Examples
--------
Below we create a progress bar with a minimum threshold of 1 second before
displaying. For cheap computations nothing is shown:
>>> with ProgressBar(minimum=1.0): # doctest: +SKIP
... out = some_fast_computation.compute()
But for expensive computations a full progress bar is displayed:
>>> with ProgressBar(minimum=1.0): # doctest: +SKIP
... out = some_slow_computation.compute()
[########################################] | 100% Completed | 10.4 s
The duration of the last computation is available as an attribute
>>> pbar = ProgressBar() # doctest: +SKIP
>>> with pbar: # doctest: +SKIP
... out = some_computation.compute()
[########################################] | 100% Completed | 10.4 s
>>> pbar.last_duration # doctest: +SKIP
10.4
You can also register a progress bar so that it displays for all
computations:
>>> pbar = ProgressBar() # doctest: +SKIP
>>> pbar.register() # doctest: +SKIP
>>> some_slow_computation.compute() # doctest: +SKIP
[########################################] | 100% Completed | 10.4 s
"""
def __init__(self, minimum=0, width=40, dt=0.1, out=None):
if out is None:
# Warning, on windows, stdout can still be None if
# an application is started as GUI Application
# https://docs.python.org/3/library/sys.html#sys.__stderr__
out = sys.stdout
self._minimum = minimum
self._width = width
self._dt = dt
self._file = out
self.last_duration = 0
def _start(self, dsk):
self._state = None
self._start_time = default_timer()
# Start background thread
self._running = True
self._timer = threading.Thread(target=self._timer_func)
self._timer.daemon = True
self._timer.start()
def _pretask(self, key, dsk, state):
self._state = state
if self._file is not None:
self._file.flush()
def _finish(self, dsk, state, errored):
self._running = False
self._timer.join()
elapsed = default_timer() - self._start_time
self.last_duration = elapsed
if elapsed < self._minimum:
return
if not errored:
self._draw_bar(1, elapsed)
else:
self._update_bar(elapsed)
if self._file is not None:
self._file.write("\n")
self._file.flush()
def _timer_func(self):
"""Background thread for updating the progress bar"""
while self._running:
elapsed = default_timer() - self._start_time
if elapsed > self._minimum:
self._update_bar(elapsed)
time.sleep(self._dt)
def _update_bar(self, elapsed):
s = self._state
if not s:
self._draw_bar(0, elapsed)
return
ndone = len(s["finished"])
ntasks = sum(len(s[k]) for k in ["ready", "waiting", "running"]) + ndone
if ndone < ntasks:
self._draw_bar(ndone / ntasks if ntasks else 0, elapsed)
def _draw_bar(self, frac, elapsed):
from dask.utils import format_time
bar = "#" * int(self._width * frac)
percent = int(100 * frac)
elapsed = format_time(elapsed)
msg = "\r[{0:<{1}}] | {2}% Completed | {3}".format(
bar, self._width, percent, elapsed
)
with contextlib.suppress(ValueError):
if self._file is not None:
self._file.write(msg)
self._file.flush()
|
ProgressBar
|
python
|
Farama-Foundation__Gymnasium
|
gymnasium/wrappers/utils.py
|
{
"start": 558,
"end": 7982
}
|
class ____:
"""Tracks the mean, variance and count of values."""
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, epsilon=1e-4, shape=(), dtype=np.float64):
"""Tracks the mean, variance and count of values."""
self.mean = np.zeros(shape, dtype=dtype)
self.var = np.ones(shape, dtype=dtype)
self.count = epsilon
def update(self, x):
"""Updates the mean, var and count from a batch of samples."""
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
"""Updates from batch mean, variance and count moments."""
self.mean, self.var, self.count = update_mean_var_count_from_moments(
self.mean, self.var, self.count, batch_mean, batch_var, batch_count
)
def update_mean_var_count_from_moments(
mean, var, count, batch_mean, batch_var, batch_count
):
"""Updates the mean, var and count using the previous mean, var, count and batch values."""
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
@singledispatch
def create_zero_array(space: Space[T_cov]) -> T_cov:
"""Creates a zero-based array of a space, this is similar to ``create_empty_array`` except all arrays are valid samples from the space.
As some ``Box`` cases have ``high`` or ``low`` that don't contain zero then the ``create_empty_array`` would in case
create arrays which is not contained in the space.
Args:
space: The space to create a zero array for
Returns:
Valid sample from the space that is as close to zero as possible
"""
if isinstance(space, Space):
raise CustomSpaceError(
f"Space of type `{type(space)}` doesn't have an registered `create_zero_array` function. Register `{type(space)}` for `create_zero_array` to support it."
)
else:
raise TypeError(
f"The space provided to `create_zero_array` is not a gymnasium Space instance, type: {type(space)}, {space}"
)
@create_zero_array.register(Box)
def _create_box_zero_array(space: Box):
zero_array = np.zeros(space.shape, dtype=space.dtype)
zero_array = np.where(space.low > 0, space.low, zero_array)
zero_array = np.where(space.high < 0, space.high, zero_array)
return zero_array
@create_zero_array.register(Discrete)
def _create_discrete_zero_array(space: Discrete):
return space.start
@create_zero_array.register(MultiDiscrete)
def _create_multidiscrete_zero_array(space: MultiDiscrete):
return np.array(space.start, copy=True, dtype=space.dtype)
@create_zero_array.register(MultiBinary)
def _create_array_zero_array(space: MultiBinary):
return np.zeros(space.shape, dtype=space.dtype)
@create_zero_array.register(Tuple)
def _create_tuple_zero_array(space: Tuple):
return tuple(create_zero_array(subspace) for subspace in space.spaces)
@create_zero_array.register(Dict)
def _create_dict_zero_array(space: Dict):
return {key: create_zero_array(subspace) for key, subspace in space.spaces.items()}
@create_zero_array.register(Sequence)
def _create_sequence_zero_array(space: Sequence):
if space.stack:
return create_zero_array(space.stacked_feature_space)
else:
return tuple()
@create_zero_array.register(Text)
def _create_text_zero_array(space: Text):
return "".join(space.characters[0] for _ in range(space.min_length))
@create_zero_array.register(Graph)
def _create_graph_zero_array(space: Graph):
nodes = np.expand_dims(create_zero_array(space.node_space), axis=0)
if space.edge_space is None:
return GraphInstance(nodes=nodes, edges=None, edge_links=None)
else:
edges = np.expand_dims(create_zero_array(space.edge_space), axis=0)
edge_links = np.zeros((1, 2), dtype=np.int64)
return GraphInstance(nodes=nodes, edges=edges, edge_links=edge_links)
@create_zero_array.register(OneOf)
def _create_one_of_zero_array(space: OneOf):
return 0, create_zero_array(space.spaces[0])
def rescale_box(
box: Box,
new_min: np.floating | np.integer | np.ndarray,
new_max: np.floating | np.integer | np.ndarray,
) -> tuple[Box, Callable[[np.ndarray], np.ndarray], Callable[[np.ndarray], np.ndarray]]:
"""Rescale and shift the given box space to match the given bounds.
For unbounded components in the original space, the corresponding target bounds must also be infinite and vice versa.
Args:
box: The box space to rescale
new_min: The new minimum bound
new_max: The new maximum bound
Returns:
A tuple containing the rescaled box space, the forward transformation function (original -> rescaled) and the
backward transformation function (rescaled -> original).
"""
assert isinstance(box, Box)
if not isinstance(new_min, np.ndarray):
assert np.issubdtype(type(new_min), np.integer) or np.issubdtype(
type(new_min), np.floating
)
new_min = np.full(box.shape, new_min)
assert (
new_min.shape == box.shape
), f"{new_min.shape}, {box.shape}, {new_min}, {box.low}"
if not isinstance(new_max, np.ndarray):
assert np.issubdtype(type(new_max), np.integer) or np.issubdtype(
type(new_max), np.floating
)
new_max = np.full(box.shape, new_max)
assert new_max.shape == box.shape
assert np.all((new_min == box.low)[np.isinf(new_min) | np.isinf(box.low)])
assert np.all((new_max == box.high)[np.isinf(new_max) | np.isinf(box.high)])
assert np.all(new_min <= new_max)
assert np.all(box.low <= box.high)
# Imagine the x-axis between the old Box and the y-axis being the new Box
# float128 is not available everywhere
try:
high_low_diff_dtype = np.float128
except AttributeError:
high_low_diff_dtype = np.float64
min_finite = np.isfinite(new_min)
max_finite = np.isfinite(new_max)
both_finite = min_finite & max_finite
high_low_diff = np.array(
box.high[both_finite], dtype=high_low_diff_dtype
) - np.array(box.low[both_finite], dtype=high_low_diff_dtype)
gradient = np.ones_like(new_min, dtype=box.dtype)
gradient[both_finite] = (
new_max[both_finite] - new_min[both_finite]
) / high_low_diff
intercept = np.zeros_like(new_min, dtype=box.dtype)
# In cases where both are finite, the lower operation takes precedence
intercept[max_finite] = new_max[max_finite] - box.high[max_finite]
intercept[min_finite] = (
gradient[min_finite] * -box.low[min_finite] + new_min[min_finite]
)
new_box = Box(
low=new_min,
high=new_max,
shape=box.shape,
dtype=box.dtype,
)
def forward(obs: np.ndarray) -> np.ndarray:
return gradient * obs + intercept
def backward(obs: np.ndarray) -> np.ndarray:
return (obs - intercept) / gradient
return new_box, forward, backward
|
RunningMeanStd
|
python
|
pdm-project__pdm
|
src/pdm/cli/commands/python.py
|
{
"start": 1537,
"end": 2031
}
|
class ____(BaseCommand):
"""List all Python interpreters installed with PDM"""
arguments = (verbose_option,)
def handle(self, project: Project, options: Namespace) -> None:
from findpython.providers.rye import RyeProvider
ui = project.core.ui
provider = RyeProvider(root=Path(project.config["python.install_root"]).expanduser())
for version in provider.find_pythons():
ui.echo(f"[success]{version}[/] ({version.executable})")
|
ListCommand
|
python
|
Textualize__textual
|
src/textual/widgets/_header.py
|
{
"start": 1334,
"end": 1721
}
|
class ____(Widget):
"""The space taken up by the clock on the right of the header."""
DEFAULT_CSS = """
HeaderClockSpace {
dock: right;
width: 10;
padding: 0 1;
}
"""
def render(self) -> RenderResult:
"""Render the header clock space.
Returns:
The rendered space.
"""
return ""
|
HeaderClockSpace
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.